index int64 0 0 | repo_id stringlengths 26 205 | file_path stringlengths 51 246 | content stringlengths 8 433k | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/s3mper/src/main/java/com/netflix/bdp/s3mper | Create_ds/s3mper/src/main/java/com/netflix/bdp/s3mper/cli/MetastorePathDeleteCommand.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.bdp.s3mper.cli;
import com.netflix.bdp.s3mper.common.PathUtil;
import com.netflix.bdp.s3mper.metastore.impl.MetastoreJanitor;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.kohsuke.args4j.Argument;
import org.kohsuke.args4j.CmdLineException;
import org.kohsuke.args4j.CmdLineParser;
import org.kohsuke.args4j.Option;
import org.kohsuke.args4j.OptionHandlerFilter;
/**
* Deletes paths from the metastore using a full scan. The timeseries
* command is the preferred approach to deleting expired entries.
*
* @author dweeks
*/
public class MetastorePathDeleteCommand extends Command {

    @Option(name="-ru",usage="Max read units to consume")
    private int readUnits = 100;

    @Option(name="-wu",usage="Max write units to consume")
    private int writeUnits = 200;

    // Fixed typo in user-visible help text: "Numer" -> "Number".
    @Option(name="-s",usage="Number of scan threads")
    private int scanThreads = 1;

    @Option(name="-d",usage="Number of delete threads")
    private int deleteThreads = 10;

    @Option(name="-u",usage="Time unit (days, hours, minutes)")
    private String unitType = "Days";

    @Option(name="-n",usage="Number of specified units")
    private int unitCount = 1;

    // Bound by args4j to any non-option arguments. NOTE: shadowed inside
    // execute() by the method parameter of the same name; args4j still
    // populates this field via reflection.
    @Argument
    private List<String> args = new ArrayList<String>();

    /**
     * Parses command-line options and runs a full-scan delete of expired
     * metastore entries via {@link MetastoreJanitor}.
     *
     * @param conf Hadoop configuration passed through to the janitor
     * @param args raw command-line arguments to parse
     * @throws Exception if parsing the time unit or janitor operations fail
     */
    @Override
    public void execute(Configuration conf, String[] args) throws Exception {
        CmdLineParser parser = new CmdLineParser(this);

        try {
            parser.parseArgument(args);
        } catch (CmdLineException e) {
            System.err.println(e.getMessage());
            // Fixed copy-pasted sample text ("java SampleMain") to reflect
            // the actual command being run.
            System.err.println("s3mper metastore [options...] arguments...");
            // print the list of available options
            parser.printUsage(System.err);
            System.err.println();

            System.err.println(" Example: s3mper metastore "+parser.printExample(OptionHandlerFilter.ALL));

            return;
        }

        MetastoreJanitor janitor = new MetastoreJanitor();
        // Note: "initalize" is the (misspelled) name defined by the janitor API.
        janitor.initalize(PathUtil.S3N, conf);
        janitor.setScanLimit(readUnits);
        janitor.setDeleteLimit(writeUnits);
        janitor.setScanThreads(scanThreads);
        janitor.setDeleteThreads(deleteThreads);

        // unitType must match a java.util.concurrent.TimeUnit constant
        // (case-insensitive), e.g. DAYS, HOURS, MINUTES.
        janitor.deletePaths(TimeUnit.valueOf(unitType.toUpperCase()), unitCount);
    }
}
| 300 |
0 | Create_ds/s3mper/src/main/java/com/netflix/bdp/s3mper | Create_ds/s3mper/src/main/java/com/netflix/bdp/s3mper/common/ExponentialBackoffAlgorithm.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.bdp.s3mper.common;
/**
* Provides exponential backoff based on the provided initial delay.
*
* @author dweeks
*/
public class ExponentialBackoffAlgorithm implements BackoffAlgorithm {
    // Base delay; each successive attempt doubles the returned value.
    // (Kept public for compatibility with existing callers.)
    public long delay;
    // Count of delays handed out so far; drives the exponent.
    public int attempt = 0;

    /**
     * Creates a backoff that starts at {@code delay} and doubles each call.
     *
     * @param delay the initial delay (units are whatever the caller sleeps in)
     */
    public ExponentialBackoffAlgorithm(long delay) {
        this.delay = delay;
    }

    /** An exponential backoff never runs out of delays. */
    @Override
    public boolean hasNext() {
        return true;
    }

    /** Returns {@code delay * 2^attempt}, then advances the attempt counter. */
    @Override
    public Long next() {
        double multiplier = Math.pow(2, attempt);
        attempt++;
        return (long) (delay * multiplier);
    }

    /** Removal makes no sense for a generated sequence. */
    @Override
    public void remove() {
        throw new UnsupportedOperationException("remove() not supported");
    }
}
| 301 |
0 | Create_ds/s3mper/src/main/java/com/netflix/bdp/s3mper | Create_ds/s3mper/src/main/java/com/netflix/bdp/s3mper/common/PathUtil.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.bdp.s3mper.common;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
/**
* Utility for basic path information/operations.
*
* @author dweeks
*/
public class PathUtil {
    // Default URI used when initializing metastores. The name is historical:
    // the scheme is configurable via "s3mper.uri.scheme" (config first, then
    // system property), defaulting to "s3n".
    public static final URI S3N;

    static {
        Configuration bootstrapConf = new Configuration();
        String uriScheme = bootstrapConf.get("s3mper.uri.scheme", System.getProperty("s3mper.uri.scheme", "s3n"));
        S3N = new Path(uriScheme + "://default").toUri();
    }

    /**
     * Returns the scheme-specific part of the path's normalized URI with any
     * single trailing slash removed.
     *
     * @param path the path to normalize
     * @return the normalized scheme-specific part, without a trailing slash
     */
    public static String normalize(Path path) {
        URI normalized = path.toUri().normalize();
        return normalized.getSchemeSpecificPart().replaceAll("/$", "");
    }
}
| 302 |
0 | Create_ds/s3mper/src/main/java/com/netflix/bdp/s3mper | Create_ds/s3mper/src/main/java/com/netflix/bdp/s3mper/common/IncrementalBackoffAlgorithm.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.bdp.s3mper.common;
import java.util.Iterator;
/**
* Provides incremental backoff based on given increment.
*
* @author dweeks
*/
public class IncrementalBackoffAlgorithm implements BackoffAlgorithm {
    // NOTE(review): despite the class name, the delay returned is constant —
    // next() never grows the value. Confirm whether a growing delay was
    // intended before relying on the name.
    private long increment;

    /**
     * Creates a backoff that returns the same delay for every attempt.
     *
     * @param increment the fixed delay returned by each {@link #next()} call
     */
    public IncrementalBackoffAlgorithm(long increment) {
        this.increment = increment;
    }

    /** A fixed backoff always has another delay available. */
    @Override
    public boolean hasNext() {
        return true;
    }

    /** Returns the fixed delay supplied at construction. */
    @Override
    public Long next() {
        return increment;
    }

    /** Removal makes no sense for a generated sequence. */
    @Override
    public void remove() {
        throw new UnsupportedOperationException("Remove not supported");
    }
}
| 303 |
0 | Create_ds/s3mper/src/main/java/com/netflix/bdp/s3mper | Create_ds/s3mper/src/main/java/com/netflix/bdp/s3mper/common/RetryTask.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.bdp.s3mper.common;
import java.util.concurrent.Callable;
import org.apache.log4j.Logger;
import static java.lang.String.*;
import java.util.concurrent.CancellationException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.FutureTask;
import java.util.concurrent.TimeUnit;
/**
* A callable task that will automatically retry on failure. The retry task is
* callable itself, so it is possible to use with an executor or just invoke
* the call() method.
*
* @param <T>
* @author dweeks
*/
public class RetryTask<T> implements Callable<T> {
    private static final Logger log = Logger.getLogger(RetryTask.class.getName());

    private int maxRetries = 10;
    // Per-attempt timeout in milliseconds.
    private int timeout = 1000;
    private BackoffAlgorithm backoff = new ExponentialBackoffAlgorithm(1000);

    // The callable to invoke (and re-invoke on failure).
    private Callable<T> target;

    public RetryTask(Callable<T> target) {
        this.target = target;
    }

    public RetryTask(Callable<T> target, int maxRetries, int timeout) {
        this.target = target;
        this.maxRetries = maxRetries;
        this.timeout = timeout;
    }

    public RetryTask(Callable<T> target, int maxRetries, int timeout, BackoffAlgorithm backoff) {
        this.target = target;
        this.maxRetries = maxRetries;
        this.timeout = timeout;
        this.backoff = backoff;
    }

    /**
     * Invokes the target, retrying up to {@code maxRetries} times with the
     * configured backoff between attempts. Each attempt is bounded by
     * {@code timeout} milliseconds.
     *
     * @return the target's result from the first successful attempt
     * @throws InterruptedException if the calling thread is interrupted
     * @throws Exception the last attempt's failure once retries are exhausted
     */
    @Override
    public T call() throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();

        try {
            for (int attempt = 1; attempt <= maxRetries; attempt++) {
                FutureTask<T> future = new FutureTask<T>(target);
                try {
                    executor.submit(future);

                    return future.get(timeout, TimeUnit.MILLISECONDS);
                } catch (InterruptedException ie) {
                    throw ie;
                } catch (CancellationException ce) {
                    throw ce;
                } catch (Exception e) {
                    // BUGFIX: cancel the failed/timed-out attempt. Without
                    // this, a hung target keeps running on the single worker
                    // thread and every subsequent retry queues behind it,
                    // making the per-attempt timeout meaningless.
                    future.cancel(true);

                    log.warn(format("Call attempt failed (%d of %d)", attempt, maxRetries));

                    if(attempt == maxRetries) {
                        throw e;
                    }

                    Thread.sleep(backoff.next());
                }
            }
        } finally {
            // shutdownNow (rather than shutdown) interrupts any still-running
            // attempt so a hung target cannot keep the JVM alive.
            executor.shutdownNow();
        }

        // Unreachable: the loop either returns or throws on the last attempt.
        throw new RuntimeException("Unexpected retry call failure");
    }
}
| 304 |
0 | Create_ds/s3mper/src/main/java/com/netflix/bdp/s3mper | Create_ds/s3mper/src/main/java/com/netflix/bdp/s3mper/common/BackoffAlgorithm.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.bdp.s3mper.common;
import java.util.Iterator;
/**
* Interface for providing different backoff implementations.
*
* @author dweeks
*/
public interface BackoffAlgorithm extends Iterator<Long> {
    // Marker interface: implementations yield successive backoff delays via
    // Iterator.next(). Callers pass the values to Thread.sleep (i.e. they are
    // treated as milliseconds). hasNext() reports whether another delay is
    // available; remove() is typically unsupported.
}
| 305 |
0 | Create_ds/s3mper/src/main/java/com/netflix/bdp/s3mper | Create_ds/s3mper/src/main/java/com/netflix/bdp/s3mper/listing/S3ConsistencyException.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.bdp.s3mper.listing;
/**
* Exception that gets thrown if an inconsistent listing is detected.
*
* @author dweeks
*/
public class S3ConsistencyException extends RuntimeException {

    /** Creates the exception with no detail message or cause. */
    public S3ConsistencyException() {
    }

    /**
     * Creates the exception with a detail message.
     *
     * @param message description of the consistency failure
     */
    public S3ConsistencyException(String message) {
        super(message);
    }

    /**
     * Creates the exception with a detail message and underlying cause.
     *
     * @param message description of the consistency failure
     * @param cause the underlying failure
     */
    public S3ConsistencyException(String message, Throwable cause) {
        super(message, cause);
    }

    /**
     * Creates the exception wrapping an underlying cause.
     *
     * @param cause the underlying failure
     */
    public S3ConsistencyException(Throwable cause) {
        super(cause);
    }
}
| 306 |
0 | Create_ds/s3mper/src/main/java/com/netflix/bdp/s3mper | Create_ds/s3mper/src/main/java/com/netflix/bdp/s3mper/listing/ConsistentListingAspect.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.bdp.s3mper.listing;
import com.netflix.bdp.s3mper.metastore.FileInfo;
import com.netflix.bdp.s3mper.metastore.FileSystemMetastore;
import com.netflix.bdp.s3mper.alert.AlertDispatcher;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.log4j.Logger;
import org.aspectj.lang.JoinPoint;
import org.aspectj.lang.ProceedingJoinPoint;
import org.aspectj.lang.annotation.Around;
import org.aspectj.lang.annotation.Aspect;
import org.aspectj.lang.annotation.Before;
import org.aspectj.lang.annotation.Pointcut;
import static com.netflix.bdp.s3mper.common.PathUtil.*;
import static java.lang.String.*;
import java.util.Iterator;
/**
* This class provides advice to the S3 hadoop FileSystem implementation and uses
* a metastore for consistent listing.
*
* @author dweeks
*/
@Aspect
public abstract class ConsistentListingAspect {
    private static final Logger log = Logger.getLogger(ConsistentListingAspect.class.getName());

    private FileSystemMetastore metastore = null;
    private AlertDispatcher alertDispatcher = null;

    // Consistency checking is off until initialize() enables it successfully.
    private boolean disabled = true;
    private boolean failOnError = Boolean.getBoolean("s3mper.failOnError");
    private boolean taskFailOnError = Boolean.getBoolean("s3mper.task.failOnError");
    private boolean checkTaskListings = Boolean.getBoolean("s3mper.listing.task.check");
    private boolean failOnTimeout = Boolean.getBoolean("s3mper.failOnTimeout");
    private boolean trackDirectories = Boolean.getBoolean("s3mper.listing.directory.tracking");
    private boolean delistDeleteMarkedFiles = true;

    // Fraction of metastore files that must appear in S3 before a listing is
    // accepted as "consistent enough" (1 = require everything).
    private float fileThreshold = 1;

    private long recheckCount = Long.getLong("s3mper.listing.recheck.count", 15);
    private long recheckPeriod = Long.getLong("s3mper.listing.recheck.period", TimeUnit.MINUTES.toMillis(1));
    private long taskRecheckCount = Long.getLong("s3mper.listing.task.recheck.count", 0);
    private long taskRecheckPeriod = Long.getLong("s3mper.listing.task.recheck.period", TimeUnit.MINUTES.toMillis(1));

    @Pointcut
    public abstract void init();

    /**
     * Creates the metastore on initialization.
     *
     * #TODO The metastore isn't created instantly by DynamoDB.  This should wait until
     * the initialization is complete.  If the store doesn't exist, calls will fail until
     * it is created.
     *
     * @param jp
     * @throws Exception
     */
    @Before("init()")
    public synchronized void initialize(JoinPoint jp) throws Exception {
        URI uri = (URI) jp.getArgs()[0];
        Configuration conf = (Configuration) jp.getArgs()[1];

        updateConfig(conf);

        //Check again after updating configs
        if(disabled) {
            return;
        }

        if(metastore == null) {
            log.debug("Initializing S3mper Metastore");

            //FIXME: This is defaulted to the dynamodb metastore impl, but shouldn't
            //       reference it directly like this.
            Class<?> metaImpl = conf.getClass("s3mper.metastore.impl", com.netflix.bdp.s3mper.metastore.impl.DynamoDBMetastore.class);

            try {
                metastore = (FileSystemMetastore) ReflectionUtils.newInstance(metaImpl, conf);
                metastore.initalize(uri, conf);
            } catch (Exception e) {
                log.error("Error initializing s3mper metastore", e);

                disable();

                if(failOnError) {
                    throw e;
                }
            }
        } else {
            log.debug("S3mper Metastore already initialized.");
        }

        if(alertDispatcher == null) {
            log.debug("Initializing Alert Dispatcher");

            try {
                Class<?> dispatcherImpl = conf.getClass("s3mper.dispatcher.impl", com.netflix.bdp.s3mper.alert.impl.CloudWatchAlertDispatcher.class);

                alertDispatcher = (AlertDispatcher) ReflectionUtils.newInstance(dispatcherImpl, conf);
                alertDispatcher.init(uri, conf);
            } catch (Exception e) {
                log.error("Error initializing s3mper alert dispatcher", e);

                disable();

                if(failOnError) {
                    throw e;
                }
            }
        } else {
            alertDispatcher.setConfig(conf);
        }
    }

    /** Re-reads all tunables from the supplied configuration. */
    private void updateConfig(Configuration conf) {
        disabled = conf.getBoolean("s3mper.disable", disabled);

        if(disabled) {
            log.warn("S3mper Consistency explicitly disabled.");
            return;
        }

        failOnError = conf.getBoolean("s3mper.failOnError", failOnError);
        taskFailOnError = conf.getBoolean("s3mper.task.failOnError", taskFailOnError);
        checkTaskListings = conf.getBoolean("s3mper.listing.task.check", checkTaskListings);
        failOnTimeout = conf.getBoolean("s3mper.failOnTimeout", failOnTimeout);
        delistDeleteMarkedFiles = conf.getBoolean("s3mper.listing.delist.deleted", delistDeleteMarkedFiles);
        trackDirectories = conf.getBoolean("s3mper.listing.directory.tracking", trackDirectories);

        fileThreshold = conf.getFloat("s3mper.listing.threshold", fileThreshold);

        recheckCount = conf.getLong("s3mper.listing.recheck.count", recheckCount);
        recheckPeriod = conf.getLong("s3mper.listing.recheck.period", recheckPeriod);
        taskRecheckCount = conf.getLong("s3mper.listing.task.recheck.count", taskRecheckCount);
        taskRecheckPeriod = conf.getLong("s3mper.listing.task.recheck.period", taskRecheckPeriod);
    }

    @Pointcut
    public abstract void create();

    /**
     * Updates the metastore when a FileSystem.create(...) method is called.
     *
     * @param pjp
     * @return
     * @throws Throwable
     */
    @Around("create() && !within(ConsistentListingAspect)")
    public Object metastoreUpdate(final ProceedingJoinPoint pjp) throws Throwable {
        if(disabled) {
            return pjp.proceed();
        }

        Configuration conf = ((FileSystem) pjp.getTarget()).getConf();
        updateConfig(conf);

        Object result = pjp.proceed();

        Path path = null;

        // e.g. mkdirs() returning false: nothing was created, so nothing to record.
        if (result instanceof Boolean && !((Boolean) result)) {
            return result;
        }

        try {
            //Locate the path parameter in the arguments
            for (Object arg : pjp.getArgs()) {
                if (arg instanceof Path) {
                    path = (Path) arg;
                    break;
                }
            }

            // Record directories only when tracking is on and this was a mkdir-style call.
            metastore.add(path, trackDirectories && pjp.getSignature().getName().contains("mkdir"));
        } catch (TimeoutException t) {
            log.error("Timeout occurred adding path to metastore: " + path, t);

            alertDispatcher.timeout("metastoreUpdate", Collections.singletonList(path));

            if(failOnTimeout) {
                throw t;
            }
        } catch (Exception e) {
            log.error("Failed to add path to metastore: " + path, e);

            if(shouldFail(conf)) {
                throw e;
            }
        }

        return result;
    }

    @Pointcut
    public abstract void list();

    /**
     * Ensures that all the entries in the metastore also exist in the FileSystem listing.
     *
     * @param pjp
     * @return
     * @throws Throwable
     */
    @Around("list() && !cflow(delete()) && !within(ConsistentListingAspect)")
    public Object metastoreCheck(final ProceedingJoinPoint pjp) throws Throwable {
        if(disabled) {
            return pjp.proceed();
        }

        Configuration conf = ((FileSystem) pjp.getTarget()).getConf();
        updateConfig(conf);

        FileStatus [] s3Listing = (FileStatus[]) pjp.proceed();

        List<Path> pathsToCheck = new ArrayList<Path>();

        Object pathArg = pjp.getArgs()[0];

        //Locate paths in the arguments
        if(pathArg instanceof Path) {
            pathsToCheck.add((Path)pathArg);
        } else if (pathArg instanceof List) {
            pathsToCheck.addAll((List)pathArg);
        } else if (pathArg.getClass().isArray()) {
            pathsToCheck.addAll(Arrays.asList((Path[]) pathArg));
        }

        //HACK: This is just to prevent the emr metrics from causing consisteny failures
        for(StackTraceElement e : Thread.currentThread().getStackTrace()) {
            if(e.getClassName().contains("emr.metrics")) {
                log.debug("Ignoring EMR metrics listing for paths: " + pathsToCheck);
                return s3Listing;
            }
        }
        //END HACK

        long recheck = recheckCount;
        long delay = recheckPeriod;

        try {
            if (isTask(conf) && !checkTaskListings) {
                log.info("Skipping consistency check for task listing");
                return s3Listing;
            }

            // Tasks use their own (usually tighter) recheck settings.
            if(isTask(conf)) {
                recheck = taskRecheckCount;
                delay = taskRecheckPeriod;
            }
        } catch (Exception e) {
            log.error("Error checking for task side listing", e);
        }

        try {
            List<FileInfo> metastoreListing = metastore.list(pathsToCheck);

            List<Path> missingPaths = new ArrayList<Path>(0);
            int checkAttempt;

            for(checkAttempt=0; checkAttempt<=recheck; checkAttempt++) {
                missingPaths = checkListing(metastoreListing, s3Listing);

                if(delistDeleteMarkedFiles) {
                    s3Listing = delistDeletedPaths(metastoreListing, s3Listing);
                }

                if(missingPaths.isEmpty()) {
                    break;
                }

                //Check if acceptable threshold of data has been met.  This is a little
                //ambigious becuase S3 could potentially have more files than the
                //metastore (via out-of-band access) and throw off the ratio
                if(fileThreshold < 1 && metastoreListing.size() > 0) {
                    float ratio = s3Listing.length / (float) metastoreListing.size();

                    if(ratio > fileThreshold) {
                        log.info(format("Proceeding with incomplete listing at ratio %f (%f as acceptable). Still missing paths: %s", ratio, fileThreshold, missingPaths));

                        missingPaths.clear();
                        break;
                    }
                }

                if(recheck == 0) {
                    break;
                }

                log.info(format("Rechecking consistency in %d (ms). Files missing %d. Missing paths: %s", delay, missingPaths.size(), missingPaths));
                Thread.sleep(delay);

                s3Listing = (FileStatus[]) pjp.proceed();
            }

            if (!missingPaths.isEmpty()) {
                alertDispatcher.alert(missingPaths);

                if (shouldFail(conf)) {
                    throw new S3ConsistencyException("Consistency check failed. See go/s3mper for details. Missing paths: " + missingPaths);
                } else {
                    log.error("Consistency check failed. See go/s3mper for details. Missing paths: " + missingPaths);
                }
            } else {
                if(checkAttempt > 0) {
                    log.info(format("Listing achieved consistency after %d attempts", checkAttempt));
                    alertDispatcher.recovered(pathsToCheck);
                }
            }
        } catch (TimeoutException t) {
            log.error("Timeout occurred listing metastore paths: " + pathsToCheck, t);

            alertDispatcher.timeout("metastoreCheck", pathsToCheck);

            if(failOnTimeout) {
                throw t;
            }
        } catch (Exception e) {
            log.error("Failed to list metastore for paths: " + pathsToCheck, e);

            if(shouldFail(conf)) {
                throw e;
            }
        }

        return s3Listing;
    }

    /**
     * Check the the metastore listing against the s3 listing and return any paths
     * missing from s3.
     *
     * @param metastoreListing
     * @param s3Listing
     * @return
     */
    private List<Path> checkListing(List<FileInfo> metastoreListing, FileStatus [] s3Listing) {
        Map<String, FileStatus> s3paths = new HashMap<String, FileStatus>();

        if(s3Listing != null) {
            for (FileStatus fileStatus : s3Listing) {
                s3paths.put(fileStatus.getPath().toUri().normalize().getSchemeSpecificPart(), fileStatus);
            }
        }

        List<Path> missingPaths = new ArrayList<Path>();

        for (FileInfo f : metastoreListing) {
            // Delete-marked entries are expected to be absent from S3.
            if(f.isDeleted()) {
                continue;
            }

            if (!s3paths.containsKey(f.getPath().toUri().normalize().getSchemeSpecificPart())) {
                missingPaths.add(f.getPath());
            }
        }

        return missingPaths;
    }

    /**
     * Removes files from the s3 listing that are delete-marked in the metastore
     * (deleted in the metastore but not yet gone from S3).
     */
    private FileStatus [] delistDeletedPaths(List<FileInfo> metastoreListing, FileStatus [] s3Listing) {
        if(s3Listing == null || s3Listing.length == 0) {
            return s3Listing;
        }

        Set<String> delistedPaths = new HashSet<String>();

        for(FileInfo file : metastoreListing) {
            if(file.isDeleted()) {
                delistedPaths.add(normalize(file.getPath()));
            }
        }

        // BUGFIX: Arrays.asList returns a fixed-size view whose iterator does
        // not support remove() (it threw UnsupportedOperationException the
        // moment a deleted path had to be delisted). Copy into a resizable
        // list before removing.
        List<FileStatus> s3files = new ArrayList<FileStatus>(Arrays.asList(s3Listing));

        for (Iterator<FileStatus> i = s3files.iterator(); i.hasNext();) {
            FileStatus file = i.next();

            if(delistedPaths.contains(normalize(file.getPath())) ) {
                i.remove();
            }
        }

        return s3files.toArray(new FileStatus[s3files.size()]);
    }

    @Pointcut
    public abstract void delete();

    /**
     * Deletes listing records based on a delete call from the FileSystem.
     *
     * @param pjp
     * @return
     * @throws Throwable
     */
    @Around("delete() && !within(ConsistentListingAspect)")
    public Object metastoreDelete(final ProceedingJoinPoint pjp) throws Throwable {
        if(disabled) {
            return pjp.proceed();
        }

        Configuration conf = ((FileSystem) pjp.getTarget()).getConf();
        updateConfig(conf);

        Path deletePath = (Path) pjp.getArgs()[0];

        boolean recursive = false;

        if(pjp.getArgs().length > 1) {
            recursive = (Boolean) pjp.getArgs()[1];
        }

        try {
            FileSystem s3fs = (FileSystem) pjp.getTarget();

            Set<Path> filesToDelete = new HashSet<Path>();
            filesToDelete.add(deletePath);

            List<FileInfo> metastoreFiles = metastore.list(Collections.singletonList(deletePath));

            for(FileInfo f : metastoreFiles) {
                filesToDelete.add(f.getPath());
            }

            try {
                // A recursive directory delete must also clear every child entry.
                if(s3fs.getFileStatus(deletePath).isDir() && recursive) {
                    filesToDelete.addAll(recursiveList(s3fs, deletePath));
                }
            } catch (Exception e) {
                // Best-effort: the path may already be gone from S3.
                log.info("A problem occurred deleting path: " + deletePath +" "+ e.getMessage());
            }

            for(Path path : filesToDelete) {
                metastore.delete(path);
            }
        } catch (TimeoutException t) {
            log.error("Timeout occurred deleting metastore path: " + deletePath, t);

            alertDispatcher.timeout("metastoreDelete", Collections.singletonList(deletePath));

            if(failOnTimeout) {
                throw t;
            }
        } catch (Exception e) {
            log.error("Error deleting paths from metastore: " + deletePath, e);

            if(shouldFail(conf)) {
                throw e;
            }
        }

        // Delete from the metastore first, then from S3.
        return pjp.proceed();
    }

    /** Recursively collects the given path and all descendants. Best-effort. */
    private List<Path> recursiveList(FileSystem fs, Path path) throws IOException {
        List<Path> result = new ArrayList<Path>();

        try {
            result.add(path);

            if (!fs.isFile(path)) {
                FileStatus[] children = fs.listStatus(path);

                if (children == null) {
                    return result;
                }

                for (FileStatus child : children) {
                    if (child.isDir()) {
                        result.addAll(recursiveList(fs, child.getPath()));
                    } else {
                        result.add(child.getPath());
                    }
                }
            }
        } catch (Exception e) {
            log.info("A problem occurred recursively deleting path: " + path + " " + e.getMessage());
        }

        return result;
    }

    /**
     * Check to see if the current context is within an executing task.
     *
     * @param conf
     * @return
     */
    private boolean isTask(Configuration conf) {
        return conf.get("mapred.task.id") != null;
    }

    /**
     * Handles the various options for when failure should occur.
     *
     * @param conf
     * @return
     */
    private boolean shouldFail(Configuration conf) {
        boolean isTask = isTask(conf);

        return (!isTask && failOnError) || (isTask && taskFailOnError);
    }

    /**
     * Disables listing.  Once this is set, it cannot be re-enabled through
     * the configuration object.
     */
    private void disable() {
        log.warn("Disabling s3mper listing consistency.");
        disabled = true;
    }
}
| 307 |
0 | Create_ds/s3mper/src/main/java/com/netflix/bdp/s3mper | Create_ds/s3mper/src/main/java/com/netflix/bdp/s3mper/metastore/FileInfo.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.bdp.s3mper.metastore;
import org.apache.hadoop.fs.Path;
/**
* Provides basic file metadata for the metastore
*
* @author dweeks
*/
public class FileInfo {
    // Path of the entry in the backing filesystem.
    private Path path;
    // Whether a delete marker has been recorded for this entry.
    private boolean deleted;
    // Whether the entry represents a directory rather than a file.
    private boolean directory;

    /**
     * Creates metadata for the given path. The deleted and directory flags
     * start out false (relying on Java field defaults).
     *
     * @param path the file or directory path
     */
    public FileInfo(Path path) {
        this.path = path;
    }

    public Path getPath() {
        return path;
    }

    public void setPath(Path path) {
        this.path = path;
    }

    public boolean isDeleted() {
        return deleted;
    }

    public void setDeleted(boolean deleted) {
        this.deleted = deleted;
    }

    public boolean isDirectory() {
        return directory;
    }

    public void setDirectory(boolean directory) {
        this.directory = directory;
    }
}
| 308 |
0 | Create_ds/s3mper/src/main/java/com/netflix/bdp/s3mper | Create_ds/s3mper/src/main/java/com/netflix/bdp/s3mper/metastore/MetastoreException.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.bdp.s3mper.metastore;
/**
* Encapsulates issues seen when operating on the metastore.
*
* @author dweeks
*/
public class MetastoreException extends RuntimeException {

    /** Creates the exception with no detail message or cause. */
    public MetastoreException() {
    }

    /**
     * Creates the exception with a detail message.
     *
     * @param message description of the metastore failure
     */
    public MetastoreException(String message) {
        super(message);
    }

    /**
     * Creates the exception with a detail message and underlying cause.
     *
     * @param message description of the metastore failure
     * @param cause the underlying failure
     */
    public MetastoreException(String message, Throwable cause) {
        super(message, cause);
    }

    /**
     * Creates the exception wrapping an underlying cause.
     *
     * @param cause the underlying failure
     */
    public MetastoreException(Throwable cause) {
        super(cause);
    }
}
| 309 |
0 | Create_ds/s3mper/src/main/java/com/netflix/bdp/s3mper | Create_ds/s3mper/src/main/java/com/netflix/bdp/s3mper/metastore/FileSystemMetastore.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.bdp.s3mper.metastore;
import java.net.URI;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
/**
* Describes the basic operations used for FileSystem metastore consistency.
*
* @author dweeks
*/
public interface FileSystemMetastore {
    /**
     * Prepares the metastore for use against the given filesystem URI.
     * NOTE: the name keeps the historical "initalize" (sic) spelling;
     * renaming it would break every existing implementation and caller.
     *
     * @param uri filesystem URI the metastore serves
     * @param conf configuration used to set up the backing store
     * @throws Exception if the backing store cannot be reached or created
     */
    public void initalize(URI uri, Configuration conf) throws Exception;

    /**
     * Lists the metastore entries recorded under the given paths.
     *
     * @param path paths to list (despite the singular name, a list of them)
     * @return file metadata for the entries found
     * @throws Exception on backing-store failure
     */
    public List<FileInfo> list(List<Path> path) throws Exception;

    /**
     * Records a newly created file or directory.
     *
     * @param path the created path
     * @param directory true if the path is a directory
     * @throws Exception on backing-store failure
     */
    public void add(Path path, boolean directory) throws Exception;

    /**
     * Removes (or delete-marks) the entry for the given path.
     *
     * @param path the deleted path
     * @throws Exception on backing-store failure
     */
    public void delete(Path path) throws Exception;

    /** Releases any resources held by the metastore client. */
    public void close();
}
| 310 |
0 | Create_ds/s3mper/src/main/java/com/netflix/bdp/s3mper/metastore | Create_ds/s3mper/src/main/java/com/netflix/bdp/s3mper/metastore/impl/DeleteWriterTask.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.bdp.s3mper.metastore.impl;
import com.amazonaws.AmazonClientException;
import com.amazonaws.services.dynamodb.AmazonDynamoDBClient;
import com.amazonaws.services.dynamodb.model.BatchWriteItemRequest;
import com.amazonaws.services.dynamodb.model.BatchWriteItemResult;
import com.amazonaws.services.dynamodb.model.BatchWriteResponse;
import com.amazonaws.services.dynamodb.model.DeleteRequest;
import com.amazonaws.services.dynamodb.model.Key;
import com.amazonaws.services.dynamodb.model.WriteRequest;
import com.google.common.util.concurrent.RateLimiter;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.Callable;
import org.apache.log4j.Logger;
/**
* Deletes entries on the queue from DynamoDb.
*
* @author dweeks
*/
public class DeleteWriterTask extends AbstractDynamoDBTask {
    private static final Logger log = Logger.getLogger(DeleteWriterTask.class.getName());

    /** Batch limit is explicit from the AWS SDK */
    private final int batchLimit = 25;

    // Keys queued for deletion; filled by scan tasks, drained here in batches.
    final BlockingQueue<Key> deleteQueue;

    public DeleteWriterTask(AmazonDynamoDBClient db, RateLimiter limiter, BlockingQueue<Key> deleteQueue) {
        super(db, limiter);
        this.deleteQueue = deleteQueue;
    }

    /**
     * Drains the delete queue and issues DynamoDB batch deletes until the
     * queue is empty and the task has been stopped. Unprocessed writes are
     * re-queued; consumed capacity is charged against the rate limiter
     * after each batch. Returns Boolean.FALSE when the loop exits.
     */
    @Override
    public Object call() throws Exception {
        // NOTE(review): `running`, `db` and `limiter` are inherited from
        // AbstractDynamoDBTask (not visible here); `running` is presumably
        // cleared externally to request shutdown — confirm against the parent.
        running = true;

        Set<Key> keys = new HashSet<Key>(batchLimit);
        List<WriteRequest> batch = new ArrayList<WriteRequest>(batchLimit);

        try {
            // Keep going while stop hasn't been requested, then finish
            // whatever is still queued.
            while (!deleteQueue.isEmpty() || running) {
                // Pull up to one batch worth of keys (a Set, so duplicate
                // keys collapse into a single delete).
                deleteQueue.drainTo(keys, batchLimit);

                // Wake any producers blocked waiting for queue capacity.
                synchronized (deleteQueue) {
                    deleteQueue.notifyAll();
                }

                if (keys.isEmpty()) {
                    // Nothing queued yet; back off briefly.
                    Thread.sleep(500);
                    continue;
                }

                for (Key key : keys) {
                    batch.add(new WriteRequest().withDeleteRequest(new DeleteRequest().withKey(key)));
                }

                BatchWriteItemRequest batchRequest = new BatchWriteItemRequest();

                Map<String, List<WriteRequest>> itemRequests = new HashMap<String, List<WriteRequest>>();
                itemRequests.put(MetastoreJanitor.tableName, batch);

                batchRequest.setRequestItems(itemRequests);

                BatchWriteItemResult result = db.batchWriteItem(batchRequest);

                //Resubmit failed writes
                for (Map.Entry<String, List<WriteRequest>> e : result.getUnprocessedItems().entrySet()) {
                    for (WriteRequest w : e.getValue()) {
                        deleteQueue.put(w.getDeleteRequest().getKey());
                    }
                }

                //Drain capacity
                // Charged after the write: blocks here until the limiter
                // permits the capacity units this batch actually consumed.
                for (Map.Entry<String, BatchWriteResponse> e : result.getResponses().entrySet()) {
                    limiter.acquire(e.getValue().getConsumedCapacityUnits().intValue());
                }

                if(log.isDebugEnabled()) {
                    log.debug(String.format("delete: %2d, queue_size: %5d, max_rate: %4.1f", keys.size(), deleteQueue.size(), limiter.getRate()));
                }

                keys.clear();
                batch.clear();
            }
        } catch (InterruptedException interruptedException) {
            log.error("Interrupted", interruptedException);
        } catch (AmazonClientException amazonClientException) {
            log.error("", amazonClientException);
        }

        log.info("Delete task terminating");

        return Boolean.FALSE;
    }
}
| 311 |
0 | Create_ds/s3mper/src/main/java/com/netflix/bdp/s3mper/metastore | Create_ds/s3mper/src/main/java/com/netflix/bdp/s3mper/metastore/impl/AbstractDynamoDBTask.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.bdp.s3mper.metastore.impl;
import com.amazonaws.services.dynamodb.AmazonDynamoDBClient;
import com.google.common.util.concurrent.RateLimiter;
import java.util.concurrent.Callable;
/**
 * Common ancestor of all DynamoDB operations.
 *
 * Holds the shared client, the rate limiter used to throttle consumed
 * capacity, and a volatile {@code running} flag that owning code clears to
 * request a cooperative shutdown of the task loop.
 *
 * @author dweeks
 */
public abstract class AbstractDynamoDBTask implements Callable<Object> {
    // Parameterized as Callable<Object> (previously a raw Callable) to remove
    // the raw-type/unchecked warning; every subclass already declares
    // "public Object call()", so this is source- and binary-compatible.

    protected AmazonDynamoDBClient db;
    protected RateLimiter limiter;

    /** Cooperative shutdown flag; volatile because it is cleared from other threads. */
    protected volatile boolean running = false;

    /**
     * @param db      DynamoDB client shared across janitor tasks
     * @param limiter limiter used to throttle consumed capacity units
     */
    public AbstractDynamoDBTask(AmazonDynamoDBClient db, RateLimiter limiter) {
        super();
        this.db = db;
        this.limiter = limiter;
    }

    public AmazonDynamoDBClient getDb() {
        return db;
    }

    public void setDb(AmazonDynamoDBClient db) {
        this.db = db;
    }

    public RateLimiter getLimiter() {
        return limiter;
    }

    public void setLimiter(RateLimiter limiter) {
        this.limiter = limiter;
    }

    public boolean isRunning() {
        return running;
    }

    public void setRunning(boolean running) {
        this.running = running;
    }
}
| 312 |
0 | Create_ds/s3mper/src/main/java/com/netflix/bdp/s3mper/metastore | Create_ds/s3mper/src/main/java/com/netflix/bdp/s3mper/metastore/impl/MetastoreJanitor.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.bdp.s3mper.metastore.impl;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.services.dynamodb.AmazonDynamoDBClient;
import com.amazonaws.services.dynamodb.model.Key;
import com.google.common.util.concurrent.RateLimiter;
import com.netflix.bdp.s3mper.metastore.FileInfo;
import java.net.URI;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.log4j.Logger;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import static java.lang.String.*;
/**
 * Class to cleanup old entries in the DynamoDb Metastore. This is intended to be a single use class.
 * It is not thread safe and should not be reused.
 *
 * @author dweeks
 */
public class MetastoreJanitor {
    private static final Logger log = Logger.getLogger(MetastoreJanitor.class.getName());

    /** Table operated on; overridden by "s3mper.metastore.name" in initalize(). */
    static String tableName = "ConsistentListingMetastore";

    private DynamoDBMetastore metastore = null;
    private AmazonDynamoDBClient db = null;

    private int scanLimit = Integer.getInteger("s3mper.janitor.scan.limit", 500);
    private int deleteLimit = Integer.getInteger("s3mper.janitor.delete.limit", 500);
    private int queueSize = Integer.getInteger("s3mper.janitor.queue.limit", 2000);
    private int scanThreads = Integer.getInteger("s3mper.janitor.threads.scan", 1);
    private int deleteThreads = Integer.getInteger("s3mper.janitor.threads.delete", 10);

    // FIX: initialize the limiters from the default limits so the janitor is
    // usable even when setScanLimit()/setDeleteLimit() are never called.
    // Previously these fields were only assigned inside the setters, so
    // skipping the setters handed a null limiter to every scanner/delete task
    // and caused an NPE the first time limiter.acquire()/getRate() was hit.
    private RateLimiter scanLimiter = RateLimiter.create(scanLimit);
    private RateLimiter deleteLimiter = RateLimiter.create(deleteLimit);

    private ExecutorService executor;

    // Synchronized lists: mutated by the janitor thread and read by the
    // shutdown hook thread.
    private final List<AbstractDynamoDBTask> tasks = Collections.synchronizedList(new ArrayList<AbstractDynamoDBTask>());
    private final List<Future> futures = Collections.synchronizedList(new ArrayList<Future>());
    private final List<Future> scanFutures = Collections.synchronizedList(new ArrayList<Future>());

    // Unbounded on purpose: delete tasks may resubmit failed keys, so a hard
    // bound could deadlock; queueSize is enforced cooperatively by scanners.
    private BlockingQueue<Key> queue = new LinkedBlockingQueue<Key>();

    /**
     * Resolves AWS credentials from the Hadoop configuration (with optional
     * cross-account overrides) and connects the client and backing metastore.
     *
     * @param uri  filesystem URI whose scheme selects the credential keys
     * @param conf Hadoop configuration
     * @throws Exception if the underlying metastore fails to initialize
     */
    public void initalize(URI uri, Configuration conf) throws Exception {
        String keyId = conf.get("fs."+uri.getScheme()+".awsAccessKeyId");
        String keySecret = conf.get("fs."+uri.getScheme()+".awsSecretAccessKey");

        //An override option for accessing across accounts
        keyId = conf.get("s3mper.override.awsAccessKeyId", keyId);
        keySecret = conf.get("s3mper.override.awsSecretAccessKey", keySecret);

        db = new AmazonDynamoDBClient(new BasicAWSCredentials(keyId, keySecret));

        tableName = conf.get("s3mper.metastore.name", tableName);

        metastore = new DynamoDBMetastore();
        metastore.initalize(uri, conf);
    }

    /**
     * Deletes all entries for a given path (directory) from the metastore.
     *
     * @param path
     * @throws Exception
     */
    public void clearPath(Path path) throws Exception {
        List<FileInfo> listing = metastore.list(Collections.singletonList(path), true);

        for(FileInfo file : listing) {
            metastore.delete(file.getPath());
        }
    }

    /**
     * Scans the timeseries index in dynamodb (i.e. hash key = 'epoch' ) and
     * deletes entries older than the given time.
     *
     * @param unit
     * @param time
     * @throws Exception
     */
    public void deleteTimeseries(TimeUnit unit, long time) throws Exception {
        log.info("Starting Timeseries Delete");
        log.info(format("read_units=%d, write_units=%d, queue_size=%d, scan_threads=%d, delete_threads=%d", scanLimit, deleteLimit, queueSize, scanThreads, deleteThreads));

        executor = Executors.newFixedThreadPool(scanThreads+deleteThreads);

        for (int i = 0; i < scanThreads; i++) {
            TimeseriesScannerTask scanner = new TimeseriesScannerTask(db, scanLimiter, queue, queueSize, unit.toMillis(time));
            tasks.add(scanner);

            Future scanFuture = executor.submit(scanner);
            futures.add(scanFuture);
            scanFutures.add(scanFuture);
        }

        processDelete();
    }

    /**
     * Delete paths entries older than the time period provided. This requires
     * a full scan of the table, which is very resource intensive, so timeseries
     * is the preferred approach for deleting entries.
     *
     * @param unit
     * @param time
     * @throws Exception
     */
    public void deletePaths(TimeUnit unit, long time) throws Exception {
        log.info("Starting Full Path Delete");
        log.info(format("read_units=%d, write_units=%d, queue_size=%d, scan_threads=%d, delete_threads=%d", scanLimit, deleteLimit, queueSize, scanThreads, deleteThreads));

        executor = Executors.newFixedThreadPool(scanThreads+deleteThreads);

        log.info(format("Scanning for items older than: %d (ms)", unit.toMillis(time)));

        for (int i = 0; i < scanThreads; i++) {
            PathScannerTask scanner = new PathScannerTask(db, scanLimiter, queue, queueSize, unit.toMillis(time));
            tasks.add(scanner);

            Future scanFuture = executor.submit(scanner);
            futures.add(scanFuture);
            scanFutures.add(scanFuture);
        }

        processDelete();
    }

    /**
     * Starts the delete writer tasks, waits for all scanners to finish, then
     * signals every task to stop and shuts the executor down.
     */
    private void processDelete() throws Exception {
        registerShutdownHook();

        for (int i = 0; i < deleteThreads; i++) {
            DeleteWriterTask delete = new DeleteWriterTask(db, deleteLimiter, queue);
            tasks.add(delete);
            futures.add(executor.submit(delete));
        }

        // Block until every scanner has drained its portion of the table.
        synchronized(scanFutures) {
            for (Future future : scanFutures) {
                future.get();
            }
        }

        // Scanning is done: tell delete tasks to finish the remaining queue.
        synchronized(tasks) {
            for (AbstractDynamoDBTask task : tasks) {
                task.running = false;
            }
        }

        log.info("Shutting down . . .");
        executor.shutdown();
        log.info("Shutdown complete.");
    }

    /**
     * Attempts to shutdown cleanly by finishing processing for all entries in
     * the queue. If not done cleanly, some timeseries entries may get
     * deleted without deleting their corresponding path entries.
     */
    private void registerShutdownHook() {
        Runtime.getRuntime().addShutdownHook(new Thread("Metastore Janitor Shutdown Hook"){
            @Override
            public void run() {
                log.info("Shutting down all threads");

                synchronized(tasks) {
                    for(AbstractDynamoDBTask task : tasks) {
                        task.running = false;
                    }
                }

                synchronized(futures) {
                    for(Future future: futures) {
                        try {
                            future.get();
                        } catch (Exception ex) {
                            log.error("",ex);
                        }
                    }
                }

                executor.shutdown();
            }
        });
    }

    public int getScanLimit() {
        return scanLimit;
    }

    public void setScanLimit(int scanLimit) {
        this.scanLimit = scanLimit;
        scanLimiter = RateLimiter.create(scanLimit);
    }

    public int getDeleteLimit() {
        return deleteLimit;
    }

    public void setDeleteLimit(int deleteLimit) {
        this.deleteLimit = deleteLimit;
        deleteLimiter = RateLimiter.create(deleteLimit);
    }

    public int getScanThreads() {
        return scanThreads;
    }

    public void setScanThreads(int scanThreads) {
        this.scanThreads = scanThreads;
    }

    public int getDeleteThreads() {
        return deleteThreads;
    }

    public void setDeleteThreads(int deleteThreads) {
        this.deleteThreads = deleteThreads;
    }
}
| 313 |
0 | Create_ds/s3mper/src/main/java/com/netflix/bdp/s3mper/metastore | Create_ds/s3mper/src/main/java/com/netflix/bdp/s3mper/metastore/impl/AbstractScannerTask.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.bdp.s3mper.metastore.impl;
import com.amazonaws.services.dynamodb.AmazonDynamoDBClient;
import com.google.common.util.concurrent.RateLimiter;
/**
 * Parent for all DynamoDB scanner tasks.
 *
 * Exists purely to group the scanner implementations under a common type;
 * all shared state lives in {@link AbstractDynamoDBTask}.
 *
 * @author dweeks
 */
public abstract class AbstractScannerTask extends AbstractDynamoDBTask {

    /**
     * @param client      DynamoDB client shared with the janitor
     * @param rateLimiter limiter used to throttle consumed read capacity
     */
    public AbstractScannerTask(AmazonDynamoDBClient client, RateLimiter rateLimiter) {
        super(client, rateLimiter);
    }
}
| 314 |
0 | Create_ds/s3mper/src/main/java/com/netflix/bdp/s3mper/metastore | Create_ds/s3mper/src/main/java/com/netflix/bdp/s3mper/metastore/impl/DynamoDBMetastore.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.bdp.s3mper.metastore.impl;
import com.netflix.bdp.s3mper.common.RetryTask;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.services.dynamodb.AmazonDynamoDBClient;
import com.amazonaws.services.dynamodb.model.AttributeValue;
import com.amazonaws.services.dynamodb.model.AttributeValueUpdate;
import com.amazonaws.services.dynamodb.model.CreateTableRequest;
import com.amazonaws.services.dynamodb.model.DeleteItemRequest;
import com.amazonaws.services.dynamodb.model.DeleteItemResult;
import com.amazonaws.services.dynamodb.model.Key;
import com.amazonaws.services.dynamodb.model.KeySchema;
import com.amazonaws.services.dynamodb.model.KeySchemaElement;
import com.amazonaws.services.dynamodb.model.ListTablesResult;
import com.amazonaws.services.dynamodb.model.ProvisionedThroughput;
import com.amazonaws.services.dynamodb.model.PutItemRequest;
import com.amazonaws.services.dynamodb.model.QueryRequest;
import com.amazonaws.services.dynamodb.model.QueryResult;
import com.amazonaws.services.dynamodb.model.ReturnValue;
import com.amazonaws.services.dynamodb.model.ScalarAttributeType;
import com.amazonaws.services.dynamodb.model.UpdateItemRequest;
import com.amazonaws.services.dynamodb.model.UpdateItemResult;
import com.netflix.bdp.s3mper.metastore.FileInfo;
import com.netflix.bdp.s3mper.metastore.FileSystemMetastore;
import java.net.URI;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.log4j.Logger;
import java.util.concurrent.Callable;
import static com.netflix.bdp.s3mper.common.PathUtil.*;
import java.util.Random;
/**
 * Implements FileSystemMetastore using DynamoDB as a backend.
 *
 * Each file is stored under a composite key of (parent path, file name).
 * Every add also writes a companion "timeseries" entry keyed by epoch so the
 * janitor can expire old records without a full table scan.
 *
 * @author dweeks
 */
@SuppressWarnings("deprecation")
public class DynamoDBMetastore implements FileSystemMetastore {
    private static final Logger log = Logger.getLogger(DynamoDBMetastore.class.getName());

    // Default table name; overridden by "s3mper.metastore.name".
    private String tableName = "ConsistentListingMetastore";
    private AmazonDynamoDBClient db = null;

    // Provisioned throughput applied only when this class creates the table.
    private long readUnits = 500;
    private long writeUnits = 100;

    // Retry count and per-attempt timeout for RetryTask-wrapped calls.
    private int retryCount = Integer.getInteger("s3mper.metastore.retry", 3);
    private int timeout = Integer.getInteger("s3mper.metastore.timeout", 5000);

    // URI scheme (e.g. "s3n") used to rebuild full paths from stored entries.
    private String scheme;

    // When true, delete() writes a delete-marker attribute instead of
    // physically removing the item.
    private boolean deleteMarkerEnabled;

    // Salts timeseries range keys so adds in the same millisecond stay unique.
    private Random rand = new Random();

    static final String HASH_KEY = "path";
    static final String RANGE_KEY = "file";
    static final String EPOCH_VALUE = "epoch";
    static final String DIRECTORY_VALUE = "dir";
    static final String DELETE_MARKER = "deleted";
    static final String LINK_HASH_KEY = "linkPath";
    static final String LINK_RANGE_KEY = "linkFile";
    // Hash key under which all timeseries entries live. It happens to share
    // the "epoch" string with EPOCH_VALUE but is used as a key, not an attribute.
    static final String TIMESERIES_KEY = "epoch";

    /**
     * Creates the metastore table in DynamoDB if it doesn't exist with the configured
     * read and write units.
     *
     * @param uri
     * @param conf
     * @throws Exception
     */
    @Override
    public void initalize(URI uri, Configuration conf) throws Exception {
        scheme = uri.getScheme();

        String keyId = conf.get("fs."+uri.getScheme()+".awsAccessKeyId");
        String keySecret = conf.get("fs."+uri.getScheme()+".awsSecretAccessKey");

        //An override option for accessing across accounts
        keyId = conf.get("s3mper.override.awsAccessKeyId", keyId);
        keySecret = conf.get("s3mper.override.awsSecretAccessKey", keySecret);

        db = new AmazonDynamoDBClient(new BasicAWSCredentials(keyId, keySecret));

        readUnits = conf.getLong("s3mper.metastore.read.units", readUnits);
        writeUnits = conf.getLong("s3mper.metastore.write.units", writeUnits);

        retryCount = conf.getInt("s3mper.metastore.retry", retryCount);
        timeout = conf.getInt("s3mper.metastore.timeout", timeout);

        tableName = conf.get("s3mper.metastore.name", tableName);

        deleteMarkerEnabled = conf.getBoolean("s3mper.metastore.deleteMarker.enabled", false);

        // Table creation is opt-in; listTables() is only consulted when enabled.
        boolean checkTableExists = conf.getBoolean("s3mper.metastore.create", false);

        if(checkTableExists) {
            ListTablesResult tables = db.listTables();

            if(!tables.getTableNames().contains(tableName)) {
                createTable();
            }
        }
    }

    /**
     * Creates the table in DynamoDB. The hash+range key is:
     *
     * Hash (String) | Range (String)
     * File Path File Name
     * Example: s3n://netflix/data test.xml
     *
     * The table also includes one attribute for epoch (time created), but
     * that is only defined during the put operation (see add()).
     *
     */
    private void createTable() {
        log.info("Creating table in DynamoDB: " + tableName);

        CreateTableRequest createRequest = new CreateTableRequest();
        createRequest.withTableName(tableName);

        //Key
        KeySchemaElement pathKey = new KeySchemaElement().withAttributeName(HASH_KEY).withAttributeType(ScalarAttributeType.S);
        KeySchemaElement fileKey = new KeySchemaElement().withAttributeName(RANGE_KEY).withAttributeType(ScalarAttributeType.S);

        KeySchema schema = new KeySchema();
        schema.setHashKeyElement(pathKey);
        schema.setRangeKeyElement(fileKey);

        createRequest.setKeySchema(schema);

        //Throughput
        ProvisionedThroughput tp = new ProvisionedThroughput();
        tp.setReadCapacityUnits(readUnits);
        tp.setWriteCapacityUnits(writeUnits);

        createRequest.setProvisionedThroughput(tp);

        db.createTable(createRequest);
    }

    /**
     * Returns a list of files that should exist in the FileSystem.
     *
     * NOTE(review): deleted entries are included whenever delete markers are
     * enabled (includeDeleted is passed deleteMarkerEnabled); callers are
     * presumably expected to filter on FileInfo.isDeleted() -- confirm.
     *
     * @param paths
     * @return
     * @throws java.lang.Exception
     */
    @Override
    public List<FileInfo> list(List<Path> paths) throws Exception {
        return list(paths, deleteMarkerEnabled);
    }

    /**
     * Returns a list of files that should exist in the FileSystem with
     * optional inclusion of deleted entries.
     *
     * @param paths
     * @param includeDeleted
     * @return
     * @throws Exception
     */
    public List<FileInfo> list(List<Path> paths, boolean includeDeleted) throws Exception {
        List<FileInfo> listing = new ArrayList<FileInfo>();

        for(Path path : paths) {
            Key startKey = null;

            // Page through query results until DynamoDB returns no
            // LastEvaluatedKey (end of the result set for this hash key).
            do {
                RetryTask<QueryResult> queryTask = new RetryTask(new QueryTask(path, startKey), retryCount, timeout);

                QueryResult result = queryTask.call();

                for(Map<String, AttributeValue> item : result.getItems()) {
                    // Rebuild the full path from hash key (parent) + range key (name).
                    FileInfo file = new FileInfo(new Path(scheme+":"+item.get(HASH_KEY).getS() +"/"+ item.get(RANGE_KEY).getS()));

                    if(item.containsKey(DELETE_MARKER)) {
                        file.setDeleted(Boolean.parseBoolean(item.get(DELETE_MARKER).getS()));

                        //@TODO: cleanup deleteMarker logic after deployed
                        // NOTE(review): any entry carrying the marker attribute is
                        // skipped when includeDeleted is false, even if the marker
                        // value is "false" -- confirm markers are only ever written
                        // with value "true" (see MarkDeletedTask).
                        if(!includeDeleted) {
                            continue;
                        }
                    }

                    if(item.containsKey(DIRECTORY_VALUE)) {
                        file.setDirectory(Boolean.parseBoolean((item.get(DIRECTORY_VALUE).getS())));
                    }

                    listing.add(file);
                }

                startKey = result.getLastEvaluatedKey();
            } while(startKey != null);
        }

        return listing;
    }

    /**
     * Adds a path to the metastore.
     *
     * @param path
     * @throws java.lang.Exception
     */
    @Override
    public void add(final Path path, boolean directory) throws Exception {
        RetryTask task = new RetryTask(new AddTask(path, directory), retryCount, timeout);

        task.call();
    }

    /**
     * Delete the provided path from the Metastore or use a delete marker.
     *
     * @param path
     * @throws Exception
     */
    @Override
    public void delete(final Path path) throws Exception {
        RetryTask task;

        if(deleteMarkerEnabled) {
            task = new RetryTask(new MarkDeletedTask(path), retryCount, timeout);
        } else {
            task = new RetryTask(new DeleteTask(path), retryCount, timeout);
        }

        task.call();
    }

    /** No resources to release; the DynamoDB client is left open. */
    @Override
    public void close() {
    }

    /**
     * A Callable task for use with RetryTask to add a path to the
     * DynamoDB table.
     *
     * Writes two items: the path entry itself and a companion timeseries
     * entry linking back to it (used by the janitor for expiry).
     */
    private class AddTask implements Callable<Object> {
        private Path path;
        private boolean directory;

        public AddTask(Path path, boolean directory) {
            this.path = path;
            this.directory = directory;
        }

        @Override
        public Object call() throws Exception {
            long epoch = System.currentTimeMillis();

            AttributeValue avPath = new AttributeValue(normalize(path.getParent()));
            AttributeValue avFile = new AttributeValue(path.getName());
            AttributeValue avEpoch = new AttributeValue().withN(epoch+"");

            PutItemRequest put = new PutItemRequest();
            put.setTableName(tableName);

            Map<String, AttributeValue> items = new HashMap<String, AttributeValue>();
            items.put(HASH_KEY, avPath);
            items.put(RANGE_KEY, avFile);
            items.put(EPOCH_VALUE, avEpoch);

            if(directory) {
                items.put(DIRECTORY_VALUE, new AttributeValue(Boolean.TRUE.toString()));
            }

            put.setItem(items);

            if(log.isDebugEnabled()) {
                log.debug("Adding metastore entry for: " + path.toUri());
            }

            db.putItem(put);

            // Companion timeseries entry: hash key "epoch", range key
            // "<millis>-<random>" (the random salt avoids same-millisecond
            // collisions), linking back to the path entry's key.
            PutItemRequest tsPut = new PutItemRequest();
            tsPut.setTableName(tableName);

            Map<String, AttributeValue> tsItems = new HashMap<String, AttributeValue>();
            tsItems.put(HASH_KEY, new AttributeValue(TIMESERIES_KEY));
            tsItems.put(RANGE_KEY, new AttributeValue(epoch+"-"+rand.nextInt()));
            tsItems.put(LINK_HASH_KEY, avPath);
            tsItems.put(LINK_RANGE_KEY, avFile);

            tsPut.setItem(tsItems);

            db.putItem(tsPut);

            return null;
        }
    }

    /**
     * A Callable task to be used with RetryTask to query a path.
     *
     * Takes a path and a scan start key and returns the query result.
     */
    class QueryTask implements Callable<QueryResult> {
        private Path path;
        private Key startKey;

        public QueryTask(Path path, Key startKey) {
            this.path = path;
            this.startKey = startKey;
        }

        @Override
        public QueryResult call() throws Exception {
            QueryRequest query = new QueryRequest();
            query.setTableName(tableName);
            query.withHashKeyValue(new AttributeValue(normalize(path)));
            // Consistent read: listings must see writes made in this session.
            query.setConsistentRead(true);

            if(startKey != null) {
                query.setExclusiveStartKey(startKey);
            }

            if(log.isDebugEnabled()) {
                log.debug("Querying DynamoDB for path: " + path.toUri());
            }

            return db.query(query);
        }
    }

    /**
     * A Callable task to be used with RetryTask to delete a file.
     */
    class DeleteTask implements Callable<DeleteItemResult> {
        private Path path;

        public DeleteTask(Path path) {
            this.path = path;
        }

        @Override
        public DeleteItemResult call() throws Exception {
            DeleteItemRequest delete = new DeleteItemRequest();
            delete.setTableName(tableName);
            delete.setKey(new Key(new AttributeValue(normalize(path.getParent())), new AttributeValue(path.getName())));
            delete.setReturnValues(ReturnValue.NONE);

            if(log.isDebugEnabled()) {
                log.debug("Deleting DynamoDB path: " + path.toUri());
            }

            return db.deleteItem(delete);
        }
    }

    /**
     * Marks a path deleted but does not actually delete the entry.
     *
     * Sets the delete marker attribute to "true" and refreshes the epoch so
     * the janitor's age-based expiry eventually removes the entry.
     */
    private class MarkDeletedTask implements Callable<UpdateItemResult> {
        private Path path;

        public MarkDeletedTask(Path path) {
            this.path = path;
        }

        @Override
        public UpdateItemResult call() throws Exception {
            UpdateItemRequest update = new UpdateItemRequest();
            update.setTableName(tableName);
            update.setKey(new Key(new AttributeValue(normalize(path.getParent())), new AttributeValue(path.getName())));

            Map<String, AttributeValueUpdate> items = new HashMap<String, AttributeValueUpdate>();
            items.put(DELETE_MARKER, new AttributeValueUpdate().withValue(new AttributeValue().withS(Boolean.TRUE.toString())));
            items.put(EPOCH_VALUE, new AttributeValueUpdate().withValue(new AttributeValue().withN(System.currentTimeMillis()+"")));

            update.setAttributeUpdates(items);

            if(log.isDebugEnabled()) {
                log.debug("Marking DynamoDB path deleted: " + path.toUri());
            }

            return db.updateItem(update);
        }
    }

    public int getRetryCount() {
        return retryCount;
    }

    public void setRetryCount(int retryCount) {
        this.retryCount = retryCount;
    }

    public int getTimeout() {
        return timeout;
    }

    public void setTimeout(int timeout) {
        this.timeout = timeout;
    }
}
| 315 |
0 | Create_ds/s3mper/src/main/java/com/netflix/bdp/s3mper/metastore | Create_ds/s3mper/src/main/java/com/netflix/bdp/s3mper/metastore/impl/TimeseriesScannerTask.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.bdp.s3mper.metastore.impl;
import com.amazonaws.services.dynamodb.AmazonDynamoDBClient;
import com.amazonaws.services.dynamodb.model.AttributeValue;
import com.amazonaws.services.dynamodb.model.Key;
import com.amazonaws.services.dynamodb.model.QueryRequest;
import com.amazonaws.services.dynamodb.model.QueryResult;
import com.google.common.util.concurrent.RateLimiter;
import java.util.Map;
import java.util.concurrent.BlockingQueue;
import org.apache.log4j.Logger;
import static java.lang.String.*;
import java.util.Date;
/**
 * Scans the DynamoDB table using the timeseries index and pushes the entries into a queue.
 *
 * For each timeseries entry older than the cutoff, both the timeseries entry
 * itself and the path entry it links to are queued for deletion. Entries are
 * range-key ordered by epoch, so the scan stops at the first entry newer than
 * the cutoff.
 *
 * @author dweeks
 */
public class TimeseriesScannerTask extends AbstractScannerTask {
    private static final Logger log = Logger.getLogger(TimeseriesScannerTask.class.getName());

    /** Destination queue shared with the delete writer tasks. */
    private final BlockingQueue<Key> deleteQueue;
    private int queueSize;
    /** Age threshold in milliseconds; entries older than (now - age) are deleted. */
    private long age;
    /** Emit a progress log roughly every this many scanned entries. */
    private int reportInterval = 100000;

    public TimeseriesScannerTask(AmazonDynamoDBClient db, RateLimiter limiter, BlockingQueue<Key> deleteQueue, int queueSize, long age) {
        super(db, limiter);
        this.deleteQueue = deleteQueue;
        this.queueSize = queueSize;
        this.age = age;
    }

    /**
     * Pages through the timeseries hash key, queueing delete keys until an
     * entry newer than the cutoff is reached or {@code running} is cleared.
     *
     * @return always {@link Boolean#TRUE}
     * @throws Exception on unrecoverable query or queue failures
     */
    @Override
    public Object call() throws Exception {
        running = true;

        long deleteEpoch = System.currentTimeMillis() - age;

        QueryRequest query = new QueryRequest();
        query.setTableName(MetastoreJanitor.tableName);
        query.setHashKeyValue(new AttributeValue().withS(DynamoDBMetastore.TIMESERIES_KEY));
        // Half the queue bound per page so two pages never badly overshoot it.
        query.setLimit(queueSize/2);

        QueryResult result;

        int scanCount = 0;
        int deleteCount = 0;
        // FIX: scan count at the last progress report. The previous check
        // (scanCount % reportInterval == 0) almost never fired because
        // scanCount advances in batch-sized increments, so it was rarely an
        // exact multiple of the interval.
        int lastReport = 0;

        do {
            //Can't set a hard limit on the queue since paths can be resubmitted by delete task
            //which can cause a deadlock.
            synchronized (deleteQueue) {
                while (deleteQueue.size() >= queueSize) {
                    deleteQueue.wait();
                }
            }

            if(!running) {
                break;
            }

            result = db.query(query);

            scanCount += result.getCount();

            long epoch = deleteEpoch;

            for (Map<String, AttributeValue> i : result.getItems()) {
                // Range key format is "<epochMillis>-<randomSalt>"; see AddTask.
                epoch = Long.parseLong(i.get(DynamoDBMetastore.RANGE_KEY).getS().split("-")[0]);

                // Entries are epoch-ordered, so the first non-expired entry
                // means everything remaining is newer: stop scanning.
                if (epoch >= deleteEpoch) {
                    log.info("Timeseries scan complete. Exiting.");
                    running = false;
                    break;
                }

                // Two deletes per entry: the timeseries record and the path
                // record it links to.
                deleteCount += 2;

                deleteQueue.put(new Key(i.get(DynamoDBMetastore.HASH_KEY), i.get(DynamoDBMetastore.RANGE_KEY)));
                deleteQueue.put(new Key(i.get(DynamoDBMetastore.LINK_HASH_KEY), i.get(DynamoDBMetastore.LINK_RANGE_KEY)));
            }

            if (scanCount - lastReport >= reportInterval) {
                lastReport = scanCount;
                log.info(format("scanned: %d, added: %d, queue_size: %d, current_date: %s", scanCount, deleteCount, deleteQueue.size(), new Date(epoch)));
            }

            // Throttle by the capacity this page actually consumed.
            limiter.acquire(result.getConsumedCapacityUnits().intValue());

            query.setExclusiveStartKey(result.getLastEvaluatedKey());
        } while (running && result.getLastEvaluatedKey() != null);

        log.info(format("Scan Complete.%nEntries Scanned: %d%nEntries Deleted: %d", scanCount, deleteCount));

        return Boolean.TRUE;
    }
}
| 316 |
0 | Create_ds/s3mper/src/main/java/com/netflix/bdp/s3mper/metastore | Create_ds/s3mper/src/main/java/com/netflix/bdp/s3mper/metastore/impl/PathScannerTask.java | /*
*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.bdp.s3mper.metastore.impl;
import com.amazonaws.services.dynamodb.AmazonDynamoDBClient;
import com.amazonaws.services.dynamodb.model.AttributeValue;
import com.amazonaws.services.dynamodb.model.ComparisonOperator;
import com.amazonaws.services.dynamodb.model.Condition;
import com.amazonaws.services.dynamodb.model.Key;
import com.amazonaws.services.dynamodb.model.ScanRequest;
import com.amazonaws.services.dynamodb.model.ScanResult;
import com.google.common.util.concurrent.RateLimiter;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.BlockingQueue;
import org.apache.log4j.Logger;
/**
 * Scans entries by path in DynamoDb using the epoch field to determine their age and places
 * old entries into a queue.
 *
 * Unlike the timeseries scanner this performs a full table scan with a
 * server-side filter, which is far more resource intensive.
 *
 * @author dweeks
 */
public class PathScannerTask extends AbstractScannerTask {
    private static final Logger log = Logger.getLogger(PathScannerTask.class.getName());

    /** Destination queue shared with the delete writer tasks. */
    private final BlockingQueue<Key> deleteQueue;
    private long age;
    private int queueSize;

    public PathScannerTask(AmazonDynamoDBClient db, RateLimiter limiter, BlockingQueue<Key> deleteQueue, int queueSize, long age) {
        super(db, limiter);
        this.deleteQueue = deleteQueue;
        this.age = age;
        this.queueSize = queueSize;
    }

    /**
     * Pages through a filtered full-table scan, queueing the key of every
     * entry whose epoch attribute predates the cutoff.
     *
     * @return always {@link Boolean#TRUE}
     * @throws Exception on unrecoverable scan or queue failures
     */
    @Override
    public Object call() throws Exception {
        running = true;

        final long cutoff = System.currentTimeMillis() - age;

        // Server-side filter: only items whose epoch attribute is < cutoff match.
        AttributeValue cutoffValue = new AttributeValue();
        cutoffValue.setN(String.valueOf(cutoff));

        Condition olderThan = new Condition();
        olderThan.setComparisonOperator(ComparisonOperator.LT);
        olderThan.setAttributeValueList(Collections.singletonList(cutoffValue));

        Map<String, Condition> scanFilter = new HashMap<String, Condition>();
        scanFilter.put("epoch", olderThan);

        ScanRequest scan = new ScanRequest(MetastoreJanitor.tableName);
        scan.setScanFilter(scanFilter);
        scan.setLimit((int) limiter.getRate());

        int totalScanned = 0;
        int totalMatched = 0;
        ScanResult page;

        do {
            // Back-pressure: block while the queue is at capacity. A hard
            // queue bound is avoided because delete tasks may resubmit failed
            // keys, which could otherwise deadlock.
            synchronized (deleteQueue) {
                while (deleteQueue.size() >= queueSize) {
                    deleteQueue.wait();
                }
            }

            if (!running) {
                break;
            }

            page = db.scan(scan);

            totalScanned += page.getScannedCount();
            totalMatched += page.getCount();

            log.info(String.format("Total scanned: %d, matched: %d, added: %d, queue size: %d, consumed capacity: %f, max rate: %f", totalScanned, totalMatched, page.getCount(), deleteQueue.size(), page.getConsumedCapacityUnits(), limiter.getRate()));

            for (Map<String, AttributeValue> item : page.getItems()) {
                // Defensive: skip anything without an epoch attribute.
                if (!item.containsKey("epoch")) {
                    continue;
                }

                deleteQueue.put(new Key(item.get(DynamoDBMetastore.HASH_KEY), item.get(DynamoDBMetastore.RANGE_KEY)));
            }

            // Throttle by the read capacity this page actually consumed.
            limiter.acquire(page.getConsumedCapacityUnits().intValue());

            scan.setExclusiveStartKey(page.getLastEvaluatedKey());
        } while (running && page.getLastEvaluatedKey() != null);

        return Boolean.TRUE;
    }
}
| 317 |
0 | Create_ds/Lipstick/lipstick-console/src/test/java/com/netflix | Create_ds/Lipstick/lipstick-console/src/test/java/com/netflix/lipstick/MRPlanCalculatorTest.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.lipstick;
import java.lang.reflect.Field;
import java.lang.reflect.Method;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import org.apache.pig.LipstickPigServer;
import org.apache.pig.backend.hadoop.executionengine.HExecutionEngine;
import org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MRCompiler;
import org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.plans.MROperPlan;
import org.apache.pig.backend.hadoop.executionengine.physicalLayer.PhysicalOperator;
import org.apache.pig.backend.hadoop.executionengine.physicalLayer.plans.PhysicalPlan;
import org.apache.pig.newplan.Operator;
import org.apache.pig.newplan.logical.relational.LogicalPlan;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.Maps;
import com.netflix.lipstick.MRPlanCalculator.MRStepType;
import com.netflix.lipstick.model.P2jPlan;
import com.netflix.lipstick.model.operators.P2jLOStore;
import com.netflix.lipstick.model.operators.P2jLogicalRelationalOperator;
/**
 * Tests for {@link MRPlanCalculator}: runs a sample Pig script locally and
 * verifies that each logical operator is tagged with the expected
 * map/reduce step type in the resulting {@link P2jPlan}.
 */
public class MRPlanCalculatorTest {
/**
 * End-to-end check: compiles ./src/test/resources/test.pig, builds the
 * P2j plan and the MR operator plan, then asserts the step type
 * (MAPPER/REDUCER/UNKNOWN) assigned to every operator in the plan.
 */
@Test
public void generalTest() throws Exception {
LipstickPigServer lps = new LipstickPigServer("local");
lps.setBatchOn();
lps.registerScript("./src/test/resources/test.pig");
P2jPlanGenerator opg = getOpPlanGenerator(lps);
MROperPlan plan = getMROperPlan(lps);
Map<PhysicalOperator, Operator> p2lMap = getP2lMap(lps);
MRPlanCalculator opCalc = new MRPlanCalculator(opg.getP2jPlan(), plan, p2lMap, opg.getReverseMap());
P2jPlan opPlan = opCalc.getP2jPlan();
// Expected step type keyed by operator identifier: the alias for most
// operators, or the storage location for stores (see getIdentifier).
Map<String, MRStepType> expectedIdToStepTypeMap = new HashMap<String, MRStepType>();
expectedIdToStepTypeMap.put("tiny", MRStepType.MAPPER);
expectedIdToStepTypeMap.put("colors", MRStepType.MAPPER);
expectedIdToStepTypeMap.put("colors2", MRStepType.MAPPER);
expectedIdToStepTypeMap.put("colors3", MRStepType.MAPPER);
expectedIdToStepTypeMap.put("file://" + System.getProperty("user.dir") + "/test_out_cogrp", MRStepType.REDUCER);
expectedIdToStepTypeMap.put("file://" + System.getProperty("user.dir") + "/test_out_join", MRStepType.REDUCER);
expectedIdToStepTypeMap.put("file://" + System.getProperty("user.dir") + "/test_out_tiny_colors", MRStepType.REDUCER);
expectedIdToStepTypeMap.put("tiny_colors", MRStepType.REDUCER);
expectedIdToStepTypeMap.put("tiny_colors_join", MRStepType.REDUCER);
expectedIdToStepTypeMap.put("colors_filtered", MRStepType.UNKNOWN);
expectedIdToStepTypeMap.put("tiny_colors_cogrp", MRStepType.MAPPER);
expectedIdToStepTypeMap.put("out", MRStepType.REDUCER);
for (String scope : opPlan.getPlan().keySet()) {
P2jLogicalRelationalOperator actualOp = opPlan.getPlan().get(scope);
String actualId = getIdentifier(actualOp);
String actualStepType = actualOp.getMapReduce().getStepType();
// NOTE(review): if the plan contains an identifier not present in the
// expected map, this lookup returns null and the toString() below
// throws NPE rather than failing with a clean assertion message.
String expectedStepType = expectedIdToStepTypeMap.get(actualId).toString();
Assert.assertEquals(actualStepType, expectedStepType);
}
}
// Builds a P2jPlanGenerator from the server's current logical plan.
private P2jPlanGenerator getOpPlanGenerator(LipstickPigServer lps) throws Exception {
return new P2jPlanGenerator(lps.getLP(null));
}
// Compiles the logical plan, then inverts the engine's logical->physical
// operator map into the physical->logical map MRPlanCalculator consumes.
private Map<PhysicalOperator, Operator> getP2lMap(LipstickPigServer lps) throws Exception {
HExecutionEngine he = (HExecutionEngine)lps.getPigContext().getExecutionEngine();
he.compile(getLogicalPlan(lps), null);
Map<Operator, PhysicalOperator> l2pMap = he.getLogToPhyMap();
Map<PhysicalOperator, Operator> p2lMap = Maps.newHashMap();
for (Entry<Operator, PhysicalOperator> i : l2pMap.entrySet()) {
p2lMap.put(i.getValue(), i.getKey());
}
return p2lMap;
}
// Compiles the logical plan down to a physical plan and then to a
// map-reduce operator plan via MRCompiler.
private MROperPlan getMROperPlan(LipstickPigServer lps) throws Exception {
HExecutionEngine he = (HExecutionEngine)lps.getPigContext().getExecutionEngine();
PhysicalPlan pp = he.compile(getLogicalPlan(lps), null);
MRCompiler mrc = new MRCompiler(pp, lps.getPigContext());
mrc.compile();
return mrc.getMRPlan();
}
// Extracts the logical plan from the server via reflection: PigServer keeps
// its current DAG in the private "currDAG" field, and the parse/build/compile
// steps are non-public methods on that graph object, so each is made
// accessible and invoked in order before fetching the plan.
private LogicalPlan getLogicalPlan(LipstickPigServer lps) throws Exception {
Field f = lps.getClass().getSuperclass().getDeclaredField("currDAG");
f.setAccessible(true);
Object graph = f.get(lps);
Method parseQueryMethod = graph.getClass().getDeclaredMethod("parseQuery");
parseQueryMethod.setAccessible(true);
parseQueryMethod.invoke(graph);
Method buildPlanMethod = graph.getClass().getDeclaredMethod("buildPlan", String.class);
buildPlanMethod.setAccessible(true);
buildPlanMethod.invoke(graph, new Object[] { null });
Method compilePlanMethod = graph.getClass().getDeclaredMethod("compile");
compilePlanMethod.setAccessible(true);
compilePlanMethod.invoke(graph);
Method getPlanMethod = graph.getClass().getMethod("getPlan", String.class);
return (LogicalPlan) getPlanMethod.invoke(graph, new Object[] { null });
}
// Stores are identified by storage location (their alias is not preserved);
// every other operator is identified by its alias.
private String getIdentifier(P2jLogicalRelationalOperator op) {
return (op instanceof P2jLOStore) ? ((P2jLOStore) op).getStorageLocation() : op.getAlias();
}
}
| 318 |
0 | Create_ds/Lipstick/lipstick-console/src/test/java/com/netflix | Create_ds/Lipstick/lipstick-console/src/test/java/com/netflix/lipstick/P2jPlanGeneratorTest.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.lipstick;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import org.apache.pig.LipstickPigServer;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.google.common.collect.Sets.SetView;
import com.netflix.lipstick.model.P2jPlan;
import com.netflix.lipstick.model.operators.P2jLOCogroup;
import com.netflix.lipstick.model.operators.P2jLOFilter;
import com.netflix.lipstick.model.operators.P2jLOJoin;
import com.netflix.lipstick.model.operators.P2jLOLimit;
import com.netflix.lipstick.model.operators.P2jLOLoad;
import com.netflix.lipstick.model.operators.P2jLOStore;
import com.netflix.lipstick.model.operators.P2jLogicalRelationalOperator;
import com.netflix.lipstick.test.util.Util;
/**
 * Tests for {@link P2jPlanGenerator}: builds a P2jPlan from a sample Pig
 * script and verifies the operator classes, identifiers, and the
 * predecessor/successor wiring of every node in the generated plan.
 */
public class P2jPlanGeneratorTest {
/**
 * Compiles ./src/test/resources/test.pig locally, constructs the set of
 * expected operators (with their expected graph edges), then matches each
 * actual plan node against exactly one expected operator.
 */
@Test
public void generalTest() throws Exception {
LipstickPigServer lps = new LipstickPigServer("local");
lps.setBatchOn();
lps.registerScript("./src/test/resources/test.pig");
P2jPlanGenerator opg = new P2jPlanGenerator(lps.getLP(null));
P2jPlan plan = opg.getP2jPlan();
// Build a map of scope to id from the built plan
Map<String, String> scopeToIdMap = new HashMap<String, String>();
for (String scope : plan.getPlan().keySet()) {
scopeToIdMap.put(scope, getIdentifier(plan.getPlan().get(scope)));
}
// Container for all expected P2j objects
Set<P2jLogicalRelationalOperator> expectedOps = new HashSet<P2jLogicalRelationalOperator>();
// Add all expected P2jLOLoad objects
P2jLOLoad load1 = new P2jLOLoad();
load1.setAlias("tiny");
load1.setSuccessors(Lists.newArrayList("tiny_colors", "tiny_colors_cogrp", "tiny_colors_join"));
P2jLOLoad load2 = new P2jLOLoad();
load2.setAlias("colors");
load2.setSuccessors(Lists.newArrayList("tiny_colors", "colors_filtered", "tiny_colors_cogrp", "tiny_colors_join"));
P2jLOLoad load3 = new P2jLOLoad();
load3.setAlias("colors2");
load3.setSuccessors(Lists.newArrayList("tiny_colors_cogrp", "tiny_colors_join"));
P2jLOLoad load4 = new P2jLOLoad();
load4.setAlias("colors3");
load4.setSuccessors(Lists.newArrayList("tiny_colors_join"));
expectedOps.addAll(Lists.newArrayList(load1, load2, load3, load4));
// Add all expected P2jLOStore objects
// Aliases are lost, so use storageLocation instead
P2jLOStore store1 = new P2jLOStore();
store1.setStorageLocation("file://" + System.getProperty("user.dir") + "/test_out_cogrp");
store1.setPredecessors(Lists.newArrayList("out"));
P2jLOStore store2 = new P2jLOStore();
store2.setStorageLocation("file://" + System.getProperty("user.dir") + "/test_out_join");
store2.setPredecessors(Lists.newArrayList("tiny_colors_join"));
P2jLOStore store3 = new P2jLOStore();
store3.setStorageLocation("file://" + System.getProperty("user.dir") + "/test_out_tiny_colors");
store3.setPredecessors(Lists.newArrayList("tiny_colors"));
expectedOps.addAll(Lists.newArrayList(store1, store2, store3));
// Add all expected P2jLOJoin objects
P2jLOJoin join1 = new P2jLOJoin();
join1.setAlias("tiny_colors");
join1.setPredecessors(Lists.newArrayList("tiny", "colors"));
join1.setSuccessors(Lists.newArrayList("file://" + System.getProperty("user.dir") + "/test_out_tiny_colors"));
P2jLOJoin join2 = new P2jLOJoin();
join2.setAlias("tiny_colors_join");
join2.setPredecessors(Lists.newArrayList("tiny", "colors", "colors2", "colors3"));
join2.setSuccessors(Lists.newArrayList("file://" + System.getProperty("user.dir") + "/test_out_join"));
expectedOps.addAll(Lists.newArrayList(join1, join2));
// Add all expected P2jLOFilter objects
P2jLOFilter filter1 = new P2jLOFilter();
filter1.setAlias("colors_filtered");
filter1.setPredecessors(Lists.newArrayList("colors"));
expectedOps.add(filter1);
// Add all expected P2jLOCogroup objects
P2jLOCogroup cogroup1 = new P2jLOCogroup();
cogroup1.setAlias("tiny_colors_cogrp");
cogroup1.setPredecessors(Lists.newArrayList("tiny", "colors", "colors2"));
cogroup1.setSuccessors(Lists.newArrayList("out"));
expectedOps.add(cogroup1);
// Add all expected P2jLOLimit objects
P2jLOLimit limit1 = new P2jLOLimit();
limit1.setAlias("out");
limit1.setPredecessors(Lists.newArrayList("tiny_colors_cogrp"));
limit1.setSuccessors(Lists.newArrayList("file://" + System.getProperty("user.dir") + "/test_out_cogrp"));
expectedOps.add(limit1);
// For each op, ensure the aliases and all predecessors/successors match
for (String scope : plan.getPlan().keySet()) {
P2jLogicalRelationalOperator actualOp = plan.getPlan().get(scope);
String actualId = getIdentifier(actualOp);
P2jLogicalRelationalOperator matchedOp = null;
for (P2jLogicalRelationalOperator expectedOp : expectedOps) {
String expectedId = getIdentifier(expectedOp);
if (actualId.equals(expectedId)) {
matchedOp = expectedOp;
// Compare classes
Assert.assertEquals(actualOp.getClass(), expectedOp.getClass());
// Compare predecessors
// Edges are stored as scopes; translate them back to identifiers
// before diffing against the expected alias sets.
Set<String> actualPredecessorAliases = Sets.newHashSet();
for (String predScope : actualOp.getPredecessors()) {
actualPredecessorAliases.add(scopeToIdMap.get(predScope));
}
SetView<String> predDiff = Util.safeDiffSets(actualPredecessorAliases, Util.safeNewSet(expectedOp.getPredecessors()));
Assert.assertEquals(predDiff.size(), 0);
// Compare successors
Set<String> actualSuccessorAliases = Sets.newHashSet();
for (String succScope : actualOp.getSuccessors()) {
actualSuccessorAliases.add(scopeToIdMap.get(succScope));
}
SetView<String> succDiff = Util.safeDiffSets(actualSuccessorAliases, Util.safeNewSet(expectedOp.getSuccessors()));
Assert.assertEquals(succDiff.size(), 0);
break;
}
}
// Every actual operator must match exactly one expected operator;
// remove the match so no expected op can be matched twice.
Assert.assertNotNull(matchedOp);
expectedOps.remove(matchedOp);
}
// All expected operators must have been consumed by the matching loop.
Assert.assertEquals(expectedOps.size(), 0);
}
// Stores are identified by storage location (their alias is not preserved);
// every other operator is identified by its alias.
private String getIdentifier(P2jLogicalRelationalOperator op) {
return (op instanceof P2jLOStore) ? ((P2jLOStore) op).getStorageLocation() : op.getAlias();
}
}
| 319 |
0 | Create_ds/Lipstick/lipstick-console/src/test/java/com/netflix/lipstick/test | Create_ds/Lipstick/lipstick-console/src/test/java/com/netflix/lipstick/test/util/Util.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.lipstick.test.util;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
import com.google.common.collect.Sets;
import com.google.common.collect.Sets.SetView;
/**
 * Null-tolerant set helpers shared by the plan-comparison tests.
 */
public class Util {

    /**
     * Copies the given collection into a fresh {@link HashSet}, treating a
     * {@code null} collection as empty.
     *
     * @param coll source collection, may be {@code null}
     * @return a new mutable set containing the collection's elements
     */
    public static <T> HashSet<T> safeNewSet(Collection<T> coll) {
        return (coll == null) ? new HashSet<T>() : new HashSet<T>(coll);
    }

    /**
     * Returns the Guava set difference {@code left \ right}, treating a
     * {@code null} operand on either side as the empty set.
     *
     * @param left  minuend set, may be {@code null}
     * @param right subtrahend set, may be {@code null}
     * @return a live {@link SetView} of elements in {@code left} but not {@code right}
     */
    public static <T> SetView<T> safeDiffSets(Set<T> left, Set<T> right) {
        Set<T> safeLeft = (left == null) ? new HashSet<T>() : left;
        Set<T> safeRight = (right == null) ? new HashSet<T>() : right;
        return Sets.difference(safeLeft, safeRight);
    }
}
| 320 |
0 | Create_ds/Lipstick/lipstick-console/src/test/java/com/netflix/lipstick | Create_ds/Lipstick/lipstick-console/src/test/java/com/netflix/lipstick/util/OutputSamplerTest.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.lipstick.util;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.io.IOException;
import java.lang.reflect.Constructor;
import java.lang.reflect.Field;
import java.lang.reflect.InvocationTargetException;
import java.util.LinkedList;
import java.util.List;
import org.apache.pig.LoadFunc;
import org.apache.pig.backend.hadoop.executionengine.physicalLayer.relationalOperators.POStore;
import org.apache.pig.data.Tuple;
import org.apache.pig.impl.io.InterStorage;
import org.apache.pig.impl.io.InterStorage.InterInputFormat;
import org.apache.pig.impl.logicalLayer.schema.Schema;
import org.apache.pig.tools.pigstats.JobStats;
import org.apache.pig.tools.pigstats.mapreduce.MRJobStats;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.Lists;
import com.netflix.lipstick.util.OutputSampler;
import com.netflix.lipstick.util.OutputSampler.SampleOutput;
public class OutputSamplerTest {
@Test
public void getSampleOutputAllOutputLessThanMaxBytesAndRows() throws Exception {
final String output = "testRecord";
final String schema = "{blah: chararray}";
final InterStorage is = getInterStorage(Lists.newArrayList(output));
List<SampleOutput> outputs = getSampleOutputs(is, schema);
Assert.assertEquals(outputs.get(0).getOutput(), output);
Assert.assertEquals(outputs.get(0).getSchema(), schema);
}
@Test
public void getSampleOutputAllOutputMoreThanMaxBytes() throws Exception {
final String schema = "{blah: chararray}";
int recordCount = 1000;
int byteCount = 256;
final InterStorage is = getInterStorage(createRecords(recordCount));
List<SampleOutput> outputs = getSampleOutputs(is, schema, recordCount, byteCount);
Assert.assertTrue(outputs.get(0).getOutput().split("\n").length < recordCount);
Assert.assertTrue(outputs.get(0).getOutput().length() <= byteCount);
Assert.assertEquals(outputs.get(0).getSchema(), schema);
}
@Test
public void getSampleOutputAllOutputMoreThanMaxRows() throws Exception {
final String schema = "{blah: chararray}";
int recordCount = 10;
int byteCount = 1024;
final InterStorage is = getInterStorage(createRecords(recordCount));
List<SampleOutput> outputs = getSampleOutputs(is, schema, recordCount, byteCount);
Assert.assertEquals(outputs.get(0).getOutput().split("\n").length, recordCount);
Assert.assertTrue(outputs.get(0).getOutput().length() <= byteCount);
Assert.assertEquals(outputs.get(0).getSchema(), schema);
}
@Test
public void getSampleOutputFirstRowLargerThanMaxBytes() throws Exception {
final String output = "This record is larger than our byte count and we expect no output";
final String schema = "{blah: chararray}";
int recordCount = 10;
int byteCount = 20;
final InterStorage is = getInterStorage(Lists.newArrayList(output));
List<SampleOutput> outputs = getSampleOutputs(is, schema, recordCount, byteCount);
Assert.assertEquals(outputs.get(0).getOutput().length(), 0);
Assert.assertEquals(outputs.get(0).getSchema(), schema);
}
private List<SampleOutput> getSampleOutputs(InterStorage is, String schema) throws Exception {
return getSampleOutputs(is, schema, 10, 1024);
}
private List<SampleOutput> getSampleOutputs(InterStorage is, String schema, int recordCount, int byteCount)
throws Exception {
POStore pos = getPOStore(is, schema);
List<POStore> posList = Lists.newLinkedList();
posList.add(pos);
JobStats js = getJobStats();
addStoresToJobStats(js, posList);
OutputSampler os = getOutputSamplerWithOverriddenLoader(is, js);
return os.getSampleOutputs(recordCount, byteCount);
}
private List<String> createRecords(int recordCount) {
final String output = "testRecord";
List<String> recordList = new LinkedList<String>();
for (int i = 0; i < recordCount; i++) {
String record = output + i;
recordList.add(record);
}
return recordList;
}
private OutputSampler getOutputSamplerWithOverriddenLoader(final InterStorage is, JobStats js) {
OutputSampler os = new OutputSampler(js) {
@Override
protected LoadFunc getLoader(POStore pos) {
return is;
}
};
return os;
}
private JobStats getJobStats() throws InstantiationException, IllegalAccessException, IllegalArgumentException,
InvocationTargetException {
Constructor<?> ctor = MRJobStats.class.getDeclaredConstructors()[0];
ctor.setAccessible(true);
return (JobStats) ctor.newInstance(null, null);
}
private POStore getPOStore(InterStorage is, String schema) throws IOException {
POStore pos = mock(POStore.class);
when(pos.getStoreFunc()).thenReturn(is);
when(pos.getSchema()).thenReturn(new Schema(new Schema.FieldSchema("blah", (byte) 55)));
return pos;
}
private InterStorage getInterStorage(List<String> returnRecords) throws IOException {
InterStorage is = mock(InterStorage.class);
when(is.getInputFormat()).thenReturn(new InterInputFormat());
List<Tuple> tuples = new LinkedList<Tuple>();
for (String record : returnRecords) {
Tuple tup = mock(Tuple.class);
when(tup.toDelimitedString(OutputSampler.DELIMITER)).thenReturn(record);
tuples.add(tup);
}
tuples.add(null);
when(is.getNext()).thenReturn(tuples.remove(0), tuples.toArray(new Tuple[0]));
return is;
}
private void addStoresToJobStats(JobStats js, List<POStore> stores) throws SecurityException, NoSuchFieldException,
IllegalArgumentException, IllegalAccessException {
Field f = js.getClass().getDeclaredField("mapStores");
f.setAccessible(true);
f.set(js, stores);
}
class BaseClass {
public String BaseClsAttr = "foo";
}
class ChildClass extends BaseClass {};
@Test
public void testGetInheritedFieldValue() throws Exception {
OutputSampler sampler = new OutputSampler(null);
ChildClass obj = new ChildClass();
String value = (String)sampler.getInheritedFieldValue(obj, "BaseClsAttr");
Assert.assertEquals("foo", value);
}
}
| 321 |
0 | Create_ds/Lipstick/lipstick-console/src/test/java/com/netflix/lipstick | Create_ds/Lipstick/lipstick-console/src/test/java/com/netflix/lipstick/adaptors/LOJsonAdaptorTest.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.lipstick.adaptors;
import java.lang.reflect.Field;
import java.lang.reflect.Method;
import org.apache.commons.lang.StringUtils;
import org.apache.pig.LipstickPigServer;
import org.apache.pig.impl.logicalLayer.FrontendException;
import org.apache.pig.newplan.Operator;
import org.apache.pig.newplan.logical.Util;
import org.apache.pig.newplan.logical.relational.LOCogroup;
import org.apache.pig.newplan.logical.relational.LOFilter;
import org.apache.pig.newplan.logical.relational.LOJoin;
import org.apache.pig.newplan.logical.relational.LOLimit;
import org.apache.pig.newplan.logical.relational.LOLoad;
import org.apache.pig.newplan.logical.relational.LOSplitOutput;
import org.apache.pig.newplan.logical.relational.LOStore;
import org.apache.pig.newplan.logical.relational.LogicalPlan;
import org.apache.pig.newplan.logical.relational.LogicalRelationalOperator;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.netflix.lipstick.adaptors.LOJsonAdaptor.LogicalExpressionPlanSerializer;
import com.netflix.lipstick.model.operators.P2jLOCogroup;
import com.netflix.lipstick.model.operators.P2jLOFilter;
import com.netflix.lipstick.model.operators.P2jLOJoin;
import com.netflix.lipstick.model.operators.P2jLOLimit;
import com.netflix.lipstick.model.operators.P2jLOLoad;
import com.netflix.lipstick.model.operators.P2jLOSplitOutput;
import com.netflix.lipstick.model.operators.P2jLOStore;
import com.netflix.lipstick.model.operators.P2jLogicalRelationalOperator;
import com.netflix.lipstick.util.EzIterable;
/**
 * Tests for the {@code LOJsonAdaptor} hierarchy: for every operator in a
 * compiled Pig logical plan, wraps it in the matching adaptor and verifies
 * the adaptor's P2j representation reflects the operator's properties.
 */
public class LOJsonAdaptorTest {
/**
 * Compiles ./src/test/resources/test.pig locally and verifies every
 * operator's adaptor (generic fields plus type-specific fields).
 */
@Test
public void generalTest() throws Exception {
LipstickPigServer lps = new LipstickPigServer("local");
lps.setBatchOn();
lps.registerScript("./src/test/resources/test.pig");
LogicalPlan lp = getLogicalPlan(lps);
for (Operator op : EzIterable.getIterable(lp.getOperators())) {
LogicalRelationalOperator lro = (LogicalRelationalOperator) op;
LOJsonAdaptor adaptor = getAdaptor(lro, lp);
verifyAdaptor(adaptor, lro);
}
}
// Dispatches to the type-specific verifier after checking the generic
// fields common to all adaptors. The instanceof chain mirrors getAdaptor.
private void verifyAdaptor(LOJsonAdaptor adaptor, LogicalRelationalOperator lro) throws FrontendException {
verifyGenericAdaptor(adaptor, lro);
if (adaptor instanceof LOLoadJsonAdaptor) {
Assert.assertTrue(lro instanceof LOLoad);
verifyLoadAdaptor((LOLoadJsonAdaptor) adaptor, (LOLoad) lro);
} else if (adaptor instanceof LOStoreJsonAdaptor) {
Assert.assertTrue(lro instanceof LOStore);
verifyStoreAdaptor((LOStoreJsonAdaptor) adaptor, (LOStore) lro);
} else if (adaptor instanceof LOSplitOutputJsonAdaptor) {
Assert.assertTrue(lro instanceof LOSplitOutput);
verifySplitAdaptor((LOSplitOutputJsonAdaptor) adaptor, (LOSplitOutput) lro);
} else if (adaptor instanceof LOJoinJsonAdaptor) {
Assert.assertTrue(lro instanceof LOJoin);
verifyJoinAdaptor((LOJoinJsonAdaptor) adaptor, (LOJoin) lro);
} else if (adaptor instanceof LOCogroupJsonAdaptor) {
Assert.assertTrue(lro instanceof LOCogroup);
verifyCogroupAdaptor((LOCogroupJsonAdaptor) adaptor, (LOCogroup) lro);
} else if (adaptor instanceof LOFilterJsonAdaptor) {
Assert.assertTrue(lro instanceof LOFilter);
verifyFilterAdaptor((LOFilterJsonAdaptor) adaptor, (LOFilter) lro);
} else if (adaptor instanceof LOLimitJsonAdaptor) {
Assert.assertTrue(lro instanceof LOLimit);
verifyLimitAdaptor((LOLimitJsonAdaptor) adaptor, (LOLimit) lro);
}
}
// Checks the fields every adaptor must translate: schema string, operator
// class name, alias, and source location (line and filename).
private void verifyGenericAdaptor(LOJsonAdaptor adaptor, LogicalRelationalOperator lro) throws FrontendException {
P2jLogicalRelationalOperator p2j = adaptor.getToP2jOperator();
if (lro.getSchema() != null) {
Assert.assertEquals(p2j.getSchemaString(), Util.translateSchema(lro.getSchema()).toString());
}
Assert.assertEquals(p2j.getOperator(), lro.getClass().getSimpleName());
Assert.assertEquals(p2j.getAlias(), lro.getAlias());
Assert.assertEquals(p2j.getLocation().getLine(), (Integer) lro.getLocation().line());
Assert.assertEquals(p2j.getLocation().getFilename(), lro.getLocation().file());
}
// Loads must carry their storage location and the simple (unqualified)
// name of their load function.
private void verifyLoadAdaptor(LOLoadJsonAdaptor adaptor, LOLoad lro) {
Assert.assertTrue(adaptor.getToP2jOperator() instanceof P2jLOLoad);
P2jLOLoad load = (P2jLOLoad) adaptor.getToP2jOperator();
Assert.assertEquals(load.getStorageLocation(), lro.getFileSpec().getFileName());
String[] funcList = StringUtils.split(lro.getFileSpec().getFuncName(), ".");
Assert.assertEquals(load.getStorageFunction(), funcList[funcList.length - 1]);
}
// Stores mirror the load check: storage location plus unqualified
// store-function name.
private void verifyStoreAdaptor(LOStoreJsonAdaptor adaptor, LOStore lro) {
Assert.assertTrue(adaptor.getToP2jOperator() instanceof P2jLOStore);
P2jLOStore store = (P2jLOStore) adaptor.getToP2jOperator();
Assert.assertEquals(store.getStorageLocation(), lro.getFileSpec().getFileName());
String[] funcList = StringUtils.split(lro.getFileSpec().getFuncName(), ".");
Assert.assertEquals(store.getStorageFunction(), funcList[funcList.length - 1]);
}
// Split outputs must serialize their filter expression.
private void verifySplitAdaptor(LOSplitOutputJsonAdaptor adaptor, LOSplitOutput lro) {
Assert.assertTrue(adaptor.getToP2jOperator() instanceof P2jLOSplitOutput);
P2jLOSplitOutput split = (P2jLOSplitOutput) adaptor.getToP2jOperator();
Assert.assertEquals(split.getExpression(), LogicalExpressionPlanSerializer.serialize(lro.getFilterPlan()));
}
// Joins must expose a (non-null) join description.
private void verifyJoinAdaptor(LOJoinJsonAdaptor adaptor, LOJoin lro) {
Assert.assertTrue(adaptor.getToP2jOperator() instanceof P2jLOJoin);
P2jLOJoin join = (P2jLOJoin) adaptor.getToP2jOperator();
Assert.assertNotNull(join.getJoin());
}
// Cogroups must expose a (non-null) group description.
private void verifyCogroupAdaptor(LOCogroupJsonAdaptor adaptor, LOCogroup lro) {
Assert.assertTrue(adaptor.getToP2jOperator() instanceof P2jLOCogroup);
P2jLOCogroup cogroup = (P2jLOCogroup) adaptor.getToP2jOperator();
Assert.assertNotNull(cogroup.getGroup());
}
// Filters must serialize their filter expression.
private void verifyFilterAdaptor(LOFilterJsonAdaptor adaptor, LOFilter lro) {
Assert.assertTrue(adaptor.getToP2jOperator() instanceof P2jLOFilter);
P2jLOFilter filter = (P2jLOFilter) adaptor.getToP2jOperator();
Assert.assertEquals(filter.getExpression(), LogicalExpressionPlanSerializer.serialize(lro.getFilterPlan()));
}
// Limits must carry the row limit through unchanged.
private void verifyLimitAdaptor(LOLimitJsonAdaptor adaptor, LOLimit lro) {
Assert.assertTrue(adaptor.getToP2jOperator() instanceof P2jLOLimit);
P2jLOLimit limit = (P2jLOLimit) adaptor.getToP2jOperator();
Assert.assertEquals(limit.getRowLimit(), lro.getLimit());
}
// Extracts the logical plan via reflection on PigServer's private
// "currDAG" field. NOTE(review): unlike the equivalent helper in
// MRPlanCalculatorTest, this variant invokes only buildPlan (no
// parseQuery/compile) — presumably sufficient here; confirm if reused.
private LogicalPlan getLogicalPlan(LipstickPigServer lps) throws Exception {
Field f = lps.getClass().getSuperclass().getDeclaredField("currDAG");
f.setAccessible(true);
Object graph = f.get(lps);
Method buildPlanMethod = graph.getClass().getDeclaredMethod("buildPlan", String.class);
buildPlanMethod.setAccessible(true);
buildPlanMethod.invoke(graph, new Object[] { null });
Method getPlanMethod = graph.getClass().getMethod("getPlan", String.class);
return (LogicalPlan) getPlanMethod.invoke(graph, new Object[] { null });
}
// Maps each operator type to its specific adaptor, falling back to the
// generic LOJsonAdaptor for types without a specialized adaptor.
private LOJsonAdaptor getAdaptor(LogicalRelationalOperator node, LogicalPlan lp) throws FrontendException {
if (node instanceof LOLoad) {
return new LOLoadJsonAdaptor((LOLoad) node, lp);
} else if (node instanceof LOStore) {
return new LOStoreJsonAdaptor((LOStore) node, lp);
} else if (node instanceof LOSplitOutput) {
return new LOSplitOutputJsonAdaptor((LOSplitOutput) node, lp);
} else if (node instanceof LOJoin) {
return new LOJoinJsonAdaptor((LOJoin) node, lp);
} else if (node instanceof LOCogroup) {
return new LOCogroupJsonAdaptor((LOCogroup) node, lp);
} else if (node instanceof LOFilter) {
return new LOFilterJsonAdaptor((LOFilter) node, lp);
} else if (node instanceof LOLimit) {
return new LOLimitJsonAdaptor((LOLimit) node, lp);
}
return new LOJsonAdaptor(node, lp);
}
}
| 322 |
0 | Create_ds/Lipstick/lipstick-console/src/test/java/com/netflix/lipstick | Create_ds/Lipstick/lipstick-console/src/test/java/com/netflix/lipstick/model/PersistTest.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.lipstick.model;
import java.io.IOException;
import java.lang.annotation.Annotation;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.HashMap;
import javax.persistence.EntityManager;
import javax.persistence.EntityManagerFactory;
import javax.persistence.EntityTransaction;
import javax.persistence.Persistence;
import org.codehaus.jackson.JsonGenerationException;
import org.codehaus.jackson.map.JsonMappingException;
import org.codehaus.jackson.map.ObjectMapper;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.collect.Lists;
import com.netflix.lipstick.model.P2jPlanStatus.StatusText;
import com.netflix.lipstick.model.operators.P2jLogicalRelationalOperator;
public class PersistTest {
EntityManagerFactory emf;
EntityManager em;
@BeforeClass
public void beforeTests() {
emf = Persistence.createEntityManagerFactory("pu");
em = emf.createEntityManager();
}
@AfterClass
public void afterTests() {
em.clear();
em.close();
}
@Test
public void testP2jCounters() throws Exception {
P2jCounters counters = new P2jCounters();
counters.setId(0);
counters.setCounters(new HashMap<String, Long>());
P2jCounters counters2 = persistAndFind(counters);
compare(counters, counters2);
}
@Test
public void testP2jJobStatus() throws Exception {
P2jJobStatus jobStatus = new P2jJobStatus();
jobStatus.setCounters(new HashMap<String, P2jCounters>());
jobStatus.setId(0);
jobStatus.setIsComplete(true);
jobStatus.setIsSuccessful(true);
jobStatus.setJobId("someJobId");
jobStatus.setJobName("someJobName");
jobStatus.setMapProgress(1.0f);
jobStatus.setReduceProgress(2.0f);
jobStatus.setScope("someScope");
jobStatus.setTotalMappers(5);
jobStatus.setTotalReducers(10);
jobStatus.setTrackingUrl("someTrackingUrl");
P2jJobStatus jobStatus2 = persistAndFind(jobStatus);
compare(jobStatus, jobStatus2);
}
@Test
public void testP2jPlan() throws Exception {
P2jPlan plan = getP2jPlan();
P2jPlan plan2 = persistAndFind(plan);
compare(plan, plan2);
}
@Test
public void testP2jPlanPackage() throws Exception {
P2jPlanPackage planPackage = new P2jPlanPackage();
planPackage.setJobName("someJobName");
planPackage.setOptimized(persistAndFind(getP2jPlan()));
planPackage.setSampleOutputMap(new HashMap<String, P2jSampleOutputList>());
planPackage.setScripts(persistAndFind(getP2jScripts()));
planPackage.setStatus(persistAndFind(getP2jPlanStatus()));
planPackage.setUnoptimized(persistAndFind(getP2jPlan()));
planPackage.setUserName("someUserName");
planPackage.setUuid("someUuid");
P2jPlanPackage planPackage2 = persistAndFind(planPackage);
compare(planPackage, planPackage2);
}
@Test
public void testP2jPlanStatus() throws Exception {
P2jPlanStatus planStatus = getP2jPlanStatus();
P2jPlanStatus planStatus2 = persistAndFind(planStatus);
compare(planStatus, planStatus2);
}
@Test
public void testP2jSampleOutput() throws Exception {
P2jSampleOutput sampleOutput = getP2jSampleOutput();
P2jSampleOutput sampleOutput2 = persistAndFind(sampleOutput);
compare(sampleOutput, sampleOutput2);
}
@Test
public void testP2jSampleOutputList() throws Exception {
P2jSampleOutputList sampleOutputList = new P2jSampleOutputList();
sampleOutputList.setSampleOutputList(Lists.newArrayList(getP2jSampleOutput(), getP2jSampleOutput(), getP2jSampleOutput()));
P2jSampleOutputList sampleOutputList2 = persistAndFind(sampleOutputList);
compare(sampleOutputList, sampleOutputList2);
}
@Test
public void testP2jScripts() throws Exception {
P2jScripts scripts = getP2jScripts();
P2jScripts scripts2 = persistAndFind(scripts);
compare(scripts, scripts2);
}
private void compare(Object a, Object b) throws JsonGenerationException, JsonMappingException, IOException {
ObjectMapper mapper = new ObjectMapper();
String strA = mapper.writeValueAsString(a);
String strB = mapper.writeValueAsString(b);
Assert.assertEquals(strA, strB);
}
@SuppressWarnings("unchecked")
private <T> T persistAndFind(T obj) throws IllegalArgumentException, IllegalAccessException, InvocationTargetException {
EntityTransaction et = em.getTransaction();
et.begin();
em.persist(obj);
em.flush();
et.commit();
T ret = (T) em.find(obj.getClass(), determineId(obj));
return ret;
}
/**
 * Determines an entity's identifier value by invoking the zero-argument
 * getter annotated with {@code javax.persistence.Id}.
 *
 * @param obj the entity whose id should be extracted
 * @return the value returned by the {@code @Id}-annotated getter
 * @throws RuntimeException if no declared method on the object's class is annotated with Id
 */
private Object determineId(Object obj) throws IllegalArgumentException, IllegalAccessException, InvocationTargetException {
    for (Method m : obj.getClass().getDeclaredMethods()) {
        // Direct lookup is simpler than iterating all annotations and
        // instanceof-checking each one.
        if (m.getAnnotation(javax.persistence.Id.class) != null) {
            return m.invoke(obj);
        }
    }
    // Include the class name so the offending entity type is easy to identify.
    throw new RuntimeException("Unable to determine id for object of class "
            + obj.getClass().getName() + ".");
}
/** @return a minimal P2jPlan fixture with an empty operator map. */
private P2jPlan getP2jPlan() {
    P2jPlan fixture = new P2jPlan();
    fixture.setPlan(new HashMap<String, P2jLogicalRelationalOperator>());
    fixture.setSvg("someSvg");
    return fixture;
}
/** @return a P2jScripts fixture holding a placeholder script. */
private P2jScripts getP2jScripts() {
    P2jScripts fixture = new P2jScripts();
    fixture.setScript("someScript");
    return fixture;
}
/** @return a populated P2jPlanStatus fixture in the finished state. */
private P2jPlanStatus getP2jPlanStatus() {
    P2jPlanStatus status = new P2jPlanStatus();
    status.setEndTime();
    status.setHeartbeatTime();
    status.setJobStatusMap(new HashMap<String, P2jJobStatus>());
    status.setProgress(5);
    status.setStartTime();
    status.setStatusText(StatusText.finished);
    return status;
}
/** @return a P2jSampleOutput fixture with placeholder output and schema. */
private P2jSampleOutput getP2jSampleOutput() {
    P2jSampleOutput fixture = new P2jSampleOutput();
    fixture.setSampleOutput("someSampleOutput");
    fixture.setSchemaString("someSchemaString");
    return fixture;
}
}
| 323 |
0 | Create_ds/Lipstick/lipstick-console/src/test/java/com/netflix/lipstick | Create_ds/Lipstick/lipstick-console/src/test/java/com/netflix/lipstick/warnings/JobWarningsTest.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.lipstick.warnings;
import static org.testng.Assert.*;
import org.testng.annotations.Test;
import static org.mockito.Mockito.*;
import java.util.List;
import org.apache.pig.tools.pigstats.JobStats;
import com.google.common.collect.Lists;
public class JobWarningsTest {

    @Test
    public void testFindSkewedReducersNotEnoughTasks() throws Exception {
        JobWarnings warnings = new JobWarnings();
        List<JobWarnings.ReducerDuration> durations = Lists.newLinkedList();
        // While fewer than MIN_REDUCERS_FOR_SKEW reducers are present, skew
        // detection must never fire regardless of the durations involved.
        for (int i = 0; i < JobWarnings.MIN_REDUCERS_FOR_SKEW; i++) {
            durations.add(new JobWarnings.ReducerDuration("Some Job", i));
            assertEquals(0, warnings.findSkewedReducers(durations).size());
        }
        // Now that we have tried the maximum number of reducers without a
        // check firing, add an obviously skewed duration and expect one hit.
        durations.add(new JobWarnings.ReducerDuration("Some Job", 10000));
        assertEquals(1, warnings.findSkewedReducers(durations).size());
    }

    @Test
    public void testFindSkewedReducersSkewedReducersPresent() throws Exception {
        List<JobWarnings.ReducerDuration> durations = Lists.newLinkedList();
        durations.add(new JobWarnings.ReducerDuration("task_201310241542_0008_r_000005", 1382638023848L));
        durations.add(new JobWarnings.ReducerDuration("task_201310241542_0008_r_000006", 1382638023848L));
        durations.add(new JobWarnings.ReducerDuration("task_201310241542_0008_r_000007", 1382638023849L));
        durations.add(new JobWarnings.ReducerDuration("task_201310241542_0008_r_000009", 1382638023849L));
        // Orders of magnitude longer than its peers: this one should be
        // detected as skewed.
        durations.add(new JobWarnings.ReducerDuration("task_201310241542_0008_r_000008", 138263802384800L));
        List<String> skewed = new JobWarnings().findSkewedReducers(durations);
        assertEquals(1, skewed.size());
        assertEquals("task_201310241542_0008_r_000008", skewed.get(0));
    }
}
| 324 |
0 | Create_ds/Lipstick/lipstick-console/src/test/java/com/netflix/lipstick | Create_ds/Lipstick/lipstick-console/src/test/java/com/netflix/lipstick/pigtolipstick/BasicP2LClientTest.java |
package com.netflix.lipstick.pigtolipstick;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Properties;
import org.testng.Assert;
import org.testng.annotations.Test;
import static org.mockito.Mockito.*;
import org.apache.pig.LipstickPigServer;
import org.apache.pig.impl.PigContext;
import com.netflix.lipstick.P2jPlanGenerator;
import com.netflix.lipstick.pigstatus.PigStatusClient;
import com.netflix.lipstick.model.P2jPlanPackage;
import com.netflix.lipstick.model.P2jPlan;
import org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.plans.MROperPlan;
import org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceOper;
import org.apache.pig.backend.hadoop.executionengine.HExecutionEngine;
import org.apache.pig.backend.hadoop.executionengine.physicalLayer.PhysicalOperator;
import org.apache.pig.newplan.Operator;
public class BasicP2LClientTest {

    /**
     * Installs mock P2jPlanGenerator objects on the client, stubbed just
     * enough for BasicP2LClient.createPlan() to complete.
     */
    public void setMockPlanGenerators(BasicP2LClient client) {
        P2jPlan plan = mock(P2jPlan.class);
        P2jPlanGenerator unoptimized = mock(P2jPlanGenerator.class);
        P2jPlanGenerator optimized = mock(P2jPlanGenerator.class);
        when(unoptimized.getP2jPlan()).thenReturn(plan);
        when(optimized.getP2jPlan()).thenReturn(plan);
        client.setPlanGenerators(unoptimized, optimized);
    }

    /** @return an MROperPlan mock whose iterator yields no operators. */
    public MROperPlan makeMockMROperPlan() {
        MROperPlan plan = mock(MROperPlan.class);
        when(plan.iterator()).thenReturn(new ArrayList<MapReduceOper>().iterator());
        return plan;
    }

    /**
     * Installs a mock PigContext on the client, stubbed just enough for
     * BasicP2LClient.createPlan() to complete.
     */
    public void addMockPigContext(BasicP2LClient client) {
        HExecutionEngine engine = mock(HExecutionEngine.class);
        when(engine.getLogToPhyMap()).thenReturn(new HashMap<Operator, PhysicalOperator>());
        PigContext context = mock(PigContext.class);
        when(context.getExecutionEngine()).thenReturn(engine);
        when(context.getProperties()).thenReturn(new Properties());
        client.setPigContext(context);
    }

    /** Installs a mock LipstickPigServer on the client under test. */
    public void addMockPigServer(BasicP2LClient client) {
        client.setPigServer(mock(LipstickPigServer.class));
    }

    @Test
    public void testCreatePlanClientSaved() throws Exception {
        PigStatusClient statusClient = mock(PigStatusClient.class);
        BasicP2LClient client = new BasicP2LClient(statusClient);
        addMockPigContext(client);
        addMockPigServer(client);
        setMockPlanGenerators(client);
        client.createPlan(makeMockMROperPlan());
        // createPlan() must hand the assembled plan package to the status client.
        verify(statusClient).savePlan(any(P2jPlanPackage.class));
    }

    @Test
    public void testCreatePlanNoPigServerJustContext() throws Exception {
        // Same scenario, but with no pig server installed: the context alone
        // must be sufficient for the plan to be created and saved.
        PigStatusClient statusClient = mock(PigStatusClient.class);
        BasicP2LClient client = new BasicP2LClient(statusClient);
        addMockPigContext(client);
        setMockPlanGenerators(client);
        client.createPlan(makeMockMROperPlan());
        verify(statusClient).savePlan(any(P2jPlanPackage.class));
    }
}
| 325 |
0 | Create_ds/Lipstick/lipstick-console/src/test/java/com/netflix/dse/pig2json | Create_ds/Lipstick/lipstick-console/src/test/java/com/netflix/dse/pig2json/model/PersistTest.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dse.pig2json.model;
import java.io.IOException;
import java.io.StringWriter;
import javax.persistence.EntityManager;
import javax.persistence.EntityManagerFactory;
import javax.persistence.EntityTransaction;
import javax.persistence.Persistence;
import org.apache.commons.io.IOUtils;
import org.codehaus.jackson.JsonParseException;
import org.codehaus.jackson.map.JsonMappingException;
import org.codehaus.jackson.map.ObjectMapper;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.netflix.lipstick.model.P2jPlanPackage;
import com.netflix.lipstick.model.P2jPlanStatus;
public class PersistTest {
EntityManagerFactory emf;
EntityManager em;
@BeforeClass
public void beforeTests() {
emf = Persistence.createEntityManagerFactory("pu");
em = emf.createEntityManager();
}
@AfterClass
public void afterTests() {
em.clear();
em.close();
}
@Test
public void persistPlan() throws JsonParseException, JsonMappingException, IOException {
ObjectMapper mapper = new ObjectMapper();
P2jPlanPackage p = mapper.readValue(PersistTest.class.getResourceAsStream("/test.json"), P2jPlanPackage.class);
EntityTransaction et = em.getTransaction();
et.begin();
em.persist(p);
em.flush();
et.commit();
P2jPlanPackage p2 = em.find(P2jPlanPackage.class, p.getId());
String j = mapper.writeValueAsString(p);
String j2 = mapper.writeValueAsString(p2);
Assert.assertEquals(j2, j);
}
@Test
public void persistStatus() throws JsonParseException, JsonMappingException, IOException {
ObjectMapper mapper = new ObjectMapper();
StringWriter writer = new StringWriter();
IOUtils.copy(PersistTest.class.getResourceAsStream("/status.json"), writer, "UTF-8");
P2jPlanStatus p = mapper.readValue(PersistTest.class.getResourceAsStream("/status.json"), P2jPlanStatus.class);
EntityTransaction et = em.getTransaction();
et.begin();
em.persist(p);
em.flush();
et.commit();
P2jPlanStatus p2 = em.find(P2jPlanStatus.class, p.getId());
String j = mapper.writeValueAsString(p);
String j2 = mapper.writeValueAsString(p2);
Assert.assertEquals(j2, j);
}
} | 326 |
0 | Create_ds/Lipstick/lipstick-console/src/main/java/org/apache | Create_ds/Lipstick/lipstick-console/src/main/java/org/apache/pig/LipstickPigServer.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.pig;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.pig.backend.executionengine.ExecException;
import org.apache.pig.backend.hadoop.executionengine.HExecutionEngine;
import org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.plans.MROperPlan;
import org.apache.pig.backend.hadoop.executionengine.physicalLayer.PhysicalOperator;
import org.apache.pig.backend.hadoop.executionengine.physicalLayer.plans.PhysicalPlan;
import org.apache.pig.impl.PigContext;
import org.apache.pig.impl.logicalLayer.FrontendException;
import org.apache.pig.newplan.Operator;
import org.apache.pig.newplan.logical.relational.LogicalPlan;
import org.apache.pig.tools.pigstats.PigProgressNotificationListener;
import org.apache.pig.tools.pigstats.PigStats;
import org.apache.pig.tools.pigstats.ScriptState;
import com.netflix.lipstick.P2jPlanGenerator;
import com.netflix.lipstick.listeners.LipstickPPNL;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/**
* PigServer with extensions to support additional notifications to LipstickPPNL.
*
* @author jmagnusson
*
*/
public class LipstickPigServer extends PigServer {
protected Map<PhysicalOperator, Operator> p2lMap;
protected LogicalPlan p2jLogicalPlan;
protected LogicalPlan p2jOptimizedLogicalPlan;
protected PhysicalPlan p2jPhysicalPlan;
protected MROperPlan p2jMapReducePlan;
protected LipstickPPNL ppnl = null;
protected P2jPlanGenerator optimizedPlanGenerator;
protected P2jPlanGenerator unoptimizedPlanGenerator;
private static final Log LOG = LogFactory.getLog(LipstickPigServer.class);
/**
* Constructs a LipstickPigServer with the given execType and properties.
* Initializes any LipstickPPNLs that the ScriptState is aware of.
*
* @param execType
* @param properties
* @throws ExecException
*/
public LipstickPigServer(ExecType execType, Properties properties) throws ExecException {
super(execType, properties);
init();
}
/**
* Constructs a LipstickPigServer with the given execType.
* Initializes any LipstickPPNLs that the ScriptState is aware of.
* @param execType
* @throws ExecException
*/
public LipstickPigServer(ExecType execType) throws ExecException {
super(execType);
init();
}
/**
* Constructs a LipstickPigServer with the given context and connect settings.
* Initializes any LipstickPPNLs that the ScriptState is aware of.
* @param context
* @param connect
* @throws ExecException
*/
public LipstickPigServer(PigContext context, boolean connect) throws ExecException {
super(context, connect);
init();
}
/**
* Constructs a LipstickPigServer with the given context.
* Initializes any LipstickPPNLs that the ScriptState is aware of.
* @param context
* @throws ExecException
*/
public LipstickPigServer(PigContext context) throws ExecException {
super(context);
init();
}
/**
* Constructs a LipstickPigServer with the given execTypeString.
* Initializes any LipstickPPNLs that the ScriptState is aware of.
* @param execTypeString
* @throws ExecException
* @throws IOException
*/
public LipstickPigServer(String execTypeString) throws IOException {
super(execTypeString);
init();
}
/**
* Initializes any LipstickPPNLs that the ScriptState is aware of.
*/
protected void init() {
List<PigProgressNotificationListener> listeners = ScriptState.get().getAllListeners();
for (PigProgressNotificationListener l : listeners) {
if (l instanceof LipstickPPNL) {
ppnl = (LipstickPPNL) l;
ppnl.setPigServer(this);
}
}
}
/**
* Launches the given PhysicalPlan as well as sets the plan generators
* for the LipstickPPNL(s).
*/
@Override
protected PigStats launchPlan(LogicalPlan lp, String jobName) throws ExecException, FrontendException {
if (ppnl != null) {
try {
// Get optimized plan by compiling it with the appropriate execution engine
LOG.info("Compiling and optimizing logical plan...");
((HExecutionEngine)getPigContext().getExecutionEngine()).compile(lp, getPigContext().getProperties());
optimizedPlanGenerator = new P2jPlanGenerator(lp);
LOG.info("Finished compiling and optimizing logical plan");
ppnl.setPlanGenerators(unoptimizedPlanGenerator, optimizedPlanGenerator);
} catch (IOException e) {
e.printStackTrace();
}
}
return super.launchPlan(lp, jobName);
}
public List<String> getScriptCache() {
return getCurrentDAG().getScriptCache();
}
/**
* Takes advantage of the fact that parseAndBuild gets called by
* executeBatch <b>before</b> the logical plan is optimized
*/
@Override
public void parseAndBuild() throws IOException {
super.parseAndBuild();
unoptimizedPlanGenerator = new P2jPlanGenerator(getCurrentDAG().getLogicalPlan());
}
/**
* Returns the LogicalPlan contained in the current DAG with the given alias.
*
* @param alias
* @return
* @throws IOException
*/
public LogicalPlan getLP(String alias) throws IOException {
return getCurrentDAG().getPlan(alias);
}
}
| 327 |
0 | Create_ds/Lipstick/lipstick-console/src/main/java/org/apache/pig/tools | Create_ds/Lipstick/lipstick-console/src/main/java/org/apache/pig/tools/grunt/LipstickGrunt.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.pig.tools.grunt;
import java.io.BufferedReader;
import org.apache.pig.LipstickPigServer;
import org.apache.pig.backend.executionengine.ExecException;
import org.apache.pig.impl.PigContext;
public class LipstickGrunt extends Grunt {
    /**
     * Builds a Grunt shell whose PigServer is replaced with a
     * {@link LipstickPigServer} so that Lipstick listeners receive plan and
     * progress notifications.
     *
     * @param in the reader the shell consumes commands from; may be null
     * @param pigContext context used to construct the replacement server
     * @throws ExecException if the LipstickPigServer cannot be created
     */
    public LipstickGrunt(BufferedReader in, PigContext pigContext) throws ExecException {
        super(in, pigContext);
        // Swap in the Lipstick-aware server after the superclass constructor
        // has set up the protected fields (pig, in, parser) we override here.
        this.pig = new LipstickPigServer(pigContext);
        if (this.in != null) {
            // Rebuild the parser so it drives the replacement server rather
            // than the one created by the Grunt constructor.
            parser = new GruntParser(this.in, pig);
        }
    }
}
| 328 |
0 | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick/P2jPlanGenerator.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.lipstick;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import org.apache.pig.impl.logicalLayer.FrontendException;
import org.apache.pig.newplan.Operator;
import org.apache.pig.newplan.logical.relational.LOCogroup;
import org.apache.pig.newplan.logical.relational.LOFilter;
import org.apache.pig.newplan.logical.relational.LOJoin;
import org.apache.pig.newplan.logical.relational.LOLimit;
import org.apache.pig.newplan.logical.relational.LOLoad;
import org.apache.pig.newplan.logical.relational.LOSplitOutput;
import org.apache.pig.newplan.logical.relational.LOStore;
import org.apache.pig.newplan.logical.relational.LogicalPlan;
import org.apache.pig.newplan.logical.relational.LogicalRelationalOperator;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.lipstick.adaptors.LOCogroupJsonAdaptor;
import com.netflix.lipstick.adaptors.LOFilterJsonAdaptor;
import com.netflix.lipstick.adaptors.LOJoinJsonAdaptor;
import com.netflix.lipstick.adaptors.LOJsonAdaptor;
import com.netflix.lipstick.adaptors.LOLimitJsonAdaptor;
import com.netflix.lipstick.adaptors.LOLoadJsonAdaptor;
import com.netflix.lipstick.adaptors.LOSplitOutputJsonAdaptor;
import com.netflix.lipstick.adaptors.LOStoreJsonAdaptor;
import com.netflix.lipstick.model.P2jPlan;
import com.netflix.lipstick.model.operators.P2jLogicalRelationalOperator;
/**
* Generate a P2jPlan given a LogicalPlan.
*
* @author jmagnusson
*/
public class P2jPlanGenerator {
    /** Mapping of logical operator to p2j operator id. */
    protected Map<Operator, String> reverseMap;
    /** The p2j plan. */
    protected P2jPlan p2jPlan;

    /**
     * Instantiates a new p2j plan generator. Generates reverseMap and p2jPlan.
     *
     * @param lp the logical plan
     * @throws FrontendException the frontend exception
     */
    public P2jPlanGenerator(LogicalPlan lp) throws FrontendException {
        reverseMap = generateReverseMap(lp);
        Map<String, P2jLogicalRelationalOperator> nodeMap = Maps.newHashMap();
        for (Entry<Operator, String> entry : reverseMap.entrySet()) {
            nodeMap.put(entry.getValue(), convertNodeToP2j((LogicalRelationalOperator) entry.getKey(), lp, reverseMap));
        }
        p2jPlan = new P2jPlan(nodeMap);
    }

    /**
     * Gets the reverse map of logical operators to p2j operator ids.
     *
     * @return the reverse map
     */
    public Map<Operator, String> getReverseMap() {
        return reverseMap;
    }

    /**
     * Gets the p2j plan.
     *
     * @return the p2j plan
     */
    public P2jPlan getP2jPlan() {
        return p2jPlan;
    }

    /**
     * Generate reverse mapping of logical operators to p2j operator id.
     * Ids are sequential strings starting at "1", assigned in the order the
     * plan yields its operators (insertion order is preserved by the
     * LinkedHashMap).
     *
     * @param lp the logical plan
     * @return the reverse map
     */
    protected Map<Operator, String> generateReverseMap(LogicalPlan lp) {
        Map<Operator, String> map = Maps.newLinkedHashMap();
        // Plain int avoids the Integer autoboxing the old code performed on
        // every increment and toString call.
        int counter = 1;
        Iterator<Operator> ops = lp.getOperators();
        while (ops.hasNext()) {
            map.put(ops.next(), String.valueOf(counter++));
        }
        return map;
    }

    /**
     * Given a list of logical operators, return the list of p2j operator ids
     * they represent.
     *
     * @param nodes list of logical operators (may be null or empty)
     * @param reverseMap mapping of logical operator to p2j operator ids
     * @return the list of p2j operator ids
     */
    protected List<String> generateP2jIdList(List<Operator> nodes, Map<Operator, String> reverseMap) {
        List<String> chain = Lists.newLinkedList();
        if (nodes != null && !nodes.isEmpty()) {
            for (Operator i : nodes) {
                chain.add(reverseMap.get(i));
            }
        }
        return chain;
    }

    /**
     * Convert a LogicalRelationalOperator to a P2jLogicalRelationalOperator,
     * wiring up its predecessor/successor id lists and its own uid.
     *
     * @param node the LogicalRelationalOperator to convert
     * @param lp the LogicalPlan
     * @param reverseMap the mapping of logical operator to P2jLogicalRelationalOperator id
     * @return a P2jLogicalRelationalOperator representing the node passed in
     * @throws FrontendException
     */
    protected P2jLogicalRelationalOperator convertNodeToP2j(LogicalRelationalOperator node,
                                                           LogicalPlan lp,
                                                           Map<Operator, String> reverseMap) throws FrontendException {
        P2jLogicalRelationalOperator p2jNode = convertNodeToAdaptor(node, lp).getToP2jOperator();
        p2jNode.setPredecessors(generateP2jIdList(lp.getPredecessors(node), reverseMap));
        p2jNode.setSuccessors(generateP2jIdList(lp.getSuccessors(node), reverseMap));
        p2jNode.setUid(reverseMap.get(node));
        return p2jNode;
    }

    /**
     * Convert a LogicalRelationalOperator to the most specific LOJsonAdaptor
     * available for its concrete type, falling back to the generic adaptor.
     *
     * @param node the LogicalRelationalOperator to convert
     * @param lp the LogicalPlan containing node
     * @return a LOJsonAdaptor representing node
     * @throws FrontendException
     */
    protected LOJsonAdaptor convertNodeToAdaptor(LogicalRelationalOperator node, LogicalPlan lp)
            throws FrontendException {
        if (node instanceof LOLoad) {
            return new LOLoadJsonAdaptor((LOLoad) node, lp);
        } else if (node instanceof LOStore) {
            return new LOStoreJsonAdaptor((LOStore) node, lp);
        } else if (node instanceof LOSplitOutput) {
            return new LOSplitOutputJsonAdaptor((LOSplitOutput) node, lp);
        } else if (node instanceof LOJoin) {
            return new LOJoinJsonAdaptor((LOJoin) node, lp);
        } else if (node instanceof LOCogroup) {
            return new LOCogroupJsonAdaptor((LOCogroup) node, lp);
        } else if (node instanceof LOFilter) {
            return new LOFilterJsonAdaptor((LOFilter) node, lp);
        } else if (node instanceof LOLimit) {
            return new LOLimitJsonAdaptor((LOLimit) node, lp);
        }
        return new LOJsonAdaptor(node, lp);
    }
}
| 329 |
0 | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick/Pig2DotGenerator.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.lipstick;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.StringWriter;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.pig.impl.logicalLayer.schema.Schema;
import org.apache.pig.impl.util.Utils;
import org.apache.pig.parser.ParserException;
import org.kohsuke.graphviz.Edge;
import org.kohsuke.graphviz.Graph;
import org.kohsuke.graphviz.Node;
import org.kohsuke.graphviz.Style;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.googlecode.jatl.Html;
import com.netflix.lipstick.model.P2jPlan;
import com.netflix.lipstick.model.operators.P2jLOCogroup;
import com.netflix.lipstick.model.operators.P2jLOFilter;
import com.netflix.lipstick.model.operators.P2jLOJoin;
import com.netflix.lipstick.model.operators.P2jLOLimit;
import com.netflix.lipstick.model.operators.P2jLOLoad;
import com.netflix.lipstick.model.operators.P2jLOSplitOutput;
import com.netflix.lipstick.model.operators.P2jLOStore;
import com.netflix.lipstick.model.operators.P2jLogicalRelationalOperator;
import com.netflix.lipstick.model.operators.P2jLogicalRelationalOperator.Join;
import com.netflix.lipstick.model.operators.elements.JoinExpression;
import com.netflix.lipstick.model.operators.elements.SchemaElement;
/**
* Transforms P2jPlan into graphviz output.
*
* @author jmagnusson
*
*/
public class Pig2DotGenerator {
private static final Log LOG = LogFactory.getLog(Pig2DotGenerator.class);

// Background colors used when rendering plan nodes as HTML-labelled cells.
protected static final String BG_CLUSTER = "#E9E9E9";
protected static final String BG_ALIAS = "#424242";
protected static final String BG_EXPRESSION = "#BCBCBC";   // expression rows
protected static final String BG_MAP_TASK = "#3299BB";     // operator runs in a MAPPER step
protected static final String BG_RED_TASK = "#FF9900";     // operator runs in a REDUCER step
protected static final String BG_UNK_TASK = "#BF0A0D";     // step type unknown
protected static final String BG_WHITE = "#FFFFFF";

// Operator id -> operator lookup, taken from the plan below at construction.
protected Map<String, P2jLogicalRelationalOperator> p2jMap = null;
// The plan being rendered.
protected P2jPlan p2jPlan;
/**
 * Creates a generator for the given plan and caches its operator map.
 *
 * @param p2jPlan the P2jPlan to convert to dot / graphical format
 */
public Pig2DotGenerator(P2jPlan p2jPlan) {
    this.p2jPlan = p2jPlan;
    this.p2jMap = p2jPlan.getPlan();
}
/**
 * Returns the column span for the html representation of a logical operator:
 * join/cogroup nodes span one column per join expression, all other
 * operators span two columns.
 *
 * @param oper the logical operator
 * @return a string representing the column span
 */
protected String getColspan(P2jLogicalRelationalOperator oper) {
    String operator = oper.getOperator().toLowerCase();
    if (!operator.equals("lojoin") && !operator.equals("locogroup")) {
        return "2";
    }
    Join join = operator.equals("lojoin")
            ? ((P2jLOJoin) oper).getJoin()
            : ((P2jLOCogroup) oper).getGroup();
    return Integer.toString(join.getExpression().size());
}
/**
 * Returns the header color for a logical operator based on the MR step type
 * it executes in (map, reduce, or unknown).
 *
 * @param oper the logical operator
 * @return a string representing the header color
 */
protected String getJobColor(P2jLogicalRelationalOperator oper) {
    if (oper.getMapReduce() == null) {
        return BG_UNK_TASK;
    }
    String stepType = oper.getMapReduce().getStepType();
    if (stepType.equals("MAPPER")) {
        return BG_MAP_TASK;
    }
    if (stepType.equals("REDUCER")) {
        return BG_RED_TASK;
    }
    return BG_UNK_TASK;
}
/**
 * Checks if a logical operator's schema is equal to that of every one of its
 * predecessors that exposes a schema.
 *
 * @param oper the logical operator
 * @return True if the operator has a schema and no predecessor schema
 *         differs; False if the operator has no schema, any predecessor
 *         schema differs, or a schema string fails to parse
 */
protected Boolean schemaEqualsPredecessor(P2jLogicalRelationalOperator oper) {
    if (oper.getSchemaString() != null) {
        String operString = oper.getSchemaString();
        for (String predName : oper.getPredecessors()) {
            P2jLogicalRelationalOperator pred = p2jMap.get(predName);
            try {
                // Predecessors without a schema string are skipped rather
                // than treated as mismatches.
                if (pred.getSchemaString() != null) {
                    String predString = pred.getSchemaString();
                    // NOTE(review): the two boolean flags presumably relax
                    // part of the comparison (nulls/aliases) — confirm
                    // against Pig's Schema.equals documentation.
                    if (!Schema.equals(Utils.getSchemaFromBagSchemaString(predString),
                                       Utils.getSchemaFromBagSchemaString(operString),
                                       true,
                                       false)) {
                        return false;
                    }
                }
            } catch (ParserException e) {
                // An unparseable schema is treated as "not equal".
                LOG.warn("Error comparing operator predecessors: ", e);
                return false;
            }
        }
        return true;
    }
    return false;
}
/**
 * Decides whether the schema should be rendered for a logical operator:
 * only operators with a source line, a schema differing from their
 * predecessors', and a type that is not in the excluded set qualify.
 *
 * @param oper the logical operator
 * @return whether or not the schema should be displayed
 */
protected Boolean displaySchema(P2jLogicalRelationalOperator oper) {
    if (oper.getLocation().getLine() == null || schemaEqualsPredecessor(oper)) {
        return false;
    }
    String op = oper.getOperator();
    // These operator types never change the schema in an interesting way.
    return !(op.equalsIgnoreCase("LOSplit")
            || op.equalsIgnoreCase("LOFilter")
            || op.equalsIgnoreCase("LODistinct")
            || op.equalsIgnoreCase("LOLimit")
            || op.equalsIgnoreCase("LOJoin")
            || op.equalsIgnoreCase("LOCogroup"));
}
/**
 * Appends one white table row per schema element (alias and type columns)
 * to the given html, when the operator's schema should be displayed.
 *
 * @param html the html to append to
 * @param oper the logical operator
 * @return the appended html
 */
protected Html genSchema(Html html, P2jLogicalRelationalOperator oper) {
    if (displaySchema(oper) && oper.getSchema() != null) {
        for (Iterator<SchemaElement> it = oper.getSchema().iterator(); it.hasNext();) {
            SchemaElement element = it.next();
            String alias = element.getAlias();
            html.tr().bgcolor(BG_WHITE);
            html.td().bgcolor(BG_WHITE).text(alias != null ? alias : "?").end();
            html.td().bgcolor(BG_WHITE).text(element.getType()).end();
            html.end();
        }
    }
    return html;
}
/**
 * Append the html representation of join expressions to some preexisting html.
 * For join/cogroup operators this emits an optional header row of input
 * names (only when joining more than one input) followed by one row per
 * join-key position, one column per input.
 *
 * @param html the html to append to
 * @param oper the logical operator
 * @return the appened html
 */
protected Html genJoinExpressions(Html html, P2jLogicalRelationalOperator oper) {
    if (oper.getOperator().equalsIgnoreCase("LOJoin") || oper.getOperator().equalsIgnoreCase("LOCogroup")) {
        Join join;
        if (oper.getOperator().equalsIgnoreCase("LOJoin")) {
            join = ((P2jLOJoin) oper).getJoin();
        } else {
            join = ((P2jLOCogroup) oper).getGroup();
        }
        Set<Entry<String, JoinExpression>> expressions = join.getExpression().entrySet();
        // Column-major collection of join fields: exp.get(j) holds the key
        // fields of input j.
        List<List<String>> exp = Lists.newArrayList();
        for (Entry<String, JoinExpression> entry : expressions) {
            exp.add(entry.getValue().getFields());
        }
        // Header row of input names is only useful with 2+ inputs.
        if (expressions.size() > 1) {
            html.tr();
            for (Entry<String, JoinExpression> entry : expressions) {
                html.td().bgcolor(BG_EXPRESSION).text(entry.getKey() == null ? "null" : entry.getKey()).end();
            }
            html.end();
        }
        // One row per key position; assumes every input has the same number
        // of join fields as the first (standard for join/cogroup).
        for (int i = 0; i < exp.get(0).size(); i++) {
            html.tr();
            for (int j = 0; j < exp.size(); j++) {
                html.td().bgcolor(BG_WHITE).text(exp.get(j).get(i)).end();
            }
            html.end();
        }
        // Closes the element the caller opened before invoking this method.
        html.end();
    }
    return html;
}
/**
 * Builds extra text for an operator's node header: the originating macro
 * name when present, the row limit for LOLimit, or the join type/strategy
 * for LOJoin; empty string otherwise.
 *
 * @param oper the logical operator
 * @return a string containing additional header text
 */
protected String getAdditionalInfo(P2jLogicalRelationalOperator oper) {
    if (oper.getLocation().getMacro().size() > 0) {
        return "MACRO: " + oper.getLocation().getMacro().get(0);
    }
    String op = oper.getOperator();
    if (op.equalsIgnoreCase("LOLimit")) {
        return Long.toString(((P2jLOLimit) oper).getRowLimit());
    }
    if (op.equalsIgnoreCase("LOJoin")) {
        Join join = ((P2jLOJoin) oper).getJoin();
        return join.getType() + ", " + join.getStrategy();
    }
    return "";
}
/**
 * Appends operator-specific rows to the html: an expression row for
 * filter/split-output operators, and location/function rows for load/store
 * operators.
 *
 * @param html the html to append to
 * @param oper the logical operator
 * @return the appended html
 */
protected Html genMiscRow(Html html, P2jLogicalRelationalOperator oper) {
    String op = oper.getOperator();

    // Filter and split-output nodes carry a boolean expression to display.
    String expression = null;
    if (op.equalsIgnoreCase("LOFilter")) {
        expression = ((P2jLOFilter) oper).getExpression();
    } else if (op.equalsIgnoreCase("LOSplitOutput")) {
        expression = ((P2jLOSplitOutput) oper).getExpression();
    }
    if (expression != null) {
        html.tr().td().colspan(getColspan(oper)).bgcolor(BG_EXPRESSION);
        html.text(expression).end(2);
    }

    // Load and store nodes carry a storage location and function.
    String storageLocation = null;
    String storageFunction = null;
    if (op.equalsIgnoreCase("LOStore")) {
        P2jLOStore store = (P2jLOStore) oper;
        storageLocation = store.getStorageLocation();
        storageFunction = store.getStorageFunction();
    } else if (op.equalsIgnoreCase("LOLoad")) {
        P2jLOLoad load = (P2jLOLoad) oper;
        storageLocation = load.getStorageLocation();
        storageFunction = load.getStorageFunction();
    }
    if (storageLocation != null) {
        html.tr().td().colspan(getColspan(oper)).bgcolor(BG_EXPRESSION);
        html.text(storageLocation).end(2);
        html.tr().td().colspan(getColspan(oper)).bgcolor(BG_EXPRESSION);
        html.text(storageFunction).end(2);
    }
    return html;
}
/**
* Append a row describing the operation type of a logical operator to preexisting html.
*
* @param html the html to append to
* @param oper the logical operator
* @return the appended html
*/
protected Html genOperationRow(Html html, P2jLogicalRelationalOperator oper) {
String additionalInfo = getAdditionalInfo(oper);
if (additionalInfo.length() > 0) {
additionalInfo = " (" + additionalInfo + ")";
}
html.tr().td().colspan(getColspan(oper)).bgcolor(getJobColor(oper));
String op;
if (oper.getOperator().equalsIgnoreCase("LOCogroup")
&& ((P2jLOCogroup) oper).getGroup().getExpression().size() < 2) {
op = "GROUP";
} else {
op = oper.getOperator().substring(2).toUpperCase();
}
html.text(op + additionalInfo).end(2);
return html;
}
/**
* Append a row containing the pig alias responsible for a logical operator to preexisting html.
*
* @param html the html to append to
* @param oper the logical operator
* @return the appended html
*/
protected Html genAliasRow(Html html, P2jLogicalRelationalOperator oper) {
if (oper.getAlias() != null && !oper.getOperator().equalsIgnoreCase("LOSplit")) {
html.tr().td().colspan(getColspan(oper)).bgcolor(BG_ALIAS).font().color("#FFFFFF");
html.text(oper.getAlias()).end(3);
}
return html;
}
    /**
     * Generate the html describing a logical operator.
     * Builds a single-table label by stacking, in order: operation row, misc row,
     * alias row, join expressions, and schema. The row order determines the
     * visual layout, so the gen* calls below must not be reordered.
     *
     * @param oper the logical operator
     * @return a string representation of the html
     */
    protected String genNodeHtml(P2jLogicalRelationalOperator oper) {
        StringWriter writer = new StringWriter();
        Html html = new Html(writer);
        // Outer <font><table> wrapper; endAll() below closes every open element.
        html.font().attr("point-size", "12").table().border("0").attr("cellborder", "1").cellspacing("0");
        genOperationRow(html, oper);
        genMiscRow(html, oper);
        genAliasRow(html, oper);
        genJoinExpressions(html, oper);
        genSchema(html, oper);
        html.endAll();
        return writer.toString();
    }
    /**
     * Put attributes on a node in the graph (id, html, shape) based on logical operator.
     * Shape "none" lets the html label define the node's rendered appearance.
     *
     * @param node the node
     * @param oper the logical operator
     */
    protected void attributeGraphNode(Node node, P2jLogicalRelationalOperator oper) {
        // The operator uid serves as both the graph node id and the "id" attribute.
        node.id(oper.getUid());
        node.attr("id", oper.getUid());
        node.attr("html", genNodeHtml(oper));
        node.attr("shape", "none");
    }
/**
* Append a node to the proper subgraph based on map/reduce job.
*
* @param subgraphs map of M/R job scope to subgraph
* @param node the graph node to append
* @param oper the logical operator associated with the graph node
* @return a boolean indicating whether the node was appended to a subgraph
*/
protected Boolean appendToSubgraph(Map<String, Graph> subgraphs, Node node, P2jLogicalRelationalOperator oper) {
String jid = null;
if (oper.getMapReduce() != null && oper.getMapReduce().getJobId() != null) {
jid = oper.getMapReduce().getJobId();
if (!subgraphs.containsKey(jid)) {
Graph g = new Graph();
g.id("cluster_" + jid.replaceAll("-", ""));
g.attr("bgcolor", BG_CLUSTER);
Style s = new Style();
s.attr("rounded");
s.attr("filled");
g.style(s);
subgraphs.put(jid, g);
}
subgraphs.get(jid).node(node);
return false;
}
return false;
}
    /**
     * Generate a graph object for the logical plan.
     * First materializes one Node per operator (so edges can reference targets
     * created in any order), then attributes nodes, assigns them to per-job
     * subgraphs or the top-level graph, and wires successor edges.
     *
     * @return the graph object
     */
    protected Graph generateGraph() {
        Graph gv = new Graph();
        Map<String, Graph> subgraphs = Maps.newHashMap();
        gv.attr("rankdir", "TB");
        Map<P2jLogicalRelationalOperator, Node> graphMap = Maps.newHashMap();
        // Pass 1: create all nodes up front so edge targets always resolve.
        for (Entry<String, P2jLogicalRelationalOperator> e : p2jMap.entrySet()) {
            Node node = new Node();
            graphMap.put(e.getValue(), node);
        }
        // Pass 2: attribute each node, place it, and emit its outgoing edges.
        for (Entry<P2jLogicalRelationalOperator, Node> e : graphMap.entrySet()) {
            Node node = e.getValue();
            attributeGraphNode(node, e.getKey());
            if (!appendToSubgraph(subgraphs, node, e.getKey())) {
                gv.node(node);
            }
            for (String i : e.getKey().getSuccessors()) {
                // NOTE(review): assumes every successor uid is present in p2jMap;
                // graphMap.get(dst) would be null otherwise — TODO confirm.
                P2jLogicalRelationalOperator dst = p2jMap.get(i);
                Edge edge = new Edge(node, graphMap.get(dst));
                gv.edge(edge);
            }
        }
        for (Entry<String, Graph> sg : subgraphs.entrySet()) {
            gv.subGraph(sg.getValue());
        }
        return gv;
    }
/**
* Generate a dot representation of the P2jPlan in the specified format.
*
* @param format the format
* @return a string representation of the plan in the format specified
* @throws InterruptedException an interrupted exception
* @throws IOException Signals that an I/O exception has occurred.
*/
public String generatePlan(String format) throws InterruptedException, IOException {
LOG.info("Generating script graphic of type " + format);
ByteArrayOutputStream os = new ByteArrayOutputStream();
List<String> args = Lists.newArrayList();
args.add("dot");
args.add("-T" + format);
Graph g = generateGraph();
g.generateTo(args, os);
return os.toString();
}
}
| 330 |
0 | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick/Main.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.lipstick;
import java.io.BufferedInputStream;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.io.StringReader;
import java.text.ParseException;
import java.util.AbstractList;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.jar.Attributes;
import java.util.jar.JarFile;
import java.util.jar.Manifest;
import jline.ConsoleReader;
import jline.ConsoleReaderInputStream;
import jline.History;
import org.antlr.runtime.RecognitionException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.apache.log4j.PropertyConfigurator;
import org.apache.pig.LoadFunc;
import org.apache.pig.PigException;
import org.apache.pig.PigConfiguration;
import org.apache.pig.PigRunner.ReturnCode;
import org.apache.pig.backend.executionengine.ExecException;
import org.apache.pig.backend.hadoop.datastorage.ConfigurationUtil;
import org.apache.pig.classification.InterfaceAudience;
import org.apache.pig.classification.InterfaceStability;
import org.apache.pig.impl.PigContext;
import org.apache.pig.impl.PigImplConstants;
import org.apache.pig.impl.io.FileLocalizer;
import org.apache.pig.impl.util.JarManager;
import org.apache.pig.impl.util.LogUtils;
import org.apache.pig.impl.util.ObjectSerializer;
import org.apache.pig.impl.util.PropertiesUtil;
import org.apache.pig.impl.util.UDFContext;
import org.apache.pig.impl.util.Utils;
import org.apache.pig.parser.DryRunGruntParser;
import org.apache.pig.scripting.ScriptEngine;
import org.apache.pig.scripting.ScriptEngine.SupportedScriptLang;
import org.apache.pig.tools.cmdline.CmdLineParser;
import org.apache.pig.tools.grunt.LipstickGrunt;
import org.apache.pig.tools.pigstats.PigProgressNotificationListener;
import org.apache.pig.tools.pigstats.PigStats;
import org.apache.pig.tools.pigstats.PigStatsUtil;
import org.apache.pig.tools.pigstats.ScriptState;
import org.apache.pig.tools.timer.PerformanceTimerFactory;
import com.google.common.io.CharStreams;
/**
* Main class for Pig engine.
*/
@InterfaceAudience.LimitedPrivate({"Oozie"})
@InterfaceStability.Stable
public class Main {
    private final static Log log = LogFactory.getLog(Main.class);
    // Property keys for command-line driven configuration.
    private static final String LOG4J_CONF = "log4jconf";
    private static final String BRIEF = "brief";
    private static final String DEBUG = "debug";
    private static final String VERBOSE = "verbose";
    // Version metadata populated from the jar manifest in the static block below;
    // all remain null when no manifest is available (e.g. running from classes).
    private static final String version;
    private static final String majorVersion;
    private static final String minorVersion;
    private static final String patchVersion;
    private static final String svnRevision;
    private static final String buildTime;
    // How the script to execute was supplied on the command line.
    private enum ExecMode {STRING, FILE, SHELL, UNKNOWN}
    private static final String PROP_FILT_SIMPL_OPT
        = "pig.exec.filterLogicExpressionSimplifier";
    static {
        try {
            // Log the Lipstick build banner at startup.
            log.info("\n"
                    + CharStreams.toString(new InputStreamReader(Main.class.getResourceAsStream("/lipstick_build.txt"),
                            "UTF-8")));
        } catch (IOException e) {
            // Ignored: the banner is best-effort; startup must not fail without it.
        }
    }
    // Keys used to instantiate a user-supplied progress listener (see makeListener).
    protected static final String PROGRESS_NOTIFICATION_LISTENER_KEY = "pig.notification.listener";
    protected static final String PROGRESS_NOTIFICATION_LISTENER_ARG_KEY = "pig.notification.listener.arg";
static {
Attributes attr=null;
try {
String findContainingJar = JarManager.findContainingJar(Main.class);
if (findContainingJar != null) {
JarFile jar = new JarFile(findContainingJar);
final Manifest manifest = jar.getManifest();
final Map<String,Attributes> attrs = manifest.getEntries();
attr = attrs.get("org/apache/pig");
} else {
log.info("Unable to read pigs manifest file as we are not running from a jar, version information unavailable");
}
} catch (Exception e) {
log.warn("Unable to read pigs manifest file, version information unavailable", e);
}
if (attr!=null) {
version = attr.getValue("Implementation-Version");
svnRevision = attr.getValue("Svn-Revision");
buildTime = attr.getValue("Build-TimeStamp");
String[] split = version.split("\\.");
majorVersion=split[0];
minorVersion=split[1];
patchVersion=split[2];
} else {
version=null;
majorVersion=null;
minorVersion=null;
patchVersion=null;
svnRevision=null;
buildTime=null;
}
}
    /**
     * The Main-Class for the Pig Jar that will provide a shell and setup a classpath appropriate
     * for executing Jar files. Warning, this method calls System.exit().
     *
     * @param args
     *            -jar can be used to add additional jar files (colon separated). - will start a
     *            shell. -e will execute the rest of the command line as if it was input to the
     *            shell.
     * @throws IOException
     */
    public static void main(String args[]) {
        // Delegates to run() with no pre-registered listener; the exit code is
        // whatever run() returns (see ReturnCode).
        System.exit(run(args, null));
    }
    /**
     * Parse the command line, configure the Pig context/logging, and execute the
     * script in the selected mode (inline string, script file, or interactive
     * grunt shell via LipstickGrunt). Returns a ReturnCode value for System.exit.
     *
     * @param args full command-line arguments (hadoop generic options first)
     * @param listener optional progress listener; when null one is built from
     *                 properties, falling back to the Lipstick listener
     * @return a ReturnCode exit status
     */
    static int run(String args[], PigProgressNotificationListener listener) {
        int rc = 1;
        boolean verbose = false;
        boolean gruntCalled = false;
        boolean deleteTempFiles = true;
        String logFileName = null;
        try {
            // Hadoop's generic options are consumed first; the remainder belongs to pig.
            Configuration conf = new Configuration(false);
            GenericOptionsParser parser = new GenericOptionsParser(conf, args);
            conf = parser.getConfiguration();
            Properties properties = new Properties();
            PropertiesUtil.loadDefaultProperties(properties);
            properties.putAll(ConfigurationUtil.toProperties(conf));
            if (listener == null) {
                listener = makeListener(properties);
            }
            String[] pigArgs = parser.getRemainingArgs();
            boolean userSpecifiedLog = false;
            boolean checkScriptOnly = false;
            BufferedReader pin = null;
            boolean debug = false;
            boolean dryrun = false;
            boolean embedded = false;
            List<String> params = new ArrayList<String>();
            List<String> paramFiles = new ArrayList<String>();
            HashSet<String> disabledOptimizerRules = new HashSet<String>();
            CmdLineParser opts = new CmdLineParser(pigArgs);
            opts.registerOpt('4', "log4jconf", CmdLineParser.ValueExpected.REQUIRED);
            opts.registerOpt('b', "brief", CmdLineParser.ValueExpected.NOT_ACCEPTED);
            opts.registerOpt('c', "check", CmdLineParser.ValueExpected.NOT_ACCEPTED);
            opts.registerOpt('d', "debug", CmdLineParser.ValueExpected.REQUIRED);
            opts.registerOpt('e', "execute", CmdLineParser.ValueExpected.NOT_ACCEPTED);
            opts.registerOpt('f', "file", CmdLineParser.ValueExpected.REQUIRED);
            opts.registerOpt('g', "embedded", CmdLineParser.ValueExpected.REQUIRED);
            opts.registerOpt('h', "help", CmdLineParser.ValueExpected.OPTIONAL);
            opts.registerOpt('i', "version", CmdLineParser.ValueExpected.OPTIONAL);
            opts.registerOpt('l', "logfile", CmdLineParser.ValueExpected.REQUIRED);
            opts.registerOpt('m', "param_file", CmdLineParser.ValueExpected.OPTIONAL);
            opts.registerOpt('p', "param", CmdLineParser.ValueExpected.OPTIONAL);
            opts.registerOpt('r', "dryrun", CmdLineParser.ValueExpected.NOT_ACCEPTED);
            opts.registerOpt('t', "optimizer_off", CmdLineParser.ValueExpected.REQUIRED);
            opts.registerOpt('v', "verbose", CmdLineParser.ValueExpected.NOT_ACCEPTED);
            opts.registerOpt('w', "warning", CmdLineParser.ValueExpected.NOT_ACCEPTED);
            opts.registerOpt('x', "exectype", CmdLineParser.ValueExpected.REQUIRED);
            opts.registerOpt('F', "stop_on_failure", CmdLineParser.ValueExpected.NOT_ACCEPTED);
            opts.registerOpt('M', "no_multiquery", CmdLineParser.ValueExpected.NOT_ACCEPTED);
            opts.registerOpt('N', "no_fetch", CmdLineParser.ValueExpected.NOT_ACCEPTED);
            opts.registerOpt('P', "propertyFile", CmdLineParser.ValueExpected.REQUIRED);
            ExecMode mode = ExecMode.UNKNOWN;
            String file = null;
            String engine = null;
            // set up client side system properties in UDF context
            UDFContext.getUDFContext().setClientSystemProps(properties);
            char opt;
            while ((opt = opts.getNextOpt()) != CmdLineParser.EndOfOpts) {
                switch (opt) {
                case '4':
                    String log4jconf = opts.getValStr();
                    if(log4jconf != null){
                        properties.setProperty(LOG4J_CONF, log4jconf);
                    }
                    break;
                case 'b':
                    properties.setProperty(BRIEF, "true");
                    break;
                case 'c':
                    checkScriptOnly = true;
                    break;
                case 'd':
                    String logLevel = opts.getValStr();
                    if (logLevel != null) {
                        properties.setProperty(DEBUG, logLevel);
                    }
                    debug = true;
                    break;
                case 'e':
                    mode = ExecMode.STRING;
                    break;
                case 'f':
                    mode = ExecMode.FILE;
                    file = opts.getValStr();
                    break;
                case 'g':
                    embedded = true;
                    engine = opts.getValStr();
                    break;
                case 'F':
                    properties.setProperty("stop.on.failure", ""+true);
                    break;
                case 'h':
                    String topic = opts.getValStr();
                    if (topic != null)
                        if (topic.equalsIgnoreCase("properties"))
                            printProperties();
                        else{
                            System.out.println("Invalide help topic - " + topic);
                            usage();
                        }
                    else
                        usage();
                    return ReturnCode.SUCCESS;
                case 'i':
                    System.out.println(getVersionString());
                    return ReturnCode.SUCCESS;
                case 'l':
                    //call to method that validates the path to the log file
                    //and sets up the file to store the client side log file
                    String logFileParameter = opts.getValStr();
                    if (logFileParameter != null && logFileParameter.length() > 0) {
                        logFileName = validateLogFile(logFileParameter, null);
                    } else {
                        logFileName = validateLogFile(logFileName, null);
                    }
                    userSpecifiedLog = true;
                    properties.setProperty("pig.logfile", (logFileName == null? "": logFileName));
                    break;
                case 'm':
                    paramFiles.add(opts.getValStr());
                    break;
                case 'M':
                    // turns off multiquery optimization
                    properties.setProperty("opt.multiquery",""+false);
                    break;
                case 'N':
                    properties.setProperty(PigConfiguration.OPT_FETCH,""+false);
                    break;
                case 'p':
                    params.add(opts.getValStr());
                    break;
                case 'r':
                    // currently only used for parameter substitution
                    // will be extended in the future
                    dryrun = true;
                    break;
                case 't':
                    disabledOptimizerRules.add(opts.getValStr());
                    break;
                case 'v':
                    properties.setProperty(VERBOSE, ""+true);
                    verbose = true;
                    break;
                case 'w':
                    properties.setProperty("aggregate.warning", ""+false);
                    break;
                case 'x':
                    properties.setProperty("exectype", opts.getValStr());
                    break;
                case 'P':
                {
                    InputStream inputStream = null;
                    try {
                        FileLocalizer.FetchFileRet localFileRet = FileLocalizer.fetchFile(properties, opts.getValStr());
                        inputStream = new BufferedInputStream(new FileInputStream(localFileRet.file));
                        properties.load(inputStream) ;
                    } catch (IOException e) {
                        throw new RuntimeException("Unable to parse properties file '" + opts.getValStr() + "'");
                    } finally {
                        if (inputStream != null) {
                            try {
                                inputStream.close();
                            } catch (IOException e) {
                            }
                        }
                    }
                }
                break;
                default: {
                    Character cc = Character.valueOf(opt);
                    throw new AssertionError("Unhandled option " + cc.toString());
                }
                }
            }
            // create the context with the parameter
            PigContext pigContext = new PigContext(properties);
            // create the static script state object
            ScriptState scriptState = pigContext.getExecutionEngine().instantiateScriptState();
            String commandLine = LoadFunc.join((AbstractList<String>)Arrays.asList(args), " ");
            scriptState.setCommandLine(commandLine);
            // Register either the supplied/configured listener or the Lipstick one,
            // so plan/progress events are reported to the Lipstick service.
            if (listener != null) {
                scriptState.registerListener(listener);
            } else {
                scriptState.registerListener(new com.netflix.lipstick.listeners.LipstickPPNL());
            }
            ScriptState.start(scriptState);
            pigContext.getProperties().setProperty("pig.cmd.args", commandLine);
            if(logFileName == null && !userSpecifiedLog) {
                logFileName = validateLogFile(properties.getProperty("pig.logfile"), null);
            }
            pigContext.getProperties().setProperty("pig.logfile", (logFileName == null? "": logFileName));
            // configure logging
            configureLog4J(properties, pigContext);
            log.info(getVersionString().replace("\n", ""));
            if(logFileName != null) {
                log.info("Logging error messages to: " + logFileName);
            }
            deleteTempFiles = Boolean.valueOf(properties.getProperty(
                        PigConfiguration.PIG_DELETE_TEMP_FILE, "true"));
            if( ! Boolean.valueOf(properties.getProperty(PROP_FILT_SIMPL_OPT, "false"))){
                //turn off if the user has not explicitly turned on this optimization
                disabledOptimizerRules.add("FilterLogicExpressionSimplifier");
            }
            pigContext.getProperties().setProperty(PigImplConstants.PIG_OPTIMIZER_RULES_KEY,
                    ObjectSerializer.serialize(disabledOptimizerRules));
            PigContext.setClassLoader(pigContext.createCl(null));
            // construct the parameter substitution preprocessor
            LipstickGrunt grunt = null;
            BufferedReader in;
            String substFile = null;
            paramFiles = fetchRemoteParamFiles(paramFiles, properties);
            pigContext.setParams(params);
            pigContext.setParamFiles(paramFiles);
            // Dispatch on how the script was supplied (-f file, -e string, or neither).
            switch (mode) {
            case FILE: {
                String remainders[] = opts.getRemainingArgs();
                if (remainders != null) {
                    pigContext.getProperties().setProperty(PigContext.PIG_CMD_ARGS_REMAINDERS,
                            ObjectSerializer.serialize(remainders));
                }
                FileLocalizer.FetchFileRet localFileRet = FileLocalizer.fetchFile(properties, file);
                if (localFileRet.didFetch) {
                    properties.setProperty("pig.jars.relative.to.dfs", "true");
                }
                scriptState.setFileName(file);
                if (embedded) {
                    return runEmbeddedScript(pigContext, localFileRet.file.getPath(), engine);
                } else {
                    SupportedScriptLang type = determineScriptType(localFileRet.file.getPath());
                    if (type != null) {
                        return runEmbeddedScript(pigContext, localFileRet.file
                                .getPath(), type.name().toLowerCase());
                    }
                }
                //Reader is created by first loading "pig.load.default.statements" or .pigbootup file if available
                in = new BufferedReader(new InputStreamReader(Utils.getCompositeStream(new FileInputStream(localFileRet.file), properties)));
                // run parameter substitution preprocessor first
                substFile = file + ".substituted";
                pin = runParamPreprocessor(pigContext, in, substFile, debug || dryrun || checkScriptOnly);
                if (dryrun) {
                    if (dryrun(substFile, pigContext)) {
                        log.info("Dry run completed. Substituted pig script is at "
                                + substFile
                                + ". Expanded pig script is at "
                                + file + ".expanded");
                    } else {
                        log.info("Dry run completed. Substituted pig script is at "
                                + substFile);
                    }
                    return ReturnCode.SUCCESS;
                }
                logFileName = validateLogFile(logFileName, file);
                pigContext.getProperties().setProperty("pig.logfile", (logFileName == null? "": logFileName));
                // Set job name based on name of the script
                pigContext.getProperties().setProperty(PigContext.JOB_NAME,
                        "PigLatin:" +new File(file).getName()
                );
                if (!debug) {
                    new File(substFile).deleteOnExit();
                }
                scriptState.setScript(new File(file));
                grunt = new LipstickGrunt(pin, pigContext);
                gruntCalled = true;
                if(checkScriptOnly) {
                    grunt.checkScript(substFile);
                    System.err.println(file + " syntax OK");
                    rc = ReturnCode.SUCCESS;
                } else {
                    int results[] = grunt.exec();
                    rc = getReturnCodeForStats(results);
                }
                return rc;
            }
            case STRING: {
                if(checkScriptOnly) {
                    System.err.println("ERROR:" +
                            "-c (-check) option is only valid " +
                            "when executing pig with a pig script file)");
                    return ReturnCode.ILLEGAL_ARGS;
                }
                // Gather up all the remaining arguments into a string and pass them into
                // grunt.
                StringBuffer sb = new StringBuffer();
                String remainders[] = opts.getRemainingArgs();
                for (int i = 0; i < remainders.length; i++) {
                    if (i != 0) sb.append(' ');
                    sb.append(remainders[i]);
                }
                sb.append('\n');
                scriptState.setScript(sb.toString());
                in = new BufferedReader(new StringReader(sb.toString()));
                grunt = new LipstickGrunt(in, pigContext);
                gruntCalled = true;
                int results[] = grunt.exec();
                return getReturnCodeForStats(results);
            }
            default:
                break;
            }
            // If we're here, we don't know yet what they want. They may have just
            // given us a jar to execute, they might have given us a pig script to
            // execute, or they might have given us a dash (or nothing) which means to
            // run grunt interactive.
            String remainders[] = opts.getRemainingArgs();
            if (remainders == null) {
                if(checkScriptOnly) {
                    System.err.println("ERROR:" +
                            "-c (-check) option is only valid " +
                            "when executing pig with a pig script file)");
                    return ReturnCode.ILLEGAL_ARGS;
                }
                // Interactive
                mode = ExecMode.SHELL;
                //Reader is created by first loading "pig.load.default.statements" or .pigbootup file if available
                ConsoleReader reader = new ConsoleReader(Utils.getCompositeStream(System.in, properties), new OutputStreamWriter(System.out));
                reader.setDefaultPrompt("grunt> ");
                final String HISTORYFILE = ".pig_history";
                String historyFile = System.getProperty("user.home") + File.separator + HISTORYFILE;
                reader.setHistory(new History(new File(historyFile)));
                ConsoleReaderInputStream inputStream = new ConsoleReaderInputStream(reader);
                grunt = new LipstickGrunt(new BufferedReader(new InputStreamReader(inputStream)), pigContext);
                grunt.setConsoleReader(reader);
                gruntCalled = true;
                grunt.run();
                return ReturnCode.SUCCESS;
            } else {
                pigContext.getProperties().setProperty(PigContext.PIG_CMD_ARGS_REMAINDERS, ObjectSerializer.serialize(remainders));
                // They have a pig script they want us to run.
                mode = ExecMode.FILE;
                FileLocalizer.FetchFileRet localFileRet = FileLocalizer.fetchFile(properties, remainders[0]);
                if (localFileRet.didFetch) {
                    properties.setProperty("pig.jars.relative.to.dfs", "true");
                }
                scriptState.setFileName(remainders[0]);
                if (embedded) {
                    return runEmbeddedScript(pigContext, localFileRet.file.getPath(), engine);
                } else {
                    SupportedScriptLang type = determineScriptType(localFileRet.file.getPath());
                    if (type != null) {
                        return runEmbeddedScript(pigContext, localFileRet.file
                                .getPath(), type.name().toLowerCase());
                    }
                }
                //Reader is created by first loading "pig.load.default.statements" or .pigbootup file if available
                InputStream seqInputStream = Utils.getCompositeStream(new FileInputStream(localFileRet.file), properties);
                in = new BufferedReader(new InputStreamReader(seqInputStream));
                // run parameter substitution preprocessor first
                substFile = remainders[0] + ".substituted";
                pin = runParamPreprocessor(pigContext, in, substFile, debug || dryrun || checkScriptOnly);
                if (dryrun) {
                    if (dryrun(substFile, pigContext)) {
                        log.info("Dry run completed. Substituted pig script is at "
                                + substFile
                                + ". Expanded pig script is at "
                                + remainders[0] + ".expanded");
                    } else {
                        log.info("Dry run completed. Substituted pig script is at "
                                + substFile);
                    }
                    return ReturnCode.SUCCESS;
                }
                logFileName = validateLogFile(logFileName, remainders[0]);
                pigContext.getProperties().setProperty("pig.logfile", (logFileName == null? "": logFileName));
                if (!debug) {
                    new File(substFile).deleteOnExit();
                }
                // Set job name based on name of the script
                pigContext.getProperties().setProperty(PigContext.JOB_NAME,
                        "PigLatin:" +new File(remainders[0]).getName()
                );
                scriptState.setScript(localFileRet.file);
                grunt = new LipstickGrunt(pin, pigContext);
                gruntCalled = true;
                if(checkScriptOnly) {
                    grunt.checkScript(substFile);
                    System.err.println(remainders[0] + " syntax OK");
                    rc = ReturnCode.SUCCESS;
                } else {
                    int results[] = grunt.exec();
                    rc = getReturnCodeForStats(results);
                }
                return rc;
            }
            // Per Utkarsh and Chris invocation of jar file via pig depricated.
        } catch (ParseException e) {
            usage();
            rc = ReturnCode.PARSE_EXCEPTION;
            PigStatsUtil.setErrorMessage(e.getMessage());
            PigStatsUtil.setErrorThrowable(e);
        } catch (org.apache.pig.tools.parameters.ParseException e) {
            // usage();
            rc = ReturnCode.PARSE_EXCEPTION;
            PigStatsUtil.setErrorMessage(e.getMessage());
            PigStatsUtil.setErrorThrowable(e);
        } catch (IOException e) {
            if (e instanceof PigException) {
                PigException pe = (PigException)e;
                rc = (pe.retriable()) ? ReturnCode.RETRIABLE_EXCEPTION
                        : ReturnCode.PIG_EXCEPTION;
                PigStatsUtil.setErrorMessage(pe.getMessage());
                PigStatsUtil.setErrorCode(pe.getErrorCode());
            } else {
                rc = ReturnCode.IO_EXCEPTION;
                PigStatsUtil.setErrorMessage(e.getMessage());
            }
            PigStatsUtil.setErrorThrowable(e);
            if(!gruntCalled) {
                LogUtils.writeLog(e, logFileName, log, verbose, "Error before Pig is launched");
            }
        } catch (Throwable e) {
            rc = ReturnCode.THROWABLE_EXCEPTION;
            PigStatsUtil.setErrorMessage(e.getMessage());
            PigStatsUtil.setErrorThrowable(e);
            if(!gruntCalled) {
                LogUtils.writeLog(e, logFileName, log, verbose, "Error before Pig is launched");
            }
        } finally {
            if (deleteTempFiles) {
                // clear temp files
                FileLocalizer.deleteTempFiles();
            }
            PerformanceTimerFactory.getPerfTimerFactory().dumpTimers();
        }
        return rc;
    }
protected static PigProgressNotificationListener makeListener(Properties properties) {
try {
return PigContext.instantiateObjectFromParams(
ConfigurationUtil.toConfiguration(properties),
PROGRESS_NOTIFICATION_LISTENER_KEY,
PROGRESS_NOTIFICATION_LISTENER_ARG_KEY,
PigProgressNotificationListener.class);
} catch (ExecException e) {
throw new RuntimeException(e);
}
}
private static int getReturnCodeForStats(int[] stats) {
return (stats[1] == 0) ? ReturnCode.SUCCESS // no failed jobs
: (stats[0] == 0) ? ReturnCode.FAILURE // no succeeded jobs
: ReturnCode.PARTIAL_FAILURE; // some jobs have failed
}
public static boolean dryrun(String scriptFile, PigContext pigContext)
throws RecognitionException, IOException {
BufferedReader rd = new BufferedReader(new FileReader(scriptFile));
DryRunGruntParser dryrun = new DryRunGruntParser(rd, scriptFile,
pigContext);
boolean hasMacro = dryrun.parseStopOnError();
if (hasMacro) {
String expandedFile = scriptFile.replace(".substituted",
".expanded");
BufferedWriter fw = new BufferedWriter(new FileWriter(expandedFile));
fw.append(dryrun.getResult());
fw.close();
}
return hasMacro;
}
    //TODO jz: log4j.properties should be used instead
    /**
     * Configure log4j for the client and propagate the resolved level to the
     * backend. An explicit -4/-log4jconf file wins; otherwise a default
     * configuration is synthesized from the "brief"/"debug" properties and,
     * when "pig.logfile.level" is set, a rolling file appender is added.
     */
    private static void configureLog4J(Properties properties, PigContext pigContext) {
        // TODO Add a file appender for the logs
        // TODO Need to create a property in the properties file for it.
        // sgroschupf, 25Feb2008: this method will be obsolete with PIG-115.
        String log4jconf = properties.getProperty(LOG4J_CONF);
        String trueString = "true";
        boolean brief = trueString.equalsIgnoreCase(properties.getProperty(BRIEF));
        Level logLevel = Level.INFO;
        String logLevelString = properties.getProperty(DEBUG);
        if (logLevelString != null){
            logLevel = Level.toLevel(logLevelString, Level.INFO);
        }
        Properties props = new Properties();
        FileReader propertyReader = null;
        if (log4jconf != null) {
            try {
                propertyReader = new FileReader(log4jconf);
                props.load(propertyReader);
            }
            catch (IOException e)
            {
                System.err.println("Warn: Cannot open log4j properties file, use default");
            }
            finally
            {
                if (propertyReader != null) try {propertyReader.close();} catch(Exception e) {}
            }
        }
        // props is empty both when no conf file was given and when loading it
        // failed; either way fall back to the synthesized default config.
        if (props.size() == 0) {
            props.setProperty("log4j.logger.org.apache.pig", logLevel.toString());
            if((logLevelString = System.getProperty("pig.logfile.level")) == null){
                props.setProperty("log4j.rootLogger", "INFO, PIGCONSOLE");
            }
            else{
                logLevel = Level.toLevel(logLevelString, Level.INFO);
                props.setProperty("log4j.logger.org.apache.pig", logLevel.toString());
                props.setProperty("log4j.rootLogger", "INFO, PIGCONSOLE, F");
                props.setProperty("log4j.appender.F","org.apache.log4j.RollingFileAppender");
                props.setProperty("log4j.appender.F.File",properties.getProperty("pig.logfile"));
                props.setProperty("log4j.appender.F.layout","org.apache.log4j.PatternLayout");
                props.setProperty("log4j.appender.F.layout.ConversionPattern", brief ? "%m%n" : "%d [%t] %-5p %c - %m%n");
            }
            props.setProperty("log4j.appender.PIGCONSOLE","org.apache.log4j.ConsoleAppender");
            props.setProperty("log4j.appender.PIGCONSOLE.target", "System.err");
            props.setProperty("log4j.appender.PIGCONSOLE.layout","org.apache.log4j.PatternLayout");
            props.setProperty("log4j.appender.PIGCONSOLE.layout.ConversionPattern", brief ? "%m%n" : "%d [%t] %-5p %c - %m%n");
        }
        PropertyConfigurator.configure(props);
        // Re-read the effective level so a level set by the user's conf file is
        // what gets propagated to the backend.
        logLevel = Logger.getLogger("org.apache.pig").getLevel();
        if (logLevel==null) {
            logLevel = Logger.getLogger("org.apache.pig").getEffectiveLevel();
        }
        Properties backendProps = pigContext.getLog4jProperties();
        backendProps.setProperty("log4j.logger.org.apache.pig", logLevel.toString());
        pigContext.setLog4jProperties(backendProps);
        pigContext.setDefaultLogLevel(logLevel);
    }
private static List<String> fetchRemoteParamFiles(List<String> paramFiles, Properties properties)
throws IOException {
List<String> paramFiles2 = new ArrayList<String>();
for (String param: paramFiles) {
FileLocalizer.FetchFileRet localFileRet = FileLocalizer.fetchFile(properties, param);
paramFiles2.add(localFileRet.file.getAbsolutePath());
}
return paramFiles2;
}
// returns the stream of final pig script to be passed to Grunt
private static BufferedReader runParamPreprocessor(PigContext context, BufferedReader origPigScript,
String scriptFile, boolean createFile)
throws org.apache.pig.tools.parameters.ParseException, IOException{
if (createFile) {
return context.doParamSubstitutionOutputToFile(origPigScript, scriptFile);
} else {
String substituted = context.doParamSubstitution(origPigScript);
return new BufferedReader(new StringReader(substituted));
}
}
    /**
     * Returns the major version of Pig being run, or null when no manifest was
     * available at class load time.
     */
    public static String getMajorVersion() {
        return majorVersion;
    }
    /**
     * Returns the minor version of the Pig build being run, or null when no
     * manifest was available at class load time.
     */
    public static String getMinorVersion() {
        return minorVersion;
    }
    /**
     * Returns the patch version of the Pig build being run, or null when no
     * manifest was available at class load time.
     */
    public static String getPatchVersion() {
        return patchVersion;
    }
    /**
     * Returns the svn revision number of the Pig build being run, or null when
     * no manifest was available at class load time.
     */
    public static String getSvnRevision() {
        return svnRevision;
    }
    /**
     * Returns the built time of the Pig build being run, or null when no
     * manifest was available at class load time.
     */
    public static String getBuildTime() {
        return buildTime;
    }
private static String getVersionString() {
try {
return CharStreams.toString(new InputStreamReader(Main.class.getResourceAsStream("/lipstick_build.txt"),
"UTF-8"));
} catch (IOException e) {
return "Lipstick - version unknown";
}
}
    /**
     * Print usage string.
     *
     * Writes the version banner from getVersionString() followed by the full
     * command-line help text to stdout. The text is user-visible output and
     * its formatting should be kept byte-for-byte stable.
     */
    public static void usage()
    {
        System.out.println("\n"+getVersionString()+"\n");
        System.out.println("USAGE: Pig [options] [-] : Run interactively in grunt shell.");
        System.out.println("       Pig [options] -e[xecute] cmd [cmd ...] : Run cmd(s).");
        System.out.println("       Pig [options] [-f[ile]] file : Run cmds found in file.");
        System.out.println("  options include:");
        System.out.println("    -4, -log4jconf - Log4j configuration file, overrides log conf");
        System.out.println("    -b, -brief - Brief logging (no timestamps)");
        System.out.println("    -c, -check - Syntax check");
        System.out.println("    -d, -debug - Debug level, INFO is default");
        System.out.println("    -e, -execute - Commands to execute (within quotes)");
        System.out.println("    -f, -file - Path to the script to execute");
        System.out.println("    -g, -embedded - ScriptEngine classname or keyword for the ScriptEngine");
        System.out.println("    -h, -help - Display this message. You can specify topic to get help for that topic.");
        System.out.println("        properties is the only topic currently supported: -h properties.");
        System.out.println("    -i, -version - Display version information");
        System.out.println("    -l, -logfile - Path to client side log file; default is current working directory.");
        System.out.println("    -m, -param_file - Path to the parameter file");
        System.out.println("    -p, -param - Key value pair of the form param=val");
        System.out.println("    -r, -dryrun - Produces script with substituted parameters. Script is not executed.");
        System.out.println("    -t, -optimizer_off - Turn optimizations off. The following values are supported:");
        System.out.println("        SplitFilter - Split filter conditions");
        System.out.println("        PushUpFilter - Filter as early as possible");
        System.out.println("        MergeFilter - Merge filter conditions");
        System.out.println("        PushDownForeachFlatten - Join or explode as late as possible");
        System.out.println("        LimitOptimizer - Limit as early as possible");
        System.out.println("        ColumnMapKeyPrune - Remove unused data");
        System.out.println("        AddForEach - Add ForEach to remove unneeded columns");
        System.out.println("        MergeForEach - Merge adjacent ForEach");
        System.out.println("        GroupByConstParallelSetter - Force parallel 1 for \"group all\" statement");
        System.out.println("        All - Disable all optimizations");
        System.out.println("        All optimizations listed here are enabled by default. Optimization values are case insensitive.");
        System.out.println("    -v, -verbose - Print all error messages to screen");
        System.out.println("    -w, -warning - Turn warning logging on; also turns warning aggregation off");
        System.out.println("    -x, -exectype - Set execution mode: local|mapreduce, default is mapreduce.");
        System.out.println("    -F, -stop_on_failure - Aborts execution on the first failed job; default is off");
        System.out.println("    -M, -no_multiquery - Turn multiquery optimization off; default is on");
        System.out.println("    -N, -no_fetch - Turn fetch optimization off; default is on");
        System.out.println("    -P, -propertyFile - Path to property file");
        System.out.println("    -printCmdDebug - Overrides anything else and prints the actual command used to run Pig, including");
        System.out.println("        any environment variables that are set by the pig command.");
    }
    /**
     * Prints the supported pig properties, grouped by category, to stdout.
     * This is the body of the "-h properties" help topic. The text is
     * user-visible output and its formatting should be kept stable.
     */
    public static void printProperties(){
        System.out.println("The following properties are supported:");
        System.out.println("    Logging:");
        System.out.println("        verbose=true|false; default is false. This property is the same as -v switch");
        System.out.println("        brief=true|false; default is false. This property is the same as -b switch");
        System.out.println("        debug=OFF|ERROR|WARN|INFO|DEBUG; default is INFO. This property is the same as -d switch");
        System.out.println("        aggregate.warning=true|false; default is true. If true, prints count of warnings");
        System.out.println("            of each type rather than logging each warning.");
        System.out.println("    Performance tuning:");
        System.out.println("        pig.cachedbag.memusage=<mem fraction>; default is 0.2 (20% of all memory).");
        System.out.println("            Note that this memory is shared across all large bags used by the application.");
        System.out.println("        pig.skewedjoin.reduce.memusagea=<mem fraction>; default is 0.3 (30% of all memory).");
        System.out.println("            Specifies the fraction of heap available for the reducer to perform the join.");
        System.out.println("        pig.exec.nocombiner=true|false; default is false. ");
        System.out.println("            Only disable combiner as a temporary workaround for problems.");
        System.out.println("        opt.multiquery=true|false; multiquery is on by default.");
        System.out.println("            Only disable multiquery as a temporary workaround for problems.");
        System.out.println("        opt.fetch=true|false; fetch is on by default.");
        System.out.println("            Scripts containing Filter, Foreach, Limit, Stream, and Union can be dumped without MR jobs.");
        System.out.println("        pig.tmpfilecompression=true|false; compression is off by default.");
        System.out.println("            Determines whether output of intermediate jobs is compressed.");
        System.out.println("        pig.tmpfilecompression.codec=lzo|gzip; default is gzip.");
        System.out.println("            Used in conjunction with pig.tmpfilecompression. Defines compression type.");
        System.out.println("        pig.noSplitCombination=true|false. Split combination is on by default.");
        System.out.println("            Determines if multiple small files are combined into a single map.");
        System.out.println("        pig.exec.mapPartAgg=true|false. Default is false.");
        System.out.println("            Determines if partial aggregation is done within map phase, ");
        System.out.println("            before records are sent to combiner.");
        System.out.println("        pig.exec.mapPartAgg.minReduction=<min aggregation factor>. Default is 10.");
        System.out.println("            If the in-map partial aggregation does not reduce the output num records");
        System.out.println("            by this factor, it gets disabled.");
        System.out.println("        " + PROP_FILT_SIMPL_OPT + "=true|false; Default is false.");
        System.out.println("            Enable optimizer rules to simplify filter expressions.");
        System.out.println("    Miscellaneous:");
        System.out.println("        exectype=mapreduce|local; default is mapreduce. This property is the same as -x switch");
        System.out.println("        pig.additional.jars=<colon seperated list of jars>. Used in place of register command.");
        System.out.println("        udf.import.list=<comma seperated list of imports>. Used to avoid package names in UDF.");
        System.out.println("        stop.on.failure=true|false; default is false. Set to true to terminate on the first error.");
        System.out.println("        pig.datetime.default.tz=<UTC time offset>. e.g. +08:00. Default is the default timezone of the host.");
        System.out.println("            Determines the timezone used to handle datetime datatype and UDFs. ");
        System.out.println("Additionally, any Hadoop property can be specified.");
    }
    /**
     * Validates and resolves the client-side log file path.
     *
     * Resolution order:
     *   1. If logFileName is an existing, writable directory, a default file
     *      name (derived from the script name plus a timestamp) is appended.
     *   2. If logFileName names an existing or creatable file whose directory
     *      is writable, its canonical path is returned.
     *   3. Otherwise the current working directory is used.
     *
     * @param logFileName user-supplied log file or directory path; may be null
     * @param scriptName  path of the pig script being run, used to derive the
     *                    default log file name; may be null
     * @return the resolved log file path, or null when no writable location
     *         could be determined
     */
    private static String validateLogFile(String logFileName, String scriptName) {
        String strippedDownScriptName = null;
        if(scriptName != null) {
            File scriptFile = new File(scriptName);
            if(!scriptFile.isDirectory()) {
                String scriptFileAbsPath;
                try {
                    scriptFileAbsPath = scriptFile.getCanonicalPath();
                    strippedDownScriptName = getFileFromCanonicalPath(scriptFileAbsPath);
                } catch (IOException ioe) {
                    log.warn("Could not compute canonical path to the script file " + ioe.getMessage());
                    // Fall back to the generic "pig_" prefix below.
                    strippedDownScriptName = null;
                }
            }
        }
        // Default name: <script-name-or-"pig_"><epoch-millis>.log
        String defaultLogFileName = (strippedDownScriptName == null ? "pig_" : strippedDownScriptName) + new Date().getTime() + ".log";
        File logFile;
        if(logFileName != null) {
            logFile = new File(logFileName);
            //Check if the file name is a directory
            //append the default file name to the file
            if(logFile.isDirectory()) {
                if(logFile.canWrite()) {
                    try {
                        logFileName = logFile.getCanonicalPath() + File.separator + defaultLogFileName;
                    } catch (IOException ioe) {
                        log.warn("Could not compute canonical path to the log file " + ioe.getMessage());
                        return null;
                    }
                    return logFileName;
                } else {
                    log.warn("Need write permission in the directory: " + logFileName + " to create log file.");
                    return null;
                }
            } else {
                //we have a relative path or an absolute path to the log file
                //check if we can write to the directory where this file is/will be stored
                if (logFile.exists()) {
                    if(logFile.canWrite()) {
                        try {
                            logFileName = new File(logFileName).getCanonicalPath();
                        } catch (IOException ioe) {
                            log.warn("Could not compute canonical path to the log file " + ioe.getMessage());
                            return null;
                        }
                        return logFileName;
                    } else {
                        //do not have write permissions for the log file
                        //bail out with an error message
                        // NOTE(review): unlike the directory branch above, this
                        // returns the unwritable name instead of null — looks
                        // deliberate (caller may still report it); verify.
                        log.warn("Cannot write to file: " + logFileName + ". Need write permission.");
                        return logFileName;
                    }
                } else {
                    // File does not exist yet: check its parent directory.
                    logFile = logFile.getParentFile();
                    if(logFile != null) {
                        //if the directory is writable we are good to go
                        if(logFile.canWrite()) {
                            try {
                                logFileName = new File(logFileName).getCanonicalPath();
                            } catch (IOException ioe) {
                                log.warn("Could not compute canonical path to the log file " + ioe.getMessage());
                                return null;
                            }
                            return logFileName;
                        } else {
                            // NOTE(review): also returns the name rather than
                            // null on permission failure — see note above.
                            log.warn("Need write permission in the directory: " + logFile + " to create log file.");
                            return logFileName;
                        }
                    }//end if logFile != null else is the default in fall through
                }//end else part of logFile.exists()
            }//end else part of logFile.isDirectory()
        }//end if logFileName != null
        //file name is null or its in the current working directory
        //revert to the current working directory
        String currDir = System.getProperty("user.dir");
        logFile = new File(currDir);
        logFileName = currDir + File.separator + (logFileName == null? defaultLogFileName : logFileName);
        if(logFile.canWrite()) {
            return logFileName;
        }
        log.warn("Cannot write to log file: " + logFileName);
        return null;
    }
private static String getFileFromCanonicalPath(String canonicalPath) {
return canonicalPath.substring(canonicalPath.lastIndexOf(File.separator));
}
    /**
     * Determines the scripting language of the given file by delegating to
     * ScriptEngine.
     *
     * @param file path to the script being inspected
     * @return the detected script language; behavior for unsupported files is
     *         defined by ScriptEngine.getSupportedScriptLang — presumably a
     *         null/exception contract, verify against the pig version in use
     * @throws IOException if the file cannot be read
     */
    private static SupportedScriptLang determineScriptType(String file)
    throws IOException {
        return ScriptEngine.getSupportedScriptLang(file);
    }
private static int runEmbeddedScript(PigContext pigContext, String file, String engine)
throws IOException {
log.info("Run embedded script: " + engine);
pigContext.connect();
ScriptEngine scriptEngine = ScriptEngine.getInstance(engine);
Map<String, List<PigStats>> statsMap = scriptEngine.run(pigContext, file);
PigStatsUtil.setStatsMap(statsMap);
int failCount = 0;
int totalCount = 0;
for (List<PigStats> lst : statsMap.values()) {
if (lst != null && !lst.isEmpty()) {
for (PigStats stats : lst) {
if (!stats.isSuccessful()) failCount++;
totalCount++;
}
}
}
return (totalCount > 0 && failCount == totalCount) ? ReturnCode.FAILURE
: (failCount > 0) ? ReturnCode.PARTIAL_FAILURE
: ReturnCode.SUCCESS;
}
}
| 331 |
0 | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick/MRPlanCalculator.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.lipstick;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceOper;
import org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.plans.MROperPlan;
import org.apache.pig.backend.hadoop.executionengine.physicalLayer.PhysicalOperator;
import org.apache.pig.backend.hadoop.executionengine.physicalLayer.PhysicalOperator.OriginalLocation;
import org.apache.pig.backend.hadoop.executionengine.physicalLayer.plans.PhysicalPlan;
import org.apache.pig.backend.hadoop.executionengine.physicalLayer.relationalOperators.PODemux;
import org.apache.pig.backend.hadoop.executionengine.physicalLayer.relationalOperators.POLocalRearrange;
import org.apache.pig.backend.hadoop.executionengine.physicalLayer.relationalOperators.POPreCombinerLocalRearrange;
import org.apache.pig.backend.hadoop.executionengine.physicalLayer.relationalOperators.POSplit;
import org.apache.pig.backend.hadoop.executionengine.physicalLayer.relationalOperators.POStore;
import org.apache.pig.impl.io.FileSpec;
import org.apache.pig.newplan.Operator;
import org.apache.pig.newplan.logical.relational.LogicalRelationalOperator;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.google.common.collect.Sets.SetView;
import com.netflix.lipstick.model.P2jPlan;
import com.netflix.lipstick.model.operators.P2jLOStore;
import com.netflix.lipstick.model.operators.P2jLogicalRelationalOperator;
/**
 *
 * Assigns map/reduce stages to all operators in a p2jPlan.
 *
 * @author jmagnusson
 *
 */
public class MRPlanCalculator {
    private static final Log LOG = LogFactory.getLog(MRPlanCalculator.class);
    // Compiled map/reduce operator plan for the script.
    protected MROperPlan mrp;
    // Physical operator -> logical operator mapping.
    protected Map<PhysicalOperator, Operator> phy2LogMap;
    // Operator uid -> p2j operator; the map that receives MR-stage assignments.
    protected Map<String, P2jLogicalRelationalOperator> p2jMap;
    protected P2jPlan p2jPlan;
    // Logical operator -> operator uid (reverse direction of p2jMap's keys).
    protected Map<Operator, String> reverseMap;
    // Script source location (alias/line/offset) string -> logical operator.
    protected Map<String, Operator> locationMap;
    /**
     * Possible map/reduce job phases. UNKNOWN marks operators whose phase had
     * to be inferred from neighbors rather than found in a physical plan.
     */
    public static enum MRStepType {
        MAPPER, REDUCER, COMBINER, UNKNOWN
    }
    /**
     * Instantiates a new map/reduce plan calculator. Assigns map/reduce plans
     * to p2jPlan.
     *
     * @param p2jPlan
     *            the P2jPlan
     * @param mrp
     *            the MROperPlan
     * @param phy2LogMap
     *            physical to logical operator map
     * @param reverseMap
     *            reverse map of logical operator to operator uid
     */
    public MRPlanCalculator(P2jPlan p2jPlan,
                            MROperPlan mrp,
                            Map<PhysicalOperator, Operator> phy2LogMap,
                            Map<Operator, String> reverseMap) {
        this.mrp = mrp;
        this.phy2LogMap = phy2LogMap;
        this.p2jMap = p2jPlan.getPlan();
        this.p2jPlan = p2jPlan;
        this.reverseMap = reverseMap;
        this.locationMap = generateLocationMap();
        // Side effect: mutates p2jPlan in place with the stage assignments.
        p2jPlan.setPlan(assignMRStagesToNodes());
    }
    /**
     * Get the P2jPlan with map/reduce jobs assigned.
     *
     * @return p2jPlan with map/reduce jobs assigned
     */
    public P2jPlan getP2jPlan() {
        return p2jPlan;
    }
    /**
     * Generate a map of pig script code location to logical operators.
     *
     * @return map of location to logical operator
     */
    protected Map<String, Operator> generateLocationMap() {
        Map<String, Operator> locationMap = Maps.newHashMap();
        for (Operator op : reverseMap.keySet()) {
            LogicalRelationalOperator logicalOp = (LogicalRelationalOperator) op;
            // Key by the string form of (alias, line, offset) so physical
            // operators can be matched back by their original locations.
            OriginalLocation loc = new OriginalLocation(logicalOp.getAlias(),
                                                        logicalOp.getLocation().line(),
                                                        logicalOp.getLocation().offset());
            locationMap.put(loc.toString(), logicalOp);
        }
        LOG.debug(locationMap);
        return locationMap;
    }
    /**
     * Assign map/reduce jobs to P2jLogicalRelationalOperators, first by
     * iterating through the MROperPlan and mapping physical operators to
     * P2jLogicalRelationalOperators. Finally assign operators that could not be
     * mapped by using information from the unassigned operators successors and
     * predecessors via assignMRStagesToUnknownNodes.
     *
     * @return the p2jMap with map/reduce jobs assigned to all nodes
     */
    protected Map<String, P2jLogicalRelationalOperator> assignMRStagesToNodes() {
        for (MapReduceOper job : mrp) {
            String jid = job.getOperatorKey().toString();
            assignMRStagesToPlan(job.mapPlan, jid, MRStepType.MAPPER);
            assignMRStagesToPlan(job.reducePlan, jid, MRStepType.REDUCER);
            assignMRStagesToPlan(job.combinePlan, jid, MRStepType.COMBINER);
        }
        // assign to the operators that were not assigned previously
        assignMRStagesToUnknownNodes();
        return p2jMap;
    }
    /**
     * Return the P2jLogicalRelationalOperator associated with a physical store
     * operator.
     *
     * @param pop
     *            the physical store operator
     * @return the P2jLogicalRelationalOperator associated with pop, or null
     *         if no store with a matching location/function is found
     */
    protected P2jLogicalRelationalOperator getOpForStore(POStore pop) {
        FileSpec pofs = pop.getSFile();
        for (Entry<String, P2jLogicalRelationalOperator> entry : p2jMap.entrySet()) {
            if (entry.getValue() instanceof P2jLOStore) {
                P2jLOStore store = (P2jLOStore) entry.getValue();
                // Match on storage location plus a suffix match of the
                // storage function name.
                if (store.getStorageLocation().equals(pofs.getFileName())
                    && pofs.getFuncName().endsWith(store.getStorageFunction())) {
                    return store;
                }
            }
        }
        return null;
    }
    /**
     * Assign map/reduce job and step to logical operators identifiable through
     * a physical plan.
     *
     * @param pp
     *            the PhysicalPlan
     * @param jid
     *            a string representing the M/R job id of the plan
     * @param stepType
     *            map/reduce phase for the physical plan
     */
    protected void assignMRStagesToPlan(PhysicalPlan pp, String jid, MRStepType stepType) {
        for (PhysicalOperator pop : pp) {
            assignMRStage(pop, jid, stepType);
        }
    }
    /**
     * Given a physical operator, attempts to map it to a logical operator. If a
     * suitable mapping can be found, assign map reduce phase "jid, stepType" to
     * the logical operator.
     *
     * @param pop
     *            the physical operator
     * @param jid
     *            the map/reduce job id
     * @param stepType
     *            the map/reduce step (MAPPER, REDUCER, COMBINER, UNKNOWN)
     */
    protected void assignMRStage(PhysicalOperator pop, String jid, MRStepType stepType) {
        // special cases - find other operators inside these that need to be
        // assigned
        if (pop instanceof POLocalRearrange) {
            for (PhysicalPlan ipl : ((POLocalRearrange) pop).getPlans()) {
                assignMRStagesToPlan(ipl, jid, stepType);
            }
        } else if (pop instanceof PODemux) {
            for (PhysicalPlan ipl : ((PODemux) pop).getPlans()) {
                assignMRStagesToPlan(ipl, jid, stepType);
            }
        } else if (pop instanceof POPreCombinerLocalRearrange) {
            for (PhysicalPlan ipl : ((POPreCombinerLocalRearrange) pop).getPlans()) {
                assignMRStagesToPlan(ipl, jid, stepType);
            }
        } else if (pop instanceof POSplit) {
            for (PhysicalPlan ipl : ((POSplit) pop).getPlans()) {
                assignMRStagesToPlan(ipl, jid, stepType);
            }
        }
        // Note: the special cases above do not return — the container
        // operator itself still falls through to the generic assignment.
        String stepTypeString = stepType.toString();
        if (pop instanceof POStore) {
            P2jLogicalRelationalOperator node = getOpForStore((POStore) pop);
            if (node != null) {
                node.setMapReduce(jid, stepTypeString);
                return;
            }
        } else if (phy2LogMap.containsKey(pop) && reverseMap.containsKey(phy2LogMap.get(pop))) {
            // Direct physical->logical->uid mapping available.
            String nodeId = reverseMap.get(phy2LogMap.get(pop));
            P2jLogicalRelationalOperator node = p2jMap.get(nodeId);
            node.setMapReduce(jid, stepTypeString);
            LOG.debug("Found key for: " + pop.toString());
            return;
        } else {
            // Fall back to matching by original script source location.
            LOG.debug("No node for pop: " + pop + pop.getClass() + " ... Searching locationMap.");
            boolean didAssign = false;
            for (OriginalLocation loc : pop.getOriginalLocations()) {
                LOG.debug("Checking location: " + loc);
                if (locationMap.containsKey(loc.toString())) {
                    P2jLogicalRelationalOperator node = p2jMap.get(reverseMap.get(locationMap.get(loc.toString())));
                    LOG.debug("Found location... " + node);
                    if (node.getMapReduce() == null) {
                        // Joins and groups reached via location lookup are
                        // forced to the reduce phase.
                        if (node.getOperator().equalsIgnoreCase("LOJoin")
                            || node.getOperator().equalsIgnoreCase("LOGroup")) {
                            stepTypeString = MRStepType.REDUCER.toString();
                        }
                        node.setMapReduce(jid, stepTypeString);
                        didAssign = true;
                        LOG.debug("Assign location... " + node);
                    }
                }
            }
            if (didAssign) {
                return;
            }
        }
        LOG.debug("*** Couldn't assign " + pop.getClass() + pop);
    }
    /**
     * Assign map/reduce jobs to previously unassigned
     * P2jLogicalRelationalOperators in the plan, using information from the
     * operator's successors and predecessors.
     */
    protected void assignMRStagesToUnknownNodes() {
        for (P2jLogicalRelationalOperator node : p2jMap.values()) {
            if (node.getMapReduce() == null || node.getMapReduce().getJobId() == null) {
                String jobId = resolveJobForNode(node);
                if (jobId != null) {
                    node.setMapReduce(jobId, MRStepType.UNKNOWN.toString());
                }
            }
        }
    }
    /**
     * Attempts to determine a map/reduce job that is responsible for a
     * P2jLogicalOperator, via the job information from the operator's
     * predecessors and successors.
     *
     * @param node
     *            the P2jLogicalRelationalOperator
     * @return a String representing the map/reduce job id, or null if no
     *         neighbor has an assigned job
     */
    protected String resolveJobForNode(P2jLogicalRelationalOperator node) {
        Set<String> pred = generateScopesForNode(node, new ScopeGetter() {
            @Override
            public List<String> getScopes(P2jLogicalRelationalOperator node) {
                return node.getPredecessors();
            }
        });
        Set<String> succ = generateScopesForNode(node, new ScopeGetter() {
            @Override
            public List<String> getScopes(P2jLogicalRelationalOperator node) {
                return node.getSuccessors();
            }
        });
        // Preference order: a job shared by both sides, then a unique
        // successor, then any predecessor, then any successor.
        SetView<String> intersect = Sets.intersection(pred, succ);
        if (intersect.size() > 0) {
            return intersect.iterator().next();
        } else if (succ.size() == 1) {
            return succ.iterator().next();
        } else if (pred.size() > 0) {
            return pred.iterator().next();
        } else if (succ.size() > 0) {
            return succ.iterator().next();
        }
        return null;
    }
    // Abstracts over "which direction to walk" (predecessors vs. successors).
    interface ScopeGetter {
        List<String> getScopes(P2jLogicalRelationalOperator node);
    }
    /**
     * Generate the set of map reduce jobs accessible from node in the direction
     * defined by ScopeGetter.
     *
     * @param node
     *            the P2jLogicalRelationalOperator to search from
     * @param scopeGetter
     *            the ScopeGetter defining the direction of the search
     * @return a set of map/reduce job scopes
     */
    protected Set<String> generateScopesForNode(P2jLogicalRelationalOperator node, ScopeGetter scopeGetter) {
        Set<String> scopes = Sets.newHashSet();
        for (String id : scopeGetter.getScopes(node)) {
            P2jLogicalRelationalOperator job = p2jMap.get(id);
            if (job.getMapReduce() != null && job.getMapReduce().getJobId() != null) {
                scopes.add(job.getMapReduce().getJobId());
            } else {
                // Recurse past unassigned neighbors until assigned ones are
                // found. NOTE(review): assumes the plan is acyclic — the
                // recursion would not terminate on a cycle; confirm upstream.
                scopes.addAll(generateScopesForNode(job, scopeGetter));
            }
        }
        return scopes;
    }
}
| 332 |
0 | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick/util/OutputSampler.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.lipstick.util;
import java.lang.Class;
import java.lang.reflect.Field;
import java.lang.IllegalAccessException;
import java.util.LinkedList;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.pig.LoadFunc;
import org.apache.pig.StoreFuncInterface;
import org.apache.pig.backend.hadoop.datastorage.ConfigurationUtil;
import org.apache.pig.backend.hadoop.executionengine.physicalLayer.relationalOperators.POStore;
import org.apache.pig.builtin.PigStorage;
import org.apache.pig.data.Tuple;
import org.apache.pig.impl.PigContext;
import org.apache.pig.impl.io.InterStorage;
import org.apache.pig.impl.io.ReadToEndLoader;
import org.apache.pig.impl.io.TFileStorage;
import org.apache.pig.tools.pigstats.JobStats;
import org.apache.pig.tools.pigstats.ScriptState;
/**
 * Output sampler of intermediate results of Pig jobs.
 *
 * <p>Reads back a bounded number of rows from each store (including
 * intermediate/temp stores discovered via reflection) of a completed
 * map/reduce job so they can be displayed as sample output.
 *
 * @author nbates
 *
 */
public class OutputSampler {
    /**
     * Immutable holder pairing a store's schema string with a sampled,
     * delimiter-separated chunk of its output rows.
     */
    public static class SampleOutput {
        private final String schema;
        private final String output;
        /**
         * Constructs a SampleOutput from the given schema/output strings.
         *
         * @param schema schema of the sampled relation (may be empty)
         * @param output sampled rows with fields joined by DELIMITER and rows
         *               separated by newlines
         */
        public SampleOutput(String schema, String output) {
            this.schema = schema;
            this.output = output;
        }
        public String getSchema() {
            return schema;
        }
        public String getOutput() {
            return output;
        }
    }
    private static final Log LOG = LogFactory.getLog(OutputSampler.class);
    /** Field delimiter (^A) used when flattening tuples to strings. */
    public static final String DELIMITER = "\001";
    private final JobStats jobStats;
    /**
     * Constructs an OutputSampler from a JobStats object.
     *
     * @param jobStats stats of the job whose stores will be sampled
     */
    public OutputSampler(JobStats jobStats) {
        this.jobStats = jobStats;
    }
    /**
     * Check if StoreFunc is safe to sample data from.
     *
     * @param store the StoreFunc
     * @return true if safe to sample, otherwise false
     */
    protected boolean storeFuncValidForReading(StoreFuncInterface store) {
        return store instanceof InterStorage || store instanceof TFileStorage || store.getClass() == PigStorage.class;
    }
    /**
     * Returns a list of sample outputs limited by the maxRowsPerEntry and
     * maxBytesPerEntry.
     *
     * @param maxRowsPerEntry  max number of rows sampled per store
     * @param maxBytesPerEntry max number of bytes sampled per store
     * @return one SampleOutput per readable store of the job
     */
    public List<SampleOutput> getSampleOutputs(int maxRowsPerEntry, int maxBytesPerEntry) {
        List<SampleOutput> sampleOutputs = new LinkedList<SampleOutput>();
        for (POStore storeInfo : getStoreInfo(jobStats)) {
            // Fix: null-check before dereferencing. Previously
            // storeInfo.getStoreFunc() was logged ahead of the null check,
            // so a null entry would have thrown an NPE and made the check dead.
            if (storeInfo == null) {
                continue;
            }
            LOG.info("Sample output: " + storeInfo);
            LOG.info("StoreFunc: " + storeInfo.getStoreFunc().getClass());
            if (storeFuncValidForReading(storeInfo.getStoreFunc())) {
                String schema = (storeInfo.getSchema() == null) ? ("") : storeInfo.getSchema().toString();
                sampleOutputs.add(new SampleOutput(schema, getSampleRows(storeInfo, maxRowsPerEntry, maxBytesPerEntry)));
            }
        }
        return sampleOutputs;
    }
    /* For a given object retrieve the value of a named field, regardless
       of what class in the object's inheritance hierarchy the field was
       declared upon, and raises NoSuchFieldException if the field does
       not exist on any class in the hierarchy. */
    public Object getInheritedFieldValue(Object obj, String fieldName) throws IllegalAccessException, NoSuchFieldException {
        return getInheritedFieldValue(obj, obj.getClass(), fieldName);
    }
    protected Object getInheritedFieldValue(Object obj, Class cls, String fieldName) throws IllegalAccessException, NoSuchFieldException {
        try {
            Field f = cls.getDeclaredField(fieldName);
            f.setAccessible(true);
            return f.get(obj);
        } catch (NoSuchFieldException e) {
            Class souper = cls.getSuperclass();
            if (souper != null) {
                // Field not declared here; walk up the hierarchy.
                return getInheritedFieldValue(obj, souper, fieldName);
            } else {
                /* getSuperclass() returns null if we've gotten all the
                   way up to Object. At this point we've checked every class
                   in the hierarchy so the field must not exist. */
                throw new NoSuchFieldException(fieldName);
            }
        }
    }
    @SuppressWarnings("unchecked")
    private List<POStore> getStoreInfo(JobStats jobStats) {
        List<POStore> storeInfo = new LinkedList<POStore>();
        // Use reflection to get the store info for the jobStats
        // Done b/c the OutputStats from jobStats.getOutputs()
        // doesn't include intermediate (temp) outputs
        List<POStore> mapStores = null;
        List<POStore> reduceStores = null;
        try {
            mapStores = (LinkedList<POStore>) getInheritedFieldValue(jobStats, "mapStores");
        } catch (Exception e) {
            LOG.warn("Failed to get map store information for jobId [" + jobStats.getJobId() + "].", e);
        }
        try {
            reduceStores = (LinkedList<POStore>) getInheritedFieldValue(jobStats, "reduceStores");
        } catch (Exception e) {
            LOG.warn("Failed to get reduce store information for jobId [" + jobStats.getJobId() + "].", e);
        }
        if (mapStores != null) {
            storeInfo.addAll(mapStores);
        } else {
            LOG.info("No map store information for jobId [" + jobStats.getJobId() + "].");
        }
        if (reduceStores != null) {
            storeInfo.addAll(reduceStores);
        } else {
            LOG.info("No reduce store information for jobId [" + jobStats.getJobId() + "].");
        }
        return storeInfo;
    }
    private String getSampleRows(POStore store, int maxRows, int maxBytes) {
        // Load the proper amount of data
        StringBuilder sb = new StringBuilder();
        try {
            LoadFunc loader = getLoader(store);
            if (loader != null) {
                int rowCount = 0;
                Tuple t = loader.getNext();
                while (t != null && rowCount < maxRows) {
                    String strTuple = t.toDelimitedString(DELIMITER);
                    if (strTuple != null) {
                        // Stop before the byte budget is exceeded; the extra
                        // DELIMITER.length() accounts for the row separator.
                        if (sb.length() + strTuple.length() + DELIMITER.length() > maxBytes) {
                            break;
                        }
                        if (sb.length() > 0) {
                            sb.append('\n');
                        }
                        sb.append(strTuple);
                    }
                    rowCount++;
                    t = loader.getNext();
                }
            }
        } catch (Exception e) {
            // Best-effort sampling: report whether we obtained a partial
            // sample or none at all, but never fail the job for it.
            String sampleDescription = (sb.length() > 0) ? "full" : "any";
            LOG.warn("Unable to get " + sampleDescription + " sample for: " + store.getSFile(), e);
        }
        return sb.toString();
    }
    protected LoadFunc getLoader(POStore store) {
        // Create a loader from the POStore
        // Sampled from JobStats class
        LoadFunc loader = null;
        PigContext pigContext = ScriptState.get().getPigContext();
        try {
            LoadFunc originalLoadFunc = (LoadFunc) PigContext.instantiateFuncFromSpec(store.getSFile().getFuncSpec());
            loader = new ReadToEndLoader(originalLoadFunc,
                                         ConfigurationUtil.toConfiguration(pigContext.getProperties()),
                                         store.getSFile().getFileName(),
                                         0);
        } catch (Exception e) {
            LOG.warn("Unable to get sample rows for: " + store.getSFile(), e);
        }
        return loader;
    }
}
| 333 |
0 | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick/util/EzIterable.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.lipstick.util;
import java.util.Iterator;
/**
 * Wraps a one-shot {@link Iterator} so it can be consumed by a for-each loop.
 *
 * @author nbates
 *
 * @param <T> the element type
 */
public class EzIterable<T> implements Iterable<T> {

    // The wrapped iterator; consumed once iteration begins.
    private final Iterator<T> backing;

    /**
     * Creates an iterable view over the supplied iterator.
     *
     * @param iter the iterator to expose
     */
    public EzIterable(Iterator<T> iter) {
        this.backing = iter;
    }

    @Override
    public Iterator<T> iterator() {
        return backing;
    }

    /**
     * Static factory equivalent to {@code new EzIterable<T>(iter)}.
     *
     * @param iter the iterator to wrap
     * @return an iterable view over {@code iter}
     */
    public static <T> EzIterable<T> getIterable(Iterator<T> iter) {
        return new EzIterable<T>(iter);
    }
}
| 334 |
0 | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick/pigstatus/PigStatusClient.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.lipstick.pigstatus;
import com.netflix.lipstick.model.P2jPlanPackage;
import com.netflix.lipstick.model.P2jPlanStatus;
import com.netflix.lipstick.model.P2jSampleOutputList;
/**
 *
 * Interface for Lipstick client communication to server.
 *
 * @author nbates
 *
 */
public interface PigStatusClient {
    /**
     * Persists a P2JPlanPackage which will presumably be used
     * by the server with which the client is interacting.
     *
     * @param plans the plan package to persist
     * @return an identifier for the saved plan — presumably the plan's uuid;
     *         verify against the implementing client
     */
    String savePlan(P2jPlanPackage plans);
    /**
     * Saves the status of the P2jPlanStatus.
     * It's expected that this will trigger an update of the
     * P2jPlanPackage with the given uuid.
     *
     * @param uuid   identifier of the previously saved plan package
     * @param status the status snapshot to record
     */
    void saveStatus(String uuid, P2jPlanStatus status);
    /**
     * Saves the sample output for a given job.
     * It's expected that this will trigger an update of the
     * P2jPlanPackage with the given uuid.
     *
     * @param uuid             identifier of the previously saved plan package
     * @param jobId            the map/reduce job the samples belong to
     * @param sampleOutputList sampled output rows for that job
     */
    void saveSampleOutput(String uuid, String jobId, P2jSampleOutputList sampleOutputList);
}
| 335 |
0 | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick/pigstatus/RestfulPigStatusClient.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.lipstick.pigstatus;
import java.util.Map;
import java.util.List;
import java.util.ArrayList;
import java.util.PriorityQueue;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.codehaus.jackson.map.ObjectMapper;
import com.netflix.lipstick.model.P2jPlanPackage;
import com.netflix.lipstick.model.P2jPlanStatus;
import com.netflix.lipstick.model.P2jSampleOutputList;
import com.sun.jersey.api.client.Client;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.WebResource;
/**
 * RESTful client implementation of PigStatusClient.
 *
 * Maintains a priority queue of candidate Lipstick servers ordered by an
 * exponentially increasing failure penalty; every request is attempted
 * against the least-penalized server first, and a server's penalty is
 * doubled each time a request against it fails.
 *
 * @author nbates
 *
 */
public class RestfulPigStatusClient implements PigStatusClient {

    /** HTTP verbs supported when talking to the Lipstick server. */
    protected enum RequestVerb {
        POST, PUT
    };

    private static final Log LOG = LogFactory.getLog(RestfulPigStatusClient.class);

    /** Serializes request payloads to JSON. */
    protected ObjectMapper om = new ObjectMapper();

    /**
     * A candidate Lipstick server, ordered by ascending penalty so that
     * flaky servers are tried last.
     */
    public static class Server implements Comparable<Server> {
        public String url;
        public Long penalty;

        public Server(String url, Long penalty) {
            this.url = url;
            this.penalty = penalty;
        }

        /** Doubles this server's penalty; called after a failed request. */
        public void penalize() {
            this.penalty = penalty * 2l; // Double penalty each time
        }

        @Override
        public int compareTo(Server other) {
            return penalty.compareTo(other.penalty);
        }
    }

    /** Candidate servers ordered by ascending penalty. */
    protected PriorityQueue<Server> lipstickServers = null;

    /**
     * Constructs a default RestfulPigStatusClient.
     */
    public RestfulPigStatusClient() {
    }

    /**
     * Constructs a RestfulPigStatusClient with the given serviceUrls.
     *
     * @param serviceUrls comma-separated list of Lipstick server base urls
     */
    public RestfulPigStatusClient(String serviceUrls) {
        LOG.info("Initializing " + this.getClass() + " with serviceUrls: " + serviceUrls);
        initializeServers(serviceUrls);
    }

    /**
     * Parses the comma-separated url list and seeds each server with the
     * minimum penalty of one.
     *
     * @param serviceUrls comma-separated list of server urls
     */
    protected void initializeServers(String serviceUrls) {
        String[] urls = serviceUrls.split(",");
        // Typed queue — was a raw `new PriorityQueue(...)`, which compiled
        // with an unchecked warning.
        lipstickServers = new PriorityQueue<Server>(urls.length);
        for (String url : urls) {
            lipstickServers.add(new Server(url, 1l));
        }
    }

    /**
     * Returns previously penalized servers to the queue so they can be
     * retried on subsequent requests.
     *
     * @param servers servers to put back into the priority queue
     */
    protected void rebuildServers(List<Server> servers) {
        for (Server s : servers) {
            lipstickServers.add(s);
        }
    }

    /**
     * @return the url of the least-penalized server. Assumes the queue is
     *         non-empty; callers only invoke this after initializeServers
     *         has populated it (and makeRequest always restores the queue).
     */
    protected String getServiceUrl() {
        Server s = lipstickServers.peek();
        return s.url;
    }

    @Override
    public String savePlan(P2jPlanPackage plans) {
        plans.getStatus().setHeartbeatTime();
        ClientResponse response = makeRequest("/job/", plans, RequestVerb.POST);
        if (response == null) {
            return null;
        }
        try {
            // The server echoes back the uuid it stored; sanity-check it
            // against the uuid we sent.
            String output = (String) om.readValue(response.getEntity(String.class), Map.class).get("uuid");
            if (!plans.getUuid().equals(output)) {
                LOG.error("Incorrect uuid returned from server");
            }
            String serviceUrl = getServiceUrl();
            LOG.info("This script has been assigned uuid: " + output);
            LOG.info("Navigate to " + serviceUrl + "#job/" + output + " to view progress.");
            return plans.getUuid();
        } catch (Exception e) {
            LOG.error("Error getting uuid from server response.", e);
        }
        return null;
    }

    @Override
    public void saveStatus(String uuid, P2jPlanStatus status) {
        status.setHeartbeatTime();
        String resource = "/job/" + uuid;
        makeRequest(resource, status, RequestVerb.PUT);
        String serviceUrl = getServiceUrl();
        LOG.info("Navigate to " + serviceUrl + "#job/" + uuid + " to view progress.");
    }

    @Override
    public void saveSampleOutput(String uuid, String jobId, P2jSampleOutputList sampleOutputList) {
        String resource = String.format("/job/%s/sampleOutput/%s", uuid, jobId);
        makeRequest(resource, sampleOutputList, RequestVerb.PUT);
    }

    /**
     * Attempts the request against each server in increasing order of
     * penalty. Servers that fail are polled off the queue, penalized, and
     * re-added once a request succeeds (or every server has failed), so the
     * queue always ends up with its full complement of servers.
     *
     * @param resource resource path relative to the server base url
     * @param requestObj object to serialize as the JSON request body
     * @param verb HTTP verb to use
     * @return the successful response, or null if every server failed
     */
    protected ClientResponse makeRequest(String resource, Object requestObj, RequestVerb verb) {
        List<Server> penalized = new ArrayList<Server>();
        Client client = Client.create();
        ClientResponse response = null;
        // Go through queue and get servers in increasing order of penalty
        while (lipstickServers.size() > 0) {
            String serviceUrl = getServiceUrl();
            LOG.info("Trying Lipstick server " + serviceUrl);
            String resourceUrl = serviceUrl + resource;
            WebResource webResource = client.resource(resourceUrl);
            response = sendRequest(webResource, requestObj, verb);
            if (response != null) {
                rebuildServers(penalized);
                return response;
            } else {
                Server s = lipstickServers.poll();
                s.penalize();
                penalized.add(s);
            }
        }
        rebuildServers(penalized);
        return null;
    }

    /**
     * Sends a single JSON request. Any failure (transport error or non-200
     * status) is logged and converted to a null return so the caller can
     * fail over to the next server.
     *
     * @param webResource fully-resolved resource to hit
     * @param requestObj object to serialize as the JSON request body
     * @param verb HTTP verb to use
     * @return the response on HTTP 200, otherwise null
     */
    protected ClientResponse sendRequest(WebResource webResource, Object requestObj, RequestVerb verb) {
        ClientResponse response = null;
        try {
            String resourceUrl = webResource.getURI().toURL().toString();
            LOG.debug("Sending " + verb + " request to " + resourceUrl);
            LOG.debug(om.writeValueAsString(requestObj));
            switch (verb) {
            case POST:
                response = webResource.type("application/json").post(ClientResponse.class,
                                                                     om.writeValueAsString(requestObj));
                break;
            case PUT:
                response = webResource.type("application/json").put(ClientResponse.class,
                                                                    om.writeValueAsString(requestObj));
                break;
            default:
                throw new RuntimeException("Invalid verb: " + verb + " for resourceUrl: " + resourceUrl);
            }
            if (response != null && response.getStatus() != 200) {
                LOG.error("Error contacting Lipstick server. Received status code " + response.getStatus());
                LOG.debug(response.getEntity(String.class));
                throw new RuntimeException("Failed : HTTP error code : " + response.getStatus());
            }
            return response;
        } catch (Exception e) {
            LOG.error("Error contacting Lipstick server.");
            LOG.debug("Stacktrace", e);
        }
        return null;
    }
}
| 336 |
0 | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick/adaptors/LOJsonAdaptor.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.lipstick.adaptors;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.ListIterator;
import java.util.Map;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.pig.data.DataType;
import org.apache.pig.impl.logicalLayer.FrontendException;
import org.apache.pig.newplan.Operator;
import org.apache.pig.newplan.logical.Util;
import org.apache.pig.newplan.logical.expression.BinCondExpression;
import org.apache.pig.newplan.logical.expression.ConstantExpression;
import org.apache.pig.newplan.logical.expression.LogicalExpression;
import org.apache.pig.newplan.logical.expression.LogicalExpressionPlan;
import org.apache.pig.newplan.logical.expression.MapLookupExpression;
import org.apache.pig.newplan.logical.expression.ProjectExpression;
import org.apache.pig.newplan.logical.expression.UserFuncExpression;
import org.apache.pig.newplan.logical.relational.LogicalPlan;
import org.apache.pig.newplan.logical.relational.LogicalRelationalOperator;
import org.apache.pig.parser.PigParserNode.InvocationPoint;
import org.apache.pig.parser.SourceLocation;
import com.google.common.collect.Lists;
import com.netflix.lipstick.model.operators.P2jLogicalRelationalOperator;
/**
 * Base adaptor class to convert pig LogicalRelationalOperator to P2jLogicalRelationalOperator.
 *
 * @author jmagnusson
 */
public class LOJsonAdaptor {

    // Lookup from pig's internal type byte to a readable type name.
    protected static final Map<Byte, String> TYPESMAP = DataType.genTypeToNameMap();

    // The Lipstick model operator populated by this adaptor (and subclasses).
    protected final P2jLogicalRelationalOperator p2j;

    private static final Log LOG = LogFactory.getLog(LOJsonAdaptor.class);

    /**
     * Instantiate a new LOJsonAdaptor.
     *
     * @param node the LogicalRelationalOperator to convert
     * @param lp the LogicalPlan containing the LogicalRelationalOperator
     * @throws FrontendException
     */
    public LOJsonAdaptor(LogicalRelationalOperator node, LogicalPlan lp) throws FrontendException {
        this(node, new P2jLogicalRelationalOperator(), lp);
    }

    /**
     * Populates the given model object with the operator's schema string,
     * operator class name, alias, and source location (including any macro
     * expansion chain). Subclasses pass a model subtype here and then fill
     * in operator-specific fields.
     *
     * @param node the pig operator to convert
     * @param p2j the model object to populate
     * @param lp the LogicalPlan containing node
     * @throws FrontendException
     */
    protected LOJsonAdaptor(LogicalRelationalOperator node, P2jLogicalRelationalOperator p2j, LogicalPlan lp) throws FrontendException {
        this.p2j = p2j;
        // Schema may legitimately be null when pig cannot infer one.
        p2j.setSchemaString(node.getSchema() != null ? Util.translateSchema(node.getSchema()).toString() : null);
        p2j.setOperator(node.getClass().getSimpleName());
        p2j.setAlias(node.getAlias());
        p2j.setLocation(node.getLocation().line(), node.getLocation().file(), getMacroList(node));
    }

    /**
     * Get the newly created P2jLogicalRelationalOperator.
     * @return the newly created P2jLogicalRelationalOperator
     */
    public P2jLogicalRelationalOperator getToP2jOperator() {
        return p2j;
    }

    /**
     * Get the list of macros for an operator.
     *
     * Walks the operator's invocation points, collecting each non-null macro
     * name. NOTE(review): assumes getNextInvocationPoint() advances an
     * internal cursor and eventually returns null — confirm against the pig
     * parser API.
     *
     * @param node the operator
     * @return a list of macros
     */
    protected List<String> getMacroList(LogicalRelationalOperator node) {
        List<String> macro = Lists.newArrayList();
        SourceLocation loc = node.getLocation();
        if (loc.node() != null) {
            InvocationPoint p = loc.node().getNextInvocationPoint();
            while (p != null) {
                if (p.getMacro() != null) {
                    macro.add(p.getMacro());
                }
                p = loc.node().getNextInvocationPoint();
            }
        }
        return macro;
    }

    /**
     * Return the uids for a list of operators.
     *
     * @param nodes the list of operators
     * @param reverseMap a mapping of operator to uid
     * @return a list of uids, in the same order as nodes (null entries for
     *         operators absent from reverseMap)
     */
    protected List<String> getChain(List<Operator> nodes, Map<Operator, String> reverseMap) {
        List<String> chain = Lists.newLinkedList();
        if (nodes != null && !nodes.isEmpty()) {
            for (Operator i : nodes) {
                chain.add(reverseMap.get(i));
            }
        }
        return chain;
    }

    /**
     * Convertor of LogicalExpressionPlan to readable text string.
     *
     * Recursively renders an expression tree, using SPECOPS to map pig
     * operator class names to infix symbols and to decide (by priority)
     * when parentheses are required.
     *
     * @author jmagnusson
     */
    static class LogicalExpressionPlanSerializer {

        private static final Map<String, Op> SPECOPS = createMap();

        // An infix operator symbol plus its precedence (higher binds tighter).
        private static class Op {
            protected String string;
            protected Integer priority;

            public Op(String string, Integer priority) {
                this.string = string;
                this.priority = priority;
            }
        }

        // Builds the operator-name -> infix-symbol/priority table.
        private static Map<String, Op> createMap() {
            Map<String, Op> aMap = new HashMap<String, Op>();
            aMap.put("Multiply", new Op(" * ", 7));
            aMap.put("Divide", new Op(" / ", 7));
            aMap.put("Add", new Op(" + ", 6));
            aMap.put("Subtract", new Op(" - ", 6));
            aMap.put("GreaterThan", new Op(" > ", 5));
            aMap.put("GreaterThanEqual", new Op(" >= ", 5));
            aMap.put("LessThan", new Op(" < ", 5));
            aMap.put("LessThanEqual", new Op(" <= ", 5));
            aMap.put("Equal", new Op(" == ", 4));
            aMap.put("NotEqual", new Op(" != ", 4));
            // aMap.put("Not", new Op("!", 3));
            aMap.put("And", new Op(" and ", 2));
            aMap.put("Or", new Op(" or ", 1));
            return Collections.unmodifiableMap(aMap);
        }

        // Parenthesizes str when the parent operator binds tighter than the
        // child, i.e. when precedence would otherwise be lost in the output.
        private static String paren(String str, Operator parent, Operator child) {
            String pKey = parent.getName();
            String cKey = child.getName();
            if (SPECOPS.containsKey(cKey) && SPECOPS.containsKey(pKey)) {
                if (SPECOPS.get(pKey).priority > SPECOPS.get(cKey).priority) {
                    return "(" + str + ")";
                }
            }
            return str;
        }

        // Recursively renders one expression node (and its subtree) as text.
        // Handles, in order: constants, ternary (BinCond), map lookups, UDF
        // calls, infix operators from SPECOPS, casts (rendered transparently),
        // aliased fields, and positional projections ($n).
        private static String nodeToString(LogicalExpression node, LogicalExpressionPlan plan) throws FrontendException {
            if (node instanceof ConstantExpression) {
                Object value = ((ConstantExpression) node).getValue();
                if (value != null) {
                    return value.toString();
                } else {
                    // This should ONLY happen in edge cases when the constant expression is built
                    // incorrectly, eg with the ASSERT operator in the case where there's no message
                    return "null";
                }
            }
            List<String> outList = Lists.newArrayList();
            List<Operator> s = plan.getSuccessors(node);
            if (s != null && !s.isEmpty()) {
                ListIterator<Operator> iter = s.listIterator();
                if (node instanceof BinCondExpression) {
                    BinCondExpression n = (BinCondExpression) node;
                    return "("
                            + nodeToString(n.getCondition(), plan)
                            + " ? "
                            + nodeToString(n.getLhs(), plan)
                            + " : "
                            + nodeToString(n.getRhs(), plan)
                            + ")";
                }
                // Render each child, appending #'key' for map lookups and
                // adding parens where child precedence requires it.
                while (iter.hasNext()) {
                    Operator op = iter.next();
                    String outString = nodeToString((LogicalExpression) op, plan);
                    if (node instanceof MapLookupExpression) {
                        outString += "#'" + ((MapLookupExpression) node).getLookupKey() + "'";
                    }
                    outList.add(paren(outString, node, op));
                }
                if (node instanceof MapLookupExpression) {
                    return StringUtils.join(outList, ", ");
                }
                if (node instanceof UserFuncExpression) {
                    // Render a UDF as SimpleClassName(arg1, arg2, ...).
                    String className = ((UserFuncExpression) node).getFuncSpec().getClassName();
                    ArrayList<String> split = Lists.newArrayList(StringUtils.split(className, "."));
                    className = split.get(split.size() - 1);
                    return className + '(' + StringUtils.join(outList, ", ") + ')';
                }
                if (SPECOPS.containsKey(node.getName())) {
                    // Unary use of an infix operator: prefix the symbol.
                    if (outList.size() == 1) {
                        return SPECOPS.get(node.getName()).string + outList.get(0);
                    }
                    return StringUtils.join(outList, SPECOPS.get(node.getName()).string);
                }
                String name = node.getName();
                if (name.equals("Cast")) {
                    // Casts are invisible in the rendered expression.
                    return outList.get(0);
                }
                return name + '(' + StringUtils.join(outList, ", ") + ')';
            }
            if (node.getFieldSchema().alias != null) {
                return node.getFieldSchema().alias;
            }
            if (node instanceof ProjectExpression) {
                return '$' + ((Integer) ((ProjectExpression) node).getColNum()).toString();
            }
            return "?";
        }

        /**
         * Convert a LogicalExpressionPlan to a human readable string.
         *
         * @param src the LogicalExpressionPlan
         * @return a human readable string; "" when the plan has no sources
         */
        public static String serialize(LogicalExpressionPlan src) {
            List<Operator> sources = src.getSources();
            if (!sources.isEmpty()) {
                try {
                    return nodeToString((LogicalExpression) src.getSources().get(0), src);
                } catch (FrontendException e) {
                    LOG.error(e);
                    throw new RuntimeException(e);
                }
            }
            return "";
        }
    }
}
| 337 |
0 | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick/adaptors/LOLoadJsonAdaptor.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.lipstick.adaptors;
import org.apache.commons.lang.StringUtils;
import org.apache.pig.impl.logicalLayer.FrontendException;
import org.apache.pig.newplan.logical.relational.LOLoad;
import org.apache.pig.newplan.logical.relational.LogicalPlan;
import com.netflix.lipstick.model.operators.P2jLOLoad;
/**
 * LOLoad to Lipstick model adaptor.
 *
 * @author jmagnusson
 *
 */
public class LOLoadJsonAdaptor extends LOJsonAdaptor {

    /**
     * Builds a P2jLOLoad from the given LOLoad, recording where the data is
     * read from and the simple name of the load function used.
     *
     * @param node the LOLoad to adapt
     * @param lp the logical plan containing node
     * @throws FrontendException
     */
    public LOLoadJsonAdaptor(LOLoad node, LogicalPlan lp) throws FrontendException {
        super(node, new P2jLOLoad(), lp);
        P2jLOLoad model = (P2jLOLoad) p2j;
        model.setStorageLocation(node.getFileSpec().getFileName());
        // Keep only the simple class name of the fully-qualified load func.
        String funcName = node.getFileSpec().getFuncName();
        String[] parts = StringUtils.split(funcName, ".");
        model.setStorageFunction(parts[parts.length - 1]);
    }
}
| 338 |
0 | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick/adaptors/LOSplitOutputJsonAdaptor.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.lipstick.adaptors;
import org.apache.pig.impl.logicalLayer.FrontendException;
import org.apache.pig.newplan.logical.relational.LOSplitOutput;
import org.apache.pig.newplan.logical.relational.LogicalPlan;
import com.netflix.lipstick.model.operators.P2jLOSplitOutput;
/**
 * LOSplit to Lipstick model adaptor.
 *
 * @author jmagnusson
 */
public class LOSplitOutputJsonAdaptor extends LOJsonAdaptor {

    /**
     * Builds a P2jLOSplitOutput from the given LOSplitOutput, recording the
     * serialized filter expression that selects rows for this split branch.
     *
     * @param node the LOSplitOutput to adapt to P2jLOSplitOutput
     * @param lp the logical plan containing node
     * @throws FrontendException
     */
    public LOSplitOutputJsonAdaptor(LOSplitOutput node, LogicalPlan lp) throws FrontendException {
        super(node, new P2jLOSplitOutput(), lp);
        String expression = LogicalExpressionPlanSerializer.serialize(node.getFilterPlan());
        ((P2jLOSplitOutput) p2j).setExpression(expression);
    }
}
| 339 |
0 | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick/adaptors/LOJoinJsonAdaptor.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.lipstick.adaptors;
import java.util.List;
import java.util.Map;
import org.apache.pig.impl.logicalLayer.FrontendException;
import org.apache.pig.impl.util.MultiMap;
import org.apache.pig.newplan.Operator;
import org.apache.pig.newplan.logical.expression.LogicalExpressionPlan;
import org.apache.pig.newplan.logical.relational.LOJoin;
import org.apache.pig.newplan.logical.relational.LogicalPlan;
import org.apache.pig.newplan.logical.relational.LogicalRelationalOperator;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.lipstick.model.operators.P2jLOJoin;
/**
 * LOJoin to Lipstick model adaptor.
 *
 * @author jmagnusson
 *
 */
public class LOJoinJsonAdaptor extends LOJsonAdaptor {

    /**
     * Builds a P2jLOJoin from the given LOJoin, recording the join strategy
     * (hash/merge/etc), the inner/outer join type, and the per-alias join
     * expressions.
     *
     * @param node LOJoin operator to convert to P2jLOJoin.
     * @param lp the LogicalPlan containing node
     * @throws FrontendException
     */
    public LOJoinJsonAdaptor(LOJoin node, LogicalPlan lp) throws FrontendException {
        super(node, new P2jLOJoin(), lp);
        P2jLOJoin model = (P2jLOJoin) p2j;
        String strategy = node.getJoinType().toString();
        model.setJoin(strategy, getJoinType(node), getJoinExpressions(node, lp));
    }

    /**
     * Maps each input alias to the serialized list of expressions that
     * alias is joined on.
     *
     * @param node the LOJoin operator
     * @param lp the LogicalPlan containing node
     * @return a map of alias name to ordered list of join-key expressions
     */
    protected Map<String, List<String>> getJoinExpressions(LOJoin node, LogicalPlan lp) {
        Map<String, List<String>> byAlias = Maps.newHashMap();
        List<Operator> inputs = node.getInputs(lp);
        MultiMap<Integer, LogicalExpressionPlan> planMap = node.getExpressionPlans();
        for (Integer inputIdx : planMap.keySet()) {
            List<String> serialized = Lists.newArrayList();
            for (LogicalExpressionPlan plan : planMap.get(inputIdx)) {
                serialized.add(LogicalExpressionPlanSerializer.serialize(plan));
            }
            String alias = String.valueOf(((LogicalRelationalOperator) inputs.get(inputIdx)).getAlias());
            byAlias.put(alias, serialized);
        }
        return byAlias;
    }

    /**
     * Derives the join type from the operator's inner flags: all inner ->
     * INNER, none inner -> FULL OUTER, otherwise LEFT or RIGHT OUTER
     * depending on which side is inner.
     *
     * @param node the LOJoin operator to inspect
     * @return a string describing the join type
     */
    protected String getJoinType(LOJoin node) {
        boolean[] flags = node.getInnerFlags();
        int innerCount = 0;
        for (int idx = 0; idx < flags.length; idx++) {
            if (flags[idx]) {
                innerCount++;
            }
        }
        if (innerCount == flags.length) {
            return "INNER";
        }
        if (innerCount == 0) {
            return "FULL OUTER";
        }
        return flags[0] ? "LEFT OUTER" : "RIGHT OUTER";
    }
}
| 340 |
0 | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick/adaptors/LOCogroupJsonAdaptor.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.lipstick.adaptors;
import java.util.List;
import java.util.Map;
import org.apache.pig.impl.logicalLayer.FrontendException;
import org.apache.pig.impl.util.MultiMap;
import org.apache.pig.newplan.Operator;
import org.apache.pig.newplan.logical.expression.LogicalExpressionPlan;
import org.apache.pig.newplan.logical.relational.LOCogroup;
import org.apache.pig.newplan.logical.relational.LogicalPlan;
import org.apache.pig.newplan.logical.relational.LogicalRelationalOperator;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.lipstick.model.operators.P2jLOCogroup;
/**
 * Adaptor for LOCogroup to Lipstick model.
 *
 * @author jmagnusson
 *
 */
public class LOCogroupJsonAdaptor extends LOJsonAdaptor {

    /**
     * Builds a P2jLOCogroup from the given LOCogroup, recording the group
     * strategy, the inner/outer group type, and the per-alias group
     * expressions.
     *
     * @param node LOCogroup operator to convert to P2jLOCogroup.
     * @param lp the LogicalPlan containing node
     * @throws FrontendException
     */
    public LOCogroupJsonAdaptor(LOCogroup node, LogicalPlan lp) throws FrontendException {
        super(node, new P2jLOCogroup(), lp);
        P2jLOCogroup model = (P2jLOCogroup) p2j;
        String strategy = node.getGroupType().toString();
        model.setGroup(strategy, getGroupType(node), getGroupExpressions(node, lp));
    }

    /**
     * Derives the group type: INNER only when every input is flagged inner,
     * otherwise OUTER.
     *
     * @param node the LOCogroup operator to inspect
     * @return a string describing the group type
     */
    protected String getGroupType(LOCogroup node) {
        for (boolean inner : node.getInner()) {
            if (!inner) {
                return "OUTER";
            }
        }
        return "INNER";
    }

    /**
     * Maps each input alias to the serialized list of expressions that
     * alias is grouped on.
     *
     * @param node the LOCogroup operator
     * @param lp the LogicalPlan containing node
     * @return a map of alias name to ordered list of group-key expressions
     */
    protected Map<String, List<String>> getGroupExpressions(LOCogroup node, LogicalPlan lp) {
        Map<String, List<String>> byAlias = Maps.newHashMap();
        List<Operator> inputs = node.getInputs(lp);
        MultiMap<Integer, LogicalExpressionPlan> planMap = node.getExpressionPlans();
        for (Integer inputIdx : planMap.keySet()) {
            List<String> serialized = Lists.newArrayList();
            for (LogicalExpressionPlan plan : planMap.get(inputIdx)) {
                serialized.add(LogicalExpressionPlanSerializer.serialize(plan));
            }
            String alias = String.valueOf(((LogicalRelationalOperator) inputs.get(inputIdx)).getAlias());
            byAlias.put(alias, serialized);
        }
        return byAlias;
    }
}
| 341 |
0 | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick/adaptors/LOFilterJsonAdaptor.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.lipstick.adaptors;
import org.apache.pig.impl.logicalLayer.FrontendException;
import org.apache.pig.newplan.logical.relational.LOFilter;
import org.apache.pig.newplan.logical.relational.LogicalPlan;
import com.netflix.lipstick.model.operators.P2jLOFilter;
/**
 * LOFilter to Lipstick model adaptor.
 *
 * @author jmagnusson
 *
 */
public class LOFilterJsonAdaptor extends LOJsonAdaptor {

    /**
     * Builds a P2jLOFilter from the given LOFilter, recording the serialized
     * filter expression.
     *
     * @param node the LOFilter operator to adapt
     * @param lp the LogicalPlan containing node
     * @throws FrontendException
     */
    public LOFilterJsonAdaptor(LOFilter node, LogicalPlan lp) throws FrontendException {
        super(node, new P2jLOFilter(), lp);
        ((P2jLOFilter) p2j).setExpression(LogicalExpressionPlanSerializer.serialize(node.getFilterPlan()));
    }
}
| 342 |
0 | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick/adaptors/LOLimitJsonAdaptor.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.lipstick.adaptors;
import org.apache.pig.impl.logicalLayer.FrontendException;
import org.apache.pig.newplan.logical.relational.LOLimit;
import org.apache.pig.newplan.logical.relational.LogicalPlan;
import com.netflix.lipstick.model.operators.P2jLOLimit;
/**
 * LOLimit to Lipstick model adaptor.
 *
 * @author jmagnusson
 *
 */
public class LOLimitJsonAdaptor extends LOJsonAdaptor {

    /**
     * Builds a P2jLOLimit from the given LOLimit, recording the row limit.
     *
     * @param node the LOLimit operator to convert to P2jLOLimit
     * @param lp the logical plan containing node
     * @throws FrontendException
     */
    public LOLimitJsonAdaptor(LOLimit node, LogicalPlan lp) throws FrontendException {
        super(node, new P2jLOLimit(), lp);
        P2jLOLimit model = (P2jLOLimit) p2j;
        model.setRowLimit(node.getLimit());
    }
}
| 343 |
0 | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick/adaptors/LOStoreJsonAdaptor.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.lipstick.adaptors;
import org.apache.commons.lang.StringUtils;
import org.apache.pig.impl.logicalLayer.FrontendException;
import org.apache.pig.newplan.logical.relational.LOStore;
import org.apache.pig.newplan.logical.relational.LogicalPlan;
import com.netflix.lipstick.model.operators.P2jLOStore;
/**
 * LOStore to Lipstick model adaptor.
 *
 * @author jmagnusson
 *
 */
public class LOStoreJsonAdaptor extends LOJsonAdaptor {

    /**
     * Builds a P2jLOStore from the given LOStore, recording where the data
     * is written and the simple name of the storage function used.
     *
     * @param node the LOStore object to adapt
     * @param lp the logical plan containing node
     * @throws FrontendException
     */
    public LOStoreJsonAdaptor(LOStore node, LogicalPlan lp) throws FrontendException {
        super(node, new P2jLOStore(), lp);
        P2jLOStore model = (P2jLOStore) p2j;
        model.setStorageLocation(node.getFileSpec().getFileName());
        // Keep only the simple class name of the fully-qualified store func.
        String funcName = node.getFileSpec().getFuncName();
        String[] parts = StringUtils.split(funcName, ".");
        model.setStorageFunction(parts[parts.length - 1]);
    }
}
| 344 |
0 | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick/model/P2jPlanStatus.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.lipstick.model;
import java.util.Date;
import java.util.Map;
import java.util.Set;
import javax.persistence.CascadeType;
import javax.persistence.Entity;
import javax.persistence.EnumType;
import javax.persistence.Enumerated;
import javax.persistence.GeneratedValue;
import javax.persistence.Id;
import javax.persistence.OneToMany;
import javax.persistence.Transient;
import org.codehaus.jackson.annotate.JsonIgnore;
import com.google.common.collect.Maps;
/**
* Lipstick Pig job overall status.
*
* @author jmagnusson
*
*/
@Entity
public class P2jPlanStatus {
private long id;
private Map<String, P2jJobStatus> jobStatusMap;
private int progress = 0;
private Date startTime = null;
private Date endTime = null;
private Date heartbeatTime = null;
private StatusText statusText = null;
// Coarse lifecycle states reported for the overall plan.
public static enum StatusText {
    finished, running, terminated, failed
}
/**
 * Creates a default P2jPlanStatus object with an empty job status map.
 */
public P2jPlanStatus() {
    jobStatusMap = Maps.newHashMap();
}
// Persistence identity, generated by the JPA provider.
@Id
@GeneratedValue
public long getId() {
    return id;
}
/**
 * Looks up the status of a single job by its id.
 *
 * @param jid the job id to look up
 * @return the matching P2jJobStatus, or null if the job is unknown
 */
public P2jJobStatus getJob(String jid) {
    // Map.get already yields null for absent keys, so no containment
    // check is needed.
    return jobStatusMap.get(jid);
}
// Convenience view of the known job ids; excluded from persistence and
// JSON serialization.
@Transient
@JsonIgnore
public Set<String> getJobList() {
    return jobStatusMap.keySet();
}
// Map of job id to per-job status; cascaded so job statuses are
// persisted with the plan status.
@OneToMany(cascade = CascadeType.ALL)
public Map<String, P2jJobStatus> getJobStatusMap() {
    return jobStatusMap;
}
/**
 * Returns the current progress for the P2jPlanStatus object.
 * Progress should have the range [0, 100].
 *
 * @return overall plan progress as a percentage
 */
public int getProgress() {
    return progress;
}
/**
 * Returns the current end time for the P2jPlanStatus object.
 * End time should be null if the plan hasn't finished.
 *
 * @return the plan end time, or null if not finished
 */
public Date getEndTime() {
    return endTime;
}
/**
 * Returns the current start time for the P2jPlanStatus object.
 * Start time should be null if the plan hasn't started.
 *
 * @return the plan start time, or null if not started
 */
public Date getStartTime() {
    return startTime;
}
@Enumerated(EnumType.STRING)
public StatusText getStatusText() {
return statusText;
}
/**
* Returns the most recent heartbeat time for the P2jPlanStatus object.
*
* @return
*/
public Date getHeartbeatTime() {
return heartbeatTime;
}
/**
* Sets the statusText for the P2jPlanStatus object.
*
* @param statusText
*/
public void setStatusText(StatusText statusText) {
this.statusText = statusText;
}
/**
* Checks if the plan has a job with the
* id of the passed in P2jJobStatus.
*
* @param job
* @return
*/
public boolean hasJob(P2jJobStatus job) {
return hasJob(job.getJobId());
}
/**
* Checks if the plan has a job with
* id matching the given jid.
*
* @param jid
* @return
*/
public boolean hasJob(String jid) {
return jobStatusMap.containsKey(jid);
}
/**
* Sets the id for the P2jPlanStatus object.
*
* @param id
*/
public void setId(long id) {
this.id = id;
}
/**
* Sets the jobStatusMap for the P2jPlanStatus object.
*
* @param jobStatusMap
*/
public void setJobStatusMap(Map<String, P2jJobStatus> jobStatusMap) {
this.jobStatusMap = jobStatusMap;
}
/**
* Sets the progress for the P2jPlanStatus object.
*
* @param progress
* @return
*/
public P2jPlanStatus setProgress(int progress) {
this.progress = progress;
return this;
}
/**
* Sets the startTime for the P2jPlanStatus object to the current time.
*
* @return
*/
public P2jPlanStatus setStartTime() {
startTime = new Date();
return this;
}
/**
* Sets the startTime for the P2jPlanStatus object.
*
* @param startTime
* @return
*/
public P2jPlanStatus setStartTime(Date startTime) {
this.startTime = startTime;
return this;
}
/**
* Sets the endTime for the P2jPlanStatus object to the current time.
*
* @return
*/
public P2jPlanStatus setEndTime() {
endTime = new Date();
return this;
}
/**
* Sets the endTime for the P2jPlanStatus object.
*
* @param endTime
* @return
*/
public P2jPlanStatus setEndTime(Date endTime) {
this.endTime = endTime;
return this;
}
/**
* Sets the heartbeat time for the P2jPlanStatus object to the current time.
*/
public void setHeartbeatTime() {
this.heartbeatTime = new Date();
}
/**
* Sets the heartbeat time for the P2jPlanStatus object.
*
* @param heartbeatTime
*/
public void setHeartbeatTime(Date heartbeatTime) {
this.heartbeatTime = heartbeatTime;
}
/**
* Updates the job reference that the plan has
* based on id of the passed in P2jJobStatus.
*
* @param job
* @return
*/
public P2jPlanStatus updateWith(P2jJobStatus job) {
jobStatusMap.put(job.getJobId(), job);
return this;
}
/**
* Updates this plan with non-null information from the passed in plan.
*
* @param plan
* @return
*/
public P2jPlanStatus updateWith(P2jPlanStatus plan) {
if (plan.getProgress() > 0) {
setProgress(plan.getProgress());
}
if (plan.getStartTime() != null) {
setStartTime(plan.getStartTime());
}
if (plan.getEndTime() != null) {
setEndTime(plan.getEndTime());
}
if (plan.getHeartbeatTime() != null) {
setHeartbeatTime(plan.getHeartbeatTime());
}
if (plan.getStatusText() != null) {
setStatusText(plan.getStatusText());
}
for (String jid : plan.getJobList()) {
this.updateWith(plan.getJob(jid));
}
return this;
}
}
/**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.lipstick.model;
import java.util.Map;
import javax.persistence.CascadeType;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.Id;
import javax.persistence.Lob;
import javax.persistence.OneToMany;
import com.netflix.lipstick.model.operators.P2jLogicalRelationalOperator;
/**
 * Container for a logical plan: a map of operator uid to operator, plus an
 * optional svg rendering of the plan graph.
 *
 * @author jmagnusson
 *
 */
@Entity
public class P2jPlan {

    private long id;
    private Map<String, P2jLogicalRelationalOperator> plan = null;
    private String svg = null;

    /**
     * Construct an empty P2jPlan object.
     */
    public P2jPlan() {
    }

    /**
     * Construct a P2jPlan holding the given operator map.
     *
     * @param plan
     */
    public P2jPlan(Map<String, P2jLogicalRelationalOperator> plan) {
        this.plan = plan;
    }

    /**
     * Construct a P2jPlan holding the given operator map and svg rendering.
     *
     * @param plan
     * @param svg
     */
    public P2jPlan(Map<String, P2jLogicalRelationalOperator> plan, String svg) {
        this(plan);
        this.svg = svg;
    }

    @Id
    @GeneratedValue
    public long getId() {
        return id;
    }

    public void setId(long id) {
        this.id = id;
    }

    /**
     * Get the logical plan.
     *
     * @return the logical plan as a map of uid to P2jLogicalRelationalOperator.
     */
    @OneToMany(cascade = CascadeType.ALL)
    public Map<String, P2jLogicalRelationalOperator> getPlan() {
        return plan;
    }

    public void setPlan(Map<String, P2jLogicalRelationalOperator> plan) {
        this.plan = plan;
    }

    /**
     * Get the graphical representation of the plan in svg format.
     *
     * @return a string containing the svg rendering of the logical plan
     */
    @Lob
    public String getSvg() {
        return svg;
    }

    public void setSvg(String svg) {
        this.svg = svg;
    }
}
/**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.lipstick.model;
import java.util.List;
import java.util.Map;
import org.apache.pig.data.DataType;
import org.apache.pig.impl.logicalLayer.schema.Schema;
import org.apache.pig.newplan.logical.Util;
import org.apache.pig.newplan.logical.relational.LogicalSchema;
import org.apache.pig.newplan.logical.relational.LogicalSchema.LogicalFieldSchema;
import org.apache.pig.parser.ParserException;
import com.google.common.collect.Lists;
import com.netflix.lipstick.model.operators.elements.SchemaElement;
/**
 * Utilities for converting Pig schemas into Lipstick SchemaElement lists.
 *
 * @author jmagnusson
 */
public final class Utils {

    /** Mapping of Pig type id to its human-readable type name. */
    protected static final Map<Byte, String> TYPESMAP = DataType.genTypeToNameMap();

    private Utils() { }

    /**
     * Produces a list of Lipstick SchemaElements from a string representation
     * of a schema.
     *
     * @param schemaString the schema string
     * @return a list of schema elements
     * @throws ParserException the parser exception
     */
    public static List<SchemaElement> processSchema(String schemaString) throws ParserException {
        // Dots are swapped for underscores before parsing, and the first and
        // last characters (the enclosing delimiters) are stripped off.
        String normalized = schemaString.replace(".", "_");
        normalized = normalized.substring(1, normalized.length() - 1);
        Schema parsed = org.apache.pig.impl.util.Utils.getSchemaFromString(normalized);
        return processSchema(Util.translateSchema(parsed));
    }

    /**
     * Produces a list of Lipstick SchemaElements given a LogicalSchema,
     * recursing into nested field schemas.
     *
     * @param src the LogicalSchema (may be null)
     * @return a list of SchemaElements, or null when src is null
     */
    public static List<SchemaElement> processSchema(LogicalSchema src) {
        if (src == null) {
            return null;
        }
        List<SchemaElement> elements = Lists.newArrayList();
        for (LogicalFieldSchema field : src.getFields()) {
            SchemaElement element = new SchemaElement();
            if (field.alias != null) {
                element.setAlias(field.alias.toString());
            }
            element.setType(TYPESMAP.get(field.type));
            if (field.schema != null) {
                element.setSchemaElements(processSchema(field.schema));
            }
            elements.add(element);
        }
        return elements;
    }
}
/**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.lipstick.model;
import java.util.Map;
import javax.persistence.CascadeType;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.Id;
import javax.persistence.OneToMany;
import javax.persistence.OneToOne;
/**
 * Top level Lipstick model object: bundles the optimized and unoptimized
 * plans, the originating script, run status, sample output, and identifying
 * metadata for a single Pig job.
 *
 * @author jmagnusson
 *
 */
@Entity
public class P2jPlanPackage {

    private long id;
    private P2jPlan optimized;
    private P2jScripts scripts;
    private P2jPlanStatus status;
    private P2jPlan unoptimized;
    private String userName;
    private String jobName;
    private String uuid;
    private Map<String, P2jSampleOutputList> sampleOutputMap;

    /**
     * Construct an empty P2jPlanPackage.
     */
    public P2jPlanPackage() {
    }

    /**
     * Construct a fully-populated P2jPlanPackage with a fresh status.
     *
     * @param optimized the optimized P2jPlan
     * @param unoptimized the unoptimized P2jPlan
     * @param script the script the plans were derived from
     * @param uuid a unique identifier for this object
     */
    public P2jPlanPackage(P2jPlan optimized, P2jPlan unoptimized, String script, String uuid) {
        this.optimized = optimized;
        this.unoptimized = unoptimized;
        this.scripts = new P2jScripts(script);
        this.status = new P2jPlanStatus();
        this.uuid = uuid;
    }

    @Id
    @GeneratedValue
    public long getId() {
        return id;
    }

    public void setId(long id) {
        this.id = id;
    }

    @OneToOne(cascade = CascadeType.ALL)
    public P2jPlan getOptimized() {
        return optimized;
    }

    public void setOptimized(P2jPlan optimized) {
        this.optimized = optimized;
    }

    @OneToOne(cascade = CascadeType.ALL)
    public P2jPlan getUnoptimized() {
        return unoptimized;
    }

    public void setUnoptimized(P2jPlan unoptimized) {
        this.unoptimized = unoptimized;
    }

    @OneToOne(cascade = CascadeType.ALL)
    public P2jScripts getScripts() {
        return scripts;
    }

    public void setScripts(P2jScripts scripts) {
        this.scripts = scripts;
    }

    @OneToOne(cascade = CascadeType.ALL)
    public P2jPlanStatus getStatus() {
        return status;
    }

    public void setStatus(P2jPlanStatus status) {
        this.status = status;
    }

    public String getUserName() {
        return userName;
    }

    public void setUserName(String userName) {
        this.userName = userName;
    }

    public String getJobName() {
        return jobName;
    }

    public void setJobName(String jobName) {
        this.jobName = jobName;
    }

    // uuid is the external lookup key, enforced unique at the database level.
    @Column(unique = true)
    public String getUuid() {
        return uuid;
    }

    public void setUuid(String uuid) {
        this.uuid = uuid;
    }

    @OneToMany(cascade = CascadeType.ALL)
    public Map<String, P2jSampleOutputList> getSampleOutputMap() {
        return sampleOutputMap;
    }

    public void setSampleOutputMap(Map<String, P2jSampleOutputList> sampleOutputMap) {
        this.sampleOutputMap = sampleOutputMap;
    }
}
/**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.lipstick.model;
import java.util.Map;
import javax.persistence.CascadeType;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.Id;
import javax.persistence.OneToMany;
/**
 * Container for the status of an individual map/reduce job: counters,
 * warnings, progress, timing, and output size information.
 *
 * @author jmagnusson
 *
 */
@Entity
public class P2jJobStatus {
    private Map<String, P2jCounters> counters;
    private Map<String, P2jWarning> warnings;
    private String scope;
    private String jobId;
    private String jobName;
    private String trackingUrl;
    private boolean isComplete;
    private boolean isSuccessful;
    private float mapProgress;
    private float reduceProgress;
    private int totalMappers;
    private int totalReducers;
    private long id;
    private long startTime;
    private long finishTime;
    private long recordsWritten;
    private long bytesWritten;

    /**
     * Initialize an empty P2jJobStatus object.
     */
    public P2jJobStatus() {
    }

    @Id
    @GeneratedValue
    public long getId() {
        return id;
    }

    public void setId(long id) {
        this.id = id;
    }

    /** Counter groups keyed by group name. */
    @OneToMany(cascade = CascadeType.ALL)
    public Map<String, P2jCounters> getCounters() {
        return counters;
    }

    public void setCounters(Map<String, P2jCounters> counters) {
        this.counters = counters;
    }

    /** Warnings keyed by warning key. */
    @OneToMany(cascade = CascadeType.ALL)
    public Map<String, P2jWarning> getWarnings() {
        return warnings;
    }

    public void setWarnings(Map<String, P2jWarning> warnings) {
        this.warnings = warnings;
    }

    public String getJobId() {
        return jobId;
    }

    public void setJobId(String jobId) {
        this.jobId = jobId;
    }

    public String getJobName() {
        return jobName;
    }

    public void setJobName(String jobName) {
        this.jobName = jobName;
    }

    public String getTrackingUrl() {
        return trackingUrl;
    }

    public void setTrackingUrl(String trackingUrl) {
        this.trackingUrl = trackingUrl;
    }

    public boolean getIsComplete() {
        return isComplete;
    }

    /**
     * Sets the completion flag.
     *
     * A null argument is treated as false: the field is a primitive boolean,
     * so unboxing a null Boolean directly would throw a NullPointerException
     * (e.g. when populated reflectively from an absent value).
     *
     * @param isComplete may be null, interpreted as false
     */
    public void setIsComplete(Boolean isComplete) {
        this.isComplete = Boolean.TRUE.equals(isComplete);
    }

    public boolean getIsSuccessful() {
        return isSuccessful;
    }

    public void setIsSuccessful(boolean isSuccessful) {
        this.isSuccessful = isSuccessful;
    }

    public float getMapProgress() {
        return mapProgress;
    }

    public void setMapProgress(float mapProgress) {
        this.mapProgress = mapProgress;
    }

    public float getReduceProgress() {
        return reduceProgress;
    }

    public void setReduceProgress(float reduceProgress) {
        this.reduceProgress = reduceProgress;
    }

    public int getTotalMappers() {
        return totalMappers;
    }

    public void setTotalMappers(int totalMappers) {
        this.totalMappers = totalMappers;
    }

    public int getTotalReducers() {
        return totalReducers;
    }

    public void setTotalReducers(int totalReducers) {
        this.totalReducers = totalReducers;
    }

    public String getScope() {
        return scope;
    }

    public void setScope(String scope) {
        this.scope = scope;
    }

    public long getStartTime() {
        return startTime;
    }

    public void setStartTime(long startTime) {
        this.startTime = startTime;
    }

    public long getFinishTime() {
        return finishTime;
    }

    public void setFinishTime(long finishTime) {
        this.finishTime = finishTime;
    }

    public long getRecordsWritten() {
        return recordsWritten;
    }

    public void setRecordsWritten(long recordsWritten) {
        this.recordsWritten = recordsWritten;
    }

    public long getBytesWritten() {
        return bytesWritten;
    }

    public void setBytesWritten(long bytesWritten) {
        this.bytesWritten = bytesWritten;
    }
}
/**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.lipstick.model;
import java.util.Map;
import javax.persistence.ElementCollection;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.Id;
import com.google.common.collect.Maps;
/**
 * Container for a warning: a keyed bag of attributes describing a single
 * warning about a pig script or map/reduce job.
 *
 * @author mroddy
 *
 */
@Entity
public class P2jWarning {

    private long id;
    private String jobId;
    private String warningKey;
    private Map<String, String> warningAttributes = Maps.newHashMap();

    @Id
    @GeneratedValue
    public long getId() {
        return id;
    }

    public void setId(long id) {
        this.id = id;
    }

    public String getJobId() {
        return jobId;
    }

    public void setJobId(String jobId) {
        this.jobId = jobId;
    }

    public String getWarningKey() {
        return warningKey;
    }

    public void setWarningKey(String warningKey) {
        this.warningKey = warningKey;
    }

    /** Free-form attributes specific to this warning. */
    @ElementCollection
    public Map<String, String> getWarningAttributes() {
        return warningAttributes;
    }

    public void setWarningAttributes(Map<String, String> warningAttributes) {
        this.warningAttributes = warningAttributes;
    }
}
/**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.lipstick.model;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.Id;
import javax.persistence.Lob;
/**
 * Container for sample output of a store operation in a map/reduce job:
 * the schema of the sampled records plus the sampled data itself.
 *
 * @author jmagnusson
 *
 */
@Entity
public class P2jSampleOutput {

    private long id;
    private String schemaString;
    private String sampleOutput;

    @Id
    @GeneratedValue
    public long getId() {
        return id;
    }

    public void setId(long id) {
        this.id = id;
    }

    /** Schema of the sampled records; stored as a large object. */
    @Lob
    public String getSchemaString() {
        return schemaString;
    }

    public void setSchemaString(String schemaString) {
        this.schemaString = schemaString;
    }

    /** The sampled output records; stored as a large object. */
    @Lob
    public String getSampleOutput() {
        return sampleOutput;
    }

    public void setSampleOutput(String sampleOutput) {
        this.sampleOutput = sampleOutput;
    }
}
/**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.lipstick.model;
import java.util.List;
import javax.persistence.CascadeType;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.Id;
import javax.persistence.OneToMany;
import com.google.common.collect.Lists;
/**
 * Container for the set of sample output data produced by a map/reduce job.
 *
 * @author jmagnusson
 *
 */
@Entity
public class P2jSampleOutputList {

    private List<P2jSampleOutput> sampleOutputList;
    private long id;

    /**
     * Construct an empty P2jSampleOutputList.
     */
    public P2jSampleOutputList() {
    }

    @Id
    @GeneratedValue
    public long getId() {
        return id;
    }

    public void setId(long id) {
        this.id = id;
    }

    @OneToMany(cascade = CascadeType.ALL)
    public List<P2jSampleOutput> getSampleOutputList() {
        return sampleOutputList;
    }

    public void setSampleOutputList(List<P2jSampleOutput> sampleOutputList) {
        this.sampleOutputList = sampleOutputList;
    }

    /**
     * Appends a P2jSampleOutput, creating the backing list lazily on
     * first use.
     *
     * @param sampleOutput the P2jSampleOutput to add
     */
    public void add(P2jSampleOutput sampleOutput) {
        List<P2jSampleOutput> target = sampleOutputList;
        if (target == null) {
            target = Lists.newArrayList();
            sampleOutputList = target;
        }
        target.add(sampleOutput);
    }
}
/**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.lipstick.model;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.Id;
import javax.persistence.Lob;
/**
 * Model object holding the text of a pig script.
 *
 * @author jmagnusson
 *
 */
@Entity
public class P2jScripts {

    private long id;
    private String script;

    /**
     * Create an empty P2jScripts.
     */
    public P2jScripts() {
    }

    /**
     * Create a P2jScripts wrapping the given script text.
     *
     * @param script a pig script
     */
    public P2jScripts(String script) {
        this.script = script;
    }

    @Id
    @GeneratedValue
    public long getId() {
        return id;
    }

    public void setId(long id) {
        this.id = id;
    }

    /** The script text; stored as a large object. */
    @Lob
    public String getScript() {
        return script;
    }

    public void setScript(String script) {
        this.script = script;
    }
}
/**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.lipstick.model;
import java.util.Map;
import javax.persistence.ElementCollection;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.Id;
import com.google.common.collect.Maps;
/**
 * Container for Map/Reduce job counters, keyed by counter name.
 *
 * @author jmagnusson
 *
 */
@Entity
public class P2jCounters {

    private Map<String, Long> counters = Maps.newHashMap();
    private long id;

    @Id
    @GeneratedValue
    public long getId() {
        return id;
    }

    public void setId(long id) {
        this.id = id;
    }

    @ElementCollection
    public Map<String, Long> getCounters() {
        return counters;
    }

    public void setCounters(Map<String, Long> counters) {
        this.counters = counters;
    }
}
/**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.lipstick.model.operators;
import javax.persistence.Column;
import javax.persistence.Entity;
/**
 * Lipstick model object for the LOFilter operator; carries the filter's
 * boolean expression as text.
 *
 * @author jmagnusson
 *
 */
@Entity
public class P2jLOFilter extends P2jLogicalRelationalOperator {

    private String expression;

    // Filter expressions can be lengthy, so widen the column beyond the default.
    @Column(length = 2048)
    public String getExpression() {
        return expression;
    }

    public void setExpression(String expression) {
        this.expression = expression;
    }
}
/**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.lipstick.model.operators;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import javax.persistence.CascadeType;
import javax.persistence.CollectionTable;
import javax.persistence.ElementCollection;
import javax.persistence.Embeddable;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.Id;
import javax.persistence.Inheritance;
import javax.persistence.InheritanceType;
import javax.persistence.Lob;
import javax.persistence.OneToMany;
import javax.persistence.Transient;
import org.apache.pig.parser.ParserException;
import org.codehaus.jackson.annotate.JsonTypeInfo;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.lipstick.model.Utils;
import com.netflix.lipstick.model.operators.elements.JoinExpression;
import com.netflix.lipstick.model.operators.elements.SchemaElement;
/**
* Base Lipstick model object for logical operators.
*
* @author jmagnusson
*
*/
@Entity
@Inheritance(strategy = InheritanceType.SINGLE_TABLE)
@JsonTypeInfo(use = JsonTypeInfo.Id.CLASS, include = JsonTypeInfo.As.PROPERTY, property = "@class")
public class P2jLogicalRelationalOperator {
@Embeddable
public static class Join {
private Map<String, JoinExpression> expression;
private String strategy;
private String type;
/**
* Creates a default Join object.
*/
public Join() {
}
/**
* Creates a Join object with the given strategy, type, and expression map.
*
* @param strategy
* @param type
* @param expression
*/
public Join(String strategy, String type, Map<String, List<String>> expression) {
this.strategy = strategy;
this.type = type;
this.expression = Maps.newHashMap();
for (Entry<String, List<String>> e : expression.entrySet()) {
this.expression.put(e.getKey(), new JoinExpression(e.getValue()));
}
}
@OneToMany(cascade = CascadeType.ALL)
public Map<String, JoinExpression> getExpression() {
return expression;
}
public void setExpression(Map<String, JoinExpression> expression) {
this.expression = expression;
}
public String getStrategy() {
return strategy;
}
public void setStrategy(String strategy) {
this.strategy = strategy;
}
public String getType() {
return type;
}
public void setType(String type) {
this.type = type;
}
}
@Embeddable
public static class Location {
private String filename = null;
private Integer line = null;
private List<String> macro = null;
/**
* Creates a default Location object.
*/
public Location() {
}
/**
* Creates a Location object with the given line, filename, and macro information.
*
* @param line
* @param filename
* @param macro
*/
public Location(Integer line, String filename, List<String> macro) {
this.line = line;
this.filename = filename;
this.macro = macro;
}
public String getFilename() {
return filename;
}
public void setFilename(String filename) {
this.filename = filename;
}
public Integer getLine() {
return line;
}
public void setLine(Integer line) {
this.line = line;
}
@ElementCollection
@CollectionTable(name = "StringCollection")
public List<String> getMacro() {
return macro;
}
public void setMacro(List<String> macro) {
this.macro = macro;
}
}
@Embeddable
public static class MRStage {
private String jobId = null;
private String stepType = null;
/**
* Creates a default MRStage object.
*/
public MRStage() {
}
/**
* Creates a MRStage object with the given jobId and stepType.
*
* @param jobId
* @param stepType
*/
public MRStage(String jobId, String stepType) {
this.jobId = jobId;
this.stepType = stepType;
}
public String getJobId() {
return jobId;
}
public void setJobId(String jobId) {
this.jobId = jobId;
}
public String getStepType() {
return stepType;
}
public void setStepType(String stepType) {
this.stepType = stepType;
}
}
private String alias;
private long id;
private Location location;
private MRStage mapReduce;
private String operator;
private List<String> predecessors;
private List<SchemaElement> schema;
private String schemaString;
private List<String> successors;
private String uid;
public String getAlias() {
return alias;
}
@Id
@GeneratedValue
public long getId() {
return id;
}
public Location getLocation() {
return location;
}
public MRStage getMapReduce() {
return mapReduce;
}
public String getOperator() {
return operator;
}
@ElementCollection
public List<String> getPredecessors() {
return predecessors;
}
@Transient
public List<SchemaElement> getSchema() {
return schema;
}
@Lob
public String getSchemaString() {
return schemaString;
}
@ElementCollection
public List<String> getSuccessors() {
return successors;
}
public String getUid() {
return uid;
}
public void setAlias(String alias) {
this.alias = alias;
}
public void setId(long id) {
this.id = id;
}
/**
* Creates a Location object from the line, filename, and macro. Assigns it as the
* P2jLogicalRelationalOperator's location field.
*
* @param line
* @param filename
* @param macro
*/
public void setLocation(int line, String filename, List<String> macro) {
setLocation(new Location(line, filename, macro));
}
public void setLocation(Location location) {
this.location = location;
}
public void setMapReduce(MRStage mapReduce) {
this.mapReduce = mapReduce;
}
/**
* Creates a MRStage object from the jobId and stepType. Assigns it as the
* P2jLogicalRelationalOperator's mapReduce field.
*
* @param jobId
* @param stepType
*/
public void setMapReduce(String jobId, String stepType) {
setMapReduce(new MRStage(jobId, stepType));
}
public void setOperator(String operator) {
this.operator = operator;
}
public void setPredecessors(List<String> predecessors) {
this.predecessors = predecessors;
}
/**
* Sets the P2jLogicalRelationalOperator's schema to the passed in schema,
* or to any empty list if the passed in schema is null.
*
* @param schema
*/
public void setSchema(List<SchemaElement> schema) {
if (schema == null) {
schema = Lists.newArrayList();
}
this.schema = schema;
}
    /**
     * Sets the P2jLogicalRelationalOperator's schemaString and derives the
     * structured schema from it via Utils.processSchema().
     *
     * On a parse failure the exception is only printed to stderr and the
     * previously-set schema is left in place, yet schemaString is still
     * updated.  NOTE(review): consider logging instead of printStackTrace().
     * On success the stored string has '.' replaced by '_' (presumably to
     * sanitize dotted field names for downstream display — confirm).
     * A null argument resets the schema to an empty list.
     *
     * @param schemaString the raw schema string, possibly null
     */
    public void setSchemaString(String schemaString) {
        if (schemaString != null) {
            try {
                setSchema(Utils.processSchema(schemaString));
            } catch (ParserException e) {
                // Swallowed after printing; schemaString below is still updated.
                e.printStackTrace();
            }
            this.schemaString = schemaString.replace(".", "_");
        } else {
            setSchema(null);
        }
    }
    /** @param successors identifiers of the downstream operators to set. */
    public void setSuccessors(List<String> successors) {
        this.successors = successors;
    }
    /** @param uid the unique identifier to assign to this operator. */
    public void setUid(String uid) {
        this.uid = uid;
    }
}
| 356 |
0 | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick/model | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick/model/operators/P2jLOStore.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.lipstick.model.operators;
import javax.persistence.Column;
import javax.persistence.Entity;
/**
* Lipstick model object for LOStore operator.
*
* @author jmagnusson
*
*/
@Entity
public class P2jLOStore extends P2jLogicalRelationalOperator {

    /** Location the relation is written to. */
    private String storageLocation;

    /** Store function used to write the relation. */
    private String storageFunction;

    /** @return the storage location. */
    @Column(length = 2048)
    public String getStorageLocation() {
        return storageLocation;
    }

    /** @param storageLocation the storage location to set. */
    public void setStorageLocation(String storageLocation) {
        this.storageLocation = storageLocation;
    }

    /** @return the store function name. */
    public String getStorageFunction() {
        return storageFunction;
    }

    /** @param storageFunction the store function name to set. */
    public void setStorageFunction(String storageFunction) {
        this.storageFunction = storageFunction;
    }
}
| 357 |
0 | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick/model | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick/model/operators/P2jLOLimit.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.lipstick.model.operators;
import javax.persistence.Entity;
/**
* Lipstick model object for LOLimit operator.
*
* @author jmagnusson
*
*/
@Entity
public class P2jLOLimit extends P2jLogicalRelationalOperator {

    /** @return the maximum number of rows this LIMIT passes through. */
    public long getRowLimit() {
        return rowLimit;
    }

    /** @param rowLimit the row limit to set. */
    public void setRowLimit(long rowLimit) {
        this.rowLimit = rowLimit;
    }

    /** Maximum number of rows passed through by the LIMIT operator. */
    private long rowLimit;
}
| 358 |
0 | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick/model | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick/model/operators/P2jLOSplitOutput.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.lipstick.model.operators;
import javax.persistence.Column;
import javax.persistence.Entity;
/**
* Lipstick model object for LOSplitOutput operator.
*
* @author jmagnusson
*
*/
@Entity
public class P2jLOSplitOutput extends P2jLogicalRelationalOperator {

    /** @return the filter expression for this split branch. */
    @Column(length = 2048)
    public String getExpression() {
        return expression;
    }

    /** @param expression the filter expression to set. */
    public void setExpression(String expression) {
        this.expression = expression;
    }

    /** Filter expression selecting rows for this split branch. */
    private String expression;
}
| 359 |
0 | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick/model | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick/model/operators/P2jLOJoin.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.lipstick.model.operators;
import java.util.List;
import java.util.Map;
import javax.persistence.Entity;
/**
* Lipstick model object for LOJoin operator.
*
* @author jmagnusson
*
*/
@Entity
public class P2jLOJoin extends P2jLogicalRelationalOperator {

    /** Join details: strategy, type, and per-alias join expressions. */
    private Join join;

    /** @return the join details. */
    public Join getJoin() {
        return join;
    }

    /** @param join the join details to set. */
    public void setJoin(Join join) {
        this.join = join;
    }

    /**
     * Convenience overload: builds the Join from its parts and assigns it.
     *
     * @param strategy the join strategy used
     * @param type the join type used
     * @param expression a map of alias to list of fields being joined
     */
    public void setJoin(String strategy, String type, Map<String, List<String>> expression) {
        this.join = new Join(strategy, type, expression);
    }
}
| 360 |
0 | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick/model | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick/model/operators/P2jLOLoad.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.lipstick.model.operators;
import javax.persistence.Column;
import javax.persistence.Entity;
/**
* Lipstick model object for LOLoad operator.
*
* @author jmagnusson
*
*/
@Entity
public class P2jLOLoad extends P2jLogicalRelationalOperator {

    /** Location the relation is read from. */
    private String storageLocation;

    /** Load function used to read the relation. */
    private String storageFunction;

    /** @return the storage location. */
    @Column(length = 2048)
    public String getStorageLocation() {
        return storageLocation;
    }

    /** @param storageLocation the storage location to set. */
    public void setStorageLocation(String storageLocation) {
        this.storageLocation = storageLocation;
    }

    /** @return the load function name. */
    public String getStorageFunction() {
        return storageFunction;
    }

    /** @param storageFunction the load function name to set. */
    public void setStorageFunction(String storageFunction) {
        this.storageFunction = storageFunction;
    }
}
| 361 |
0 | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick/model | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick/model/operators/P2jLOCogroup.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.lipstick.model.operators;
import java.util.List;
import java.util.Map;
import javax.persistence.Entity;
/**
* Lipstick model object for LOCogroup operator.
*
* @author jmagnusson
*
*/
@Entity
public class P2jLOCogroup extends P2jLogicalRelationalOperator {

    /** Grouping details: strategy, type, and per-alias group expressions. */
    private Join group;

    /** @return the grouping details. */
    public Join getGroup() {
        return group;
    }

    /** @param group the grouping details to set. */
    public void setGroup(Join group) {
        this.group = group;
    }

    /**
     * Convenience overload: builds the Join from its parts and assigns it.
     *
     * @param strategy the join strategy used
     * @param type the join type used
     * @param expression a map of alias to list of fields being joined
     */
    public void setGroup(String strategy, String type, Map<String, List<String>> expression) {
        this.group = new Join(strategy, type, expression);
    }
}
| 362 |
0 | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick/model/operators | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick/model/operators/elements/SchemaElement.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.lipstick.model.operators.elements;
import java.util.List;
import org.codehaus.jackson.annotate.JsonProperty;
/**
* Lipstick model object representing an individual field in a schema.
*
* @author jmagnusson
*
*/
public class SchemaElement {

    /** Field alias, or null if unset. */
    private String alias = null;

    /** Field type name, or null if unset. */
    private String type = null;

    /** Nested schema for compound fields, or null if the field is flat. */
    private List<SchemaElement> schemaElements = null;

    /** Constructs an empty SchemaElement. */
    public SchemaElement() {
    }

    /**
     * Constructs a populated schema element.
     *
     * @param alias element alias
     * @param type element type
     * @param uid uid of this element (accepted for API compatibility but not
     *            stored by this class)
     * @param schemaElements a schema embedded in this element, expressed as a
     *            list of SchemaElements
     */
    public SchemaElement(String alias, String type, Long uid, List<SchemaElement> schemaElements) {
        this.alias = alias;
        this.type = type;
        this.schemaElements = schemaElements;
    }

    /** @return the element alias. */
    public String getAlias() {
        return alias;
    }

    /** @param alias the element alias to set. */
    public void setAlias(String alias) {
        this.alias = alias;
    }

    /** @return the element type. */
    public String getType() {
        return type;
    }

    /** @param type the element type to set. */
    public void setType(String type) {
        this.type = type;
    }

    /** @return the nested schema, serialized under the JSON key "schema". */
    @JsonProperty("schema")
    public List<SchemaElement> getSchemaElements() {
        return schemaElements;
    }

    /** @param schemaElements the nested schema to set. */
    @JsonProperty("schema")
    public void setSchemaElements(List<SchemaElement> schemaElements) {
        this.schemaElements = schemaElements;
    }
}
| 363 |
0 | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick/model/operators | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick/model/operators/elements/JoinExpression.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.lipstick.model.operators.elements;
import java.util.List;
import javax.persistence.ElementCollection;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.Id;
/**
* Lipstick model object representing a join expression.
*
* @author jmagnusson
*
*/
@Entity
public class JoinExpression {

    /** Fields participating in the join for one relation. */
    private List<String> fields;

    /** Database-generated primary key. */
    private long id;

    /** Constructs an empty JoinExpression object. */
    public JoinExpression() {
    }

    /**
     * Constructs a JoinExpression over the given fields.
     *
     * @param fields the list of fields being joined
     */
    public JoinExpression(List<String> fields) {
        this.fields = fields;
    }

    /** @return the primary key of this entity. */
    @Id
    @GeneratedValue
    public long getId() {
        return id;
    }

    /** @param id the primary key; normally assigned by the persistence provider. */
    public void setId(long id) {
        this.id = id;
    }

    /** @return the fields being joined. */
    @ElementCollection
    public List<String> getFields() {
        return fields;
    }

    /** @param fields the fields being joined. */
    public void setFields(List<String> fields) {
        this.fields = fields;
    }
}
| 364 |
0 | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick/listeners/LipstickPPNL.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.lipstick.listeners;
import java.util.List;
import java.util.Properties;
import java.util.UUID;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.pig.LipstickPigServer;
import org.apache.pig.impl.PigContext;
import org.apache.pig.impl.plan.OperatorPlan;
import org.apache.pig.tools.pigstats.JobStats;
import org.apache.pig.tools.pigstats.OutputStats;
import org.apache.pig.tools.pigstats.PigProgressNotificationListener;
import com.google.common.collect.Lists;
import com.netflix.lipstick.P2jPlanGenerator;
import com.netflix.lipstick.pigtolipstick.BasicP2LClient;
import com.netflix.lipstick.pigtolipstick.P2LClient;
/**
* Lipstick Pig Progress notification listener.
*
* Manages initialization of lipstick clients and routing events to active
* clients.
*
* @author jmagnusson
* @author nbates
*
*/
public class LipstickPPNL implements PigProgressNotificationListener {
    private static final Log LOG = LogFactory.getLog(LipstickPPNL.class);
    /** Property naming the property under which the plan UUID is stored. */
    protected static final String LIPSTICK_UUID_PROP_NAME = "lipstick.uuid.prop.name";
    protected static final String LIPSTICK_UUID_PROP_DEFAULT = "lipstick.uuid";
    /** Property holding the Lipstick server URL; clients are created only when it is set. */
    protected static final String LIPSTICK_URL_PROP = "lipstick.server.url";
    protected LipstickPigServer ps;
    protected PigContext context;
    protected List<P2LClient> clients = Lists.newLinkedList();
    protected List<PPNLErrorHandler> errorHandlers = Lists.newLinkedList();
    /**
     * Initialize a new LipstickPPNL object.
     */
    public LipstickPPNL() {
        LOG.info("--- Init TBPPNL ---");
    }
    /** Registers a handler invoked when a notification callback throws. */
    public void addErrorHandler(PPNLErrorHandler errHandler) {
        errorHandlers.add(errHandler);
    }
    /**
     * Check if any clients are active.
     *
     * @return true, if at least one active client has been initialized
     */
    protected boolean clientIsActive() {
        if (clients != null && !clients.isEmpty()) {
            return true;
        }
        return false;
    }
    /**
     * Sets a reference to the pig server (and, from it, the pig context).
     *
     * @param ps
     *            the pig server
     */
    public void setPigServer(LipstickPigServer ps) {
        this.ps = ps;
        setPigContext(ps.getPigContext());
    }
    /**
     * Sets a reference to the pig context. Used if running
     * without a LipstickPigServer.
     *
     * @param context
     *            the pig context
     */
    public void setPigContext(PigContext context) {
        this.context = context;
    }
    /**
     * Sets the plan generators. Initializes Lipstick clients if they have not
     * already been initialized.
     *
     * @param unoptimizedPlanGenerator
     *            the unoptimized plan generator
     * @param optimizedPlanGenerator
     *            the optimized plan generator
     */
    public void setPlanGenerators(P2jPlanGenerator unoptimizedPlanGenerator, P2jPlanGenerator optimizedPlanGenerator) {
        try {
            // this is the first time we can grab a conf from pig context so
            // initClients here
            initClients();
            if (clientIsActive()) {
                Properties props = context.getProperties();
                String uuidPropName = props.getProperty(LIPSTICK_UUID_PROP_NAME, LIPSTICK_UUID_PROP_DEFAULT);
                String uuid = props.getProperty(uuidPropName);
                if ((uuid == null) || uuid.isEmpty()) {
                    uuid = UUID.randomUUID().toString();
                    props.put(uuidPropName, uuid);
                }
                LOG.info("UUID: " + uuid);
                LOG.info(clients);
                for (P2LClient client : clients) {
                    client.setPlanGenerators(unoptimizedPlanGenerator, optimizedPlanGenerator);
                    client.setPigServer(ps);
                    client.setPigContext(context);
                    client.setPlanId(uuid);
                }
            }
        } catch (Exception e) {
            LOG.error("Caught unexpected exception", e);
            for (PPNLErrorHandler errHandler : errorHandlers) {
                errHandler.handlePlanGeneratorsError(e);
            }
        }
    }
    /*
     * (non-Javadoc)
     *
     * @see org.apache.pig.tools.pigstats.PigProgressNotificationListener#
     * initialPlanNotification(java.lang.String,
     * org.apache.pig.backend.hadoop.executionengine
     * .mapReduceLayer.plans.MROperPlan)
     */
    @Override
    public void initialPlanNotification(String scriptId, OperatorPlan plan) {
        try {
            if (clientIsActive()) {
                for (P2LClient client : clients) {
                    client.createPlan(plan);
                }
            }
        } catch (Exception e) {
            LOG.error("Caught unexpected exception", e);
            for (PPNLErrorHandler errHandler : errorHandlers) {
                errHandler.handleInitialPlanNotificationError(e);
            }
        }
    }
    /*
     * (non-Javadoc)
     *
     * @see org.apache.pig.tools.pigstats.PigProgressNotificationListener#
     * launchStartedNotification(java.lang.String, int)
     */
    @Override
    public void launchStartedNotification(String scriptId, int numJobsToLaunch) {
    }
    /*
     * (non-Javadoc)
     *
     * @see org.apache.pig.tools.pigstats.PigProgressNotificationListener#
     * jobsSubmittedNotification(java.lang.String, int)
     */
    @Override
    public void jobsSubmittedNotification(String scriptId, int numJobsSubmitted) {
    }
    /*
     * (non-Javadoc)
     *
     * @see org.apache.pig.tools.pigstats.PigProgressNotificationListener#
     * jobStartedNotification(java.lang.String, java.lang.String)
     */
    @Override
    public void jobStartedNotification(String scriptId, String assignedJobId) {
        try {
            if (clientIsActive()) {
                for (P2LClient client : clients) {
                    client.jobStarted(assignedJobId);
                }
            }
        } catch (Exception e) {
            LOG.error("Caught unexpected exception", e);
            for (PPNLErrorHandler errHandler : errorHandlers) {
                errHandler.handleJobStartedNotificationError(e);
            }
        }
    }
    /*
     * (non-Javadoc)
     *
     * @see org.apache.pig.tools.pigstats.PigProgressNotificationListener#
     * jobFinishedNotification(java.lang.String,
     * org.apache.pig.tools.pigstats.JobStats)
     */
    @Override
    public void jobFinishedNotification(String scriptId, JobStats jobStats) {
        try {
            if (clientIsActive()) {
                for (P2LClient client : clients) {
                    client.jobFinished(jobStats);
                }
            }
        } catch (Exception e) {
            LOG.error("Caught unexpected exception", e);
            for (PPNLErrorHandler errHandler : errorHandlers) {
                errHandler.handleJobFinishedNotificationError(e);
            }
        }
    }
    /*
     * (non-Javadoc)
     *
     * @see org.apache.pig.tools.pigstats.PigProgressNotificationListener#
     * jobFailedNotification(java.lang.String,
     * org.apache.pig.tools.pigstats.JobStats)
     */
    @Override
    public void jobFailedNotification(String scriptId, JobStats jobStats) {
        try {
            if (clientIsActive()) {
                for (P2LClient client : clients) {
                    client.jobFailed(jobStats);
                }
            }
        } catch (Exception e) {
            LOG.error("Caught unexpected exception", e);
            for (PPNLErrorHandler errHandler : errorHandlers) {
                errHandler.handleJobFailedNotificationError(e);
            }
        }
    }
    /*
     * (non-Javadoc)
     *
     * @see org.apache.pig.tools.pigstats.PigProgressNotificationListener#
     * outputCompletedNotification(java.lang.String,
     * org.apache.pig.tools.pigstats.OutputStats)
     */
    @Override
    public void outputCompletedNotification(String scriptId, OutputStats outputStats) {
    }
    /*
     * (non-Javadoc)
     *
     * @see org.apache.pig.tools.pigstats.PigProgressNotificationListener#
     * progressUpdatedNotification(java.lang.String, int)
     */
    @Override
    public void progressUpdatedNotification(String scriptId, int progress) {
        try {
            if (clientIsActive()) {
                for (P2LClient client : clients) {
                    client.updateProgress(progress);
                }
            }
        } catch (Exception e) {
            LOG.error("Caught unexpected exception", e);
            for (PPNLErrorHandler errHandler : errorHandlers) {
                errHandler.handleProgressUpdatedNotificationError(e);
            }
        }
    }
    /*
     * (non-Javadoc)
     *
     * @see org.apache.pig.tools.pigstats.PigProgressNotificationListener#
     * launchCompletedNotification(java.lang.String, int)
     */
    @Override
    public void launchCompletedNotification(String scriptId, int numJobsSucceeded) {
        try {
            if (clientIsActive()) {
                for (P2LClient client : clients) {
                    client.planCompleted();
                }
            }
        } catch (Exception e) {
            LOG.error("Caught unexpected exception", e);
            for (PPNLErrorHandler errHandler : errorHandlers) {
                errHandler.handleLaunchCompletedNotificationError(e);
            }
        }
    }
    /**
     * Initialize the clients from properties in the pig context.
     */
    protected void initClients() {
        // Make sure client list is empty before initializing.
        // For example, this prevents initializing multiple times when
        // executing multiple runs in a grunt shell session.
        //
        // Bug fix: read properties from the PigContext rather than from the
        // pig server.  When only setPigContext() was used (the documented
        // "running without a LipstickPigServer" mode), ps is null and the
        // previous ps.getPigContext() dereference threw an NPE.
        Properties props = context.getProperties();
        if (clients.isEmpty() && props.containsKey(LIPSTICK_URL_PROP)) {
            // Initialize the client
            clients.add(new BasicP2LClient(props.getProperty(LIPSTICK_URL_PROP)));
        }
    }
}
| 365 |
0 | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick/listeners/PPNLErrorHandler.java | package com.netflix.lipstick.listeners;
/**
 * Lipstick Pig Progress notification listener Error Handler.
 *
 * For finer grain error handling from the LipstickPPNL, implement
 * this interface, then pass an instance to LipstickPPNL.addErrorHandler()
 */
public interface PPNLErrorHandler {
    /**
     * Called when an unhandled exception from LipstickPPNL.setPlanGenerators() occurs
     */
    public void handlePlanGeneratorsError(Exception e);
    /**
     * Called when an unhandled exception from LipstickPPNL.initialPlanNotification() occurs
     */
    public void handleInitialPlanNotificationError(Exception e);
    /**
     * Called when an unhandled exception from LipstickPPNL.jobStartedNotification() occurs
     */
    public void handleJobStartedNotificationError(Exception e);
    /**
     * Called when an unhandled exception from LipstickPPNL.jobFinishedNotification() occurs
     */
    public void handleJobFinishedNotificationError(Exception e);
    /**
     * Called when an unhandled exception from LipstickPPNL.jobFailedNotification() occurs
     */
    public void handleJobFailedNotificationError(Exception e);
    /**
     * Called when an unhandled exception from LipstickPPNL.progressUpdatedNotification() occurs
     */
    public void handleProgressUpdatedNotificationError(Exception e);
    /**
     * Called when an unhandled exception from LipstickPPNL.launchCompletedNotification() occurs
     */
    public void handleLaunchCompletedNotificationError(Exception e);
}
| 366 |
0 | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick/warnings/JobWarnings.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.lipstick.warnings;
import java.io.IOException;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.math3.stat.StatUtils;
import org.apache.hadoop.mapred.Counters;
import org.apache.hadoop.mapred.Counters.Counter;
import org.apache.hadoop.mapred.Counters.Group;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.RunningJob;
import org.apache.hadoop.mapred.TaskReport;
import org.apache.pig.tools.pigstats.JobStats;
import org.apache.pig.tools.pigstats.PigStats;
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.lipstick.model.P2jWarning;
/**
 * Inspects running and completed map/reduce jobs and produces P2jWarning
 * objects for suspicious conditions: jobs that wrote no output records and
 * jobs with skewed (abnormally slow) reducers.
 */
public class JobWarnings {
    private static final Log log = LogFactory.getLog(JobWarnings.class);
    public static final String NO_OUTPUT_RECORDS_KEY = "noOutputRecords";
    public static final String SKEWED_REDUCERS_KEY = "skewedReducers";
    /* Require that there are a minimum num of reducer tasks to consider
       a job reducer skewed to prevent a high level of false posititives
       that ensue otherwise. */
    public static final int MIN_REDUCERS_FOR_SKEW = 2;
    /* We use the standard deviation as a reference for determining if
       a slow running reducer is skewing. In order to deal with a set
       of reducers that has a small deviation, we override stddev in
       the case that it is exceedingly small. This compensates for
       reducers whose runtime is larger than the rest of the reducers
       in the job but whose runtime is still fast enough that we don't
       need to warn about it. */
    public static final double MIN_STDDEV_DELTA_MINUTES = 10;
    /* The size on disk of an HDFS directory. When a m/r job writes zero
       records it creates a directory for its output on HDFS. In order to
       tell the difference between records written and a directory written
       but no records written we compare the size of the output to this
       contstant. */
    public static final long HDFS_DIRECTORY_SIZE = 154;
    /**
     * Decides whether to warn that the job wrote no output records.
     * JobStats is double-checked against filesystem counters because it
     * sometimes reports zero records for jobs that did write output.
     */
    public boolean shouldNoOutputRecordsWarn(JobStats jobStats, String jobId) {
        if (0 == jobStats.getRecordWrittern()) {
            log.info("JobStats reports no records have been written");
            /* JobStats is periodically returning zero for the number of records that
               have been written for map/reduce jobs where records *have* been written.
               Tracking down why/how this is happening has proved difficult, so to
               prevent false positives we're double checking against a few well known
               counters to confirm that we don't have any record data being written out. */
            if (countersShowRecordsWritten(jobStats, jobId)) {
                log.info("Counters found records written, no warning should be sent");
                return false;
            } else {
                log.info("Counters also report no records written, will warn user");
                return true;
            }
        } else {
            log.info("JobStats reports some records have been written");
            return false;
        }
    }
    /**
     * Returns the total HDFS + S3N bytes written by the job.
     * NOTE(review): despite the name, this returns *bytes*, not a record
     * count, and returns 1 on counter-fetch failure (a "nonzero" sentinel).
     */
    public long numOutputRecordsFromCounters(JobStats jobStats, String jobId) {
        JobClient jobClient = PigStats.get().getJobClient();
        Counters counters;
        try {
            RunningJob rj = jobClient.getJob(jobId);
            counters = rj.getCounters();
        } catch (IOException e) {
            log.error("Error getting job client, continuing", e);
            return 1;
        }
        Group fsGroup = counters.getGroup("FileSystemCounters");
        long hdfsBytes = fsGroup.getCounter("HDFS_BYTES_WRITTEN");
        long s3Bytes = fsGroup.getCounter("S3N_BYTES_WRITTEN");
        return hdfsBytes + s3Bytes;
    }
    /** Pairs a reducer task id with its observed run duration. */
    public static class ReducerDuration {
        public String reducerTaskId;
        public double duration;
        public ReducerDuration(String reducerTaskId, double duration) {
            this.reducerTaskId = reducerTaskId;
            this.duration = duration;
        }
    }
    /* Version 0.0 attempt, if any of the top 10% of reducers,
       in terms of duration, are more than 2x the stddev from
       the mean of the bottom 90% we consider skew to be present.
       Version 0.1 pending more data about reducer skew. This is
       to be considered a best effort for now.
       Precondition: reducerTimes must be sorted by duration ascending
       (see enumerateReducerRunTimesAccending). */
    public List<String> findSkewedReducers(List<ReducerDuration> reducerTimes) {
        if (! (MIN_REDUCERS_FOR_SKEW < reducerTimes.size())) {
            return Lists.newLinkedList();
        }
        int numPotentialOutliers = (int)Math.ceil(reducerTimes.size() / 10.0);
        int inflection = reducerTimes.size() - numPotentialOutliers;
        List<ReducerDuration> potentialOutliers = reducerTimes.subList(inflection, reducerTimes.size());
        List<ReducerDuration> referenceReducers = reducerTimes.subList(0, inflection);
        /* List of reducer duration values that we will compare the
           potential outliers to. */
        double[] referenceDurations = new double[referenceReducers.size()];
        for (int i = 0; i < referenceReducers.size(); i++) {
            referenceDurations[i] = referenceReducers.get(i).duration;
        }
        double refMean = StatUtils.mean(referenceDurations);
        double refVariance = StatUtils.populationVariance(referenceDurations, refMean);
        double refStdDev = Math.sqrt(refVariance);
        /* If the time to complete the task is more than this far
           from the mean of all task completion times, we consider
           it skewed.
           NOTE(review): refMean is added into the threshold here AND
           subtracted from r.duration below, so the effective condition is
           duration > 2*refMean + max(2*stddev, 600s).  Verify whether the
           mean was meant to be counted once rather than twice. */
        double distToMeanThreshold = Math.max((refStdDev * 2), (MIN_STDDEV_DELTA_MINUTES * 60)) + refMean;
        /* Now collect and return any of the outliers whose distance
           from the mean is great than the computed threshold. */
        List<String> skewedReducerIds = Lists.newArrayList();
        for (ReducerDuration r: potentialOutliers) {
            if ((r.duration - refMean) > distToMeanThreshold) {
                skewedReducerIds.add(r.reducerTaskId);
            }
        }
        return skewedReducerIds;
    }
    /** Fetches reduce task reports (unless disabled) and returns sorted durations. */
    public List<ReducerDuration> enumerateReducerRunTimesAccending(JobClient jobClient, String jobId) {
        if (!jobClient.getConf().getBoolean("pig.stats.notaskreport", false)) {
            try {
                TaskReport[] reduceTasks = jobClient.getReduceTaskReports(jobId);
                return enumerateReducerRunTimesAccending(reduceTasks);
            } catch (IOException e) {
                log.error("Error getting reduce task reports, continuing", e);
            }
        } else {
            log.info("Skipping reduce task reports for job " + jobId);
        }
        return Lists.newArrayList();
    }
    /* Extract all running or completed reducer tasks for the job, their runtime and sort them
       in accending order. Used to partition reduce tasks to detect reducer skew. */
    public List<ReducerDuration> enumerateReducerRunTimesAccending(TaskReport[] reduceTasks) {
        List<ReducerDuration> reducerDurations = Lists.newArrayList();
        /* NOTE(review): `now` is in seconds while TaskReport start/finish
           times are typically milliseconds — verify units; an unfinished
           task would otherwise get a nonsensical duration. */
        long now = System.currentTimeMillis() / 1000;
        for (int i = 0; i < reduceTasks.length; i++) {
            String taskId = reduceTasks[i].getTaskID().toString();
            long startTime = reduceTasks[i].getStartTime();
            long finishTime = reduceTasks[i].getFinishTime();
            if (0 == finishTime) {
                /* Job hasn't finished yet */
                finishTime = now;
            }
            if (0 != startTime) {
                reducerDurations.add(new ReducerDuration(taskId, (double)finishTime - startTime));
            }
        }
        /* Bug fix: findSkewedReducers() partitions this list assuming it is
           sorted by duration ascending (as the method name promises), but it
           was previously returned in task order, making the "top 10%"
           selection arbitrary.  Sort before returning. */
        Collections.sort(reducerDurations, new Comparator<ReducerDuration>() {
            @Override
            public int compare(ReducerDuration a, ReducerDuration b) {
                return Double.compare(a.duration, b.duration);
            }
        });
        return reducerDurations;
    }
    /**
     * Confirms via filesystem counters whether any output bytes were written.
     * Returns true (i.e. "records written") on counter-fetch failure so we
     * err on the side of not warning.
     */
    public boolean countersShowRecordsWritten(JobStats jobStats, String jobId) {
        JobClient jobClient = PigStats.get().getJobClient();
        Counters counters;
        try {
            RunningJob rj = jobClient.getJob(jobId);
            counters = rj.getCounters();
        } catch (IOException e) {
            log.error("Error getting job client, continuing", e);
            return true;
        }
        Group fsGroup = counters.getGroup("FileSystemCounters");
        long hdfsBytes = fsGroup.getCounter("HDFS_BYTES_WRITTEN");
        long s3Bytes = fsGroup.getCounter("S3N_BYTES_WRITTEN");
        log.info(String.format("Total of %s bytes were written by this m/r job", (hdfsBytes + s3Bytes)));
        if ((0 == s3Bytes) && (HDFS_DIRECTORY_SIZE == hdfsBytes)) {
            log.info("No s3 output and empty HDFS directory created");
            return false;
        } else {
            return (0 < (hdfsBytes + s3Bytes));
        }
    }
    /** Adds a warning with no attributes. */
    protected void addWarning(String jobId, Map<String, P2jWarning> warningsMap, String warningKey) {
        Map<String, String> attrs = Maps.newHashMap();
        addWarning(jobId, warningsMap, warningKey, attrs);
    }
    /** Adds a warning with no attributes, keyed by the job in jobStats. */
    protected void addWarning(JobStats jobStats, Map<String, P2jWarning> warningsMap, String warningKey) {
        Map<String, String> attrs = Maps.newHashMap();
        addWarning(jobStats.getJobId(), warningsMap, warningKey, attrs);
    }
    /** Builds a P2jWarning and records it in warningsMap under warningKey. */
    protected void addWarning(String jobId, Map<String, P2jWarning> warningsMap, String warningKey, Map<String, String> attributes) {
        P2jWarning pw = new P2jWarning();
        pw.setWarningKey(warningKey);
        pw.setJobId(jobId);
        pw.setWarningAttributes(attributes);
        warningsMap.put(warningKey, pw);
    }
    /** All running-job warnings plus the no-output-records check. */
    public Map<String, P2jWarning> findCompletedJobWarnings(JobClient jobClient, JobStats jobStats) {
        Map<String, P2jWarning> warnings = findRunningJobWarnings(jobClient, jobStats.getJobId());
        if (shouldNoOutputRecordsWarn(jobStats, jobStats.getJobId())) {
            addWarning(jobStats, warnings, NO_OUTPUT_RECORDS_KEY);
        }
        return warnings;
    }
    /** Warnings computable while the job is still running (currently reducer skew). */
    public Map<String, P2jWarning> findRunningJobWarnings(JobClient jobClient, String jobId) {
        Map<String, P2jWarning> warnings = Maps.newHashMap();
        List<ReducerDuration> reducerTimes = enumerateReducerRunTimesAccending(jobClient, jobId);
        List<String> skewedReducerIds = findSkewedReducers(reducerTimes);
        if (0 < skewedReducerIds.size()) {
            /* todo: find a better why to shove a list into the attribute map
               than a csv. I feel shame at this. */
            String sris = Joiner.on(",").join(skewedReducerIds);
            addWarning(jobId, warnings, SKEWED_REDUCERS_KEY,
                       ImmutableMap.of(
                           "skewedReducerIds", sris,
                           "numberSkewedReducers", Integer.toString(skewedReducerIds.size())
                       ));
        }
        return warnings;
    }
}
| 367 |
0 | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick/pigtolipstick/BasicP2LClient.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.lipstick.pigtolipstick;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.Set;
import org.apache.commons.codec.binary.Base64;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.Counters;
import org.apache.hadoop.mapred.Counters.Counter;
import org.apache.hadoop.mapred.Counters.Group;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobID;
import org.apache.hadoop.mapred.JobInProgress;
import org.apache.hadoop.mapred.RunningJob;
import org.apache.hadoop.mapred.TaskReport;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.pig.LipstickPigServer;
import org.apache.pig.impl.plan.OperatorPlan;
import org.apache.pig.backend.hadoop.executionengine.HExecutionEngine;
import org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceOper;
import org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.plans.MROperPlan;
import org.apache.pig.backend.hadoop.executionengine.physicalLayer.PhysicalOperator;
import org.apache.pig.impl.PigContext;
import org.apache.pig.newplan.Operator;
import org.apache.pig.tools.pigstats.JobStats;
import org.apache.pig.tools.pigstats.PigStats;
import org.apache.pig.tools.pigstats.mapreduce.MRScriptState;
import org.apache.pig.impl.PigImplConstants;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.netflix.lipstick.MRPlanCalculator;
import com.netflix.lipstick.P2jPlanGenerator;
import com.netflix.lipstick.model.P2jCounters;
import com.netflix.lipstick.model.P2jWarning;
import com.netflix.lipstick.model.P2jJobStatus;
import com.netflix.lipstick.model.P2jPlanPackage;
import com.netflix.lipstick.model.P2jPlanStatus;
import com.netflix.lipstick.model.P2jPlanStatus.StatusText;
import com.netflix.lipstick.model.P2jSampleOutput;
import com.netflix.lipstick.model.P2jSampleOutputList;
import com.netflix.lipstick.warnings.JobWarnings;
import com.netflix.lipstick.pigstatus.PigStatusClient;
import com.netflix.lipstick.pigstatus.RestfulPigStatusClient;
import com.netflix.lipstick.util.OutputSampler;
import com.netflix.lipstick.util.OutputSampler.SampleOutput;
/**
* A basic implementation of P2LClient.
*
* @author nbates
*
*/
public class BasicP2LClient implements P2LClient {
private static final Log LOG = LogFactory.getLog(BasicP2LClient.class);
protected static final String JOB_NAME_PROP = "jobName";
protected static final String ENABLE_SAMPLE_OUTPUT_PROP = "lipstick.enable.sampleoutput";
protected boolean planFailed = false;
protected String planId;
protected P2jPlanGenerator unopPlanGenerator;
protected P2jPlanGenerator opPlanGenerator;
protected LipstickPigServer ps;
protected PigContext context;
protected final Set<String> runningJobIds = Sets.newHashSet();
protected final Map<String, P2jJobStatus> jobIdToJobStatusMap = Maps.newHashMap();
protected final Map<String, Boolean> jobModeMap = Maps.newHashMap();
protected final PigStatusClient psClient;
protected boolean invalidClient = false;
protected boolean enableSampleOutput = true;
    /**
     * Instantiates a new BasicP2LClient using RestfulPigStatusClient with serviceUrls.
     *
     * @param serviceUrls One or more comma separated urls for connecting to a Lipstick Server
     */
    public BasicP2LClient(String serviceUrls) {
        this(new RestfulPigStatusClient(serviceUrls));
    }
    /**
     * Instantiates a new BasicP2LClient that reports status through the given client.
     *
     * @param client the status client used to persist plans, statuses and samples
     */
    public BasicP2LClient(PigStatusClient client) {
        this.psClient = client;
    }
    /** {@inheritDoc} */
    @Override
    public void setPlanGenerators(P2jPlanGenerator unopPlanGenerator, P2jPlanGenerator opPlanGenerator) {
        this.unopPlanGenerator = unopPlanGenerator;
        this.opPlanGenerator = opPlanGenerator;
    }
    /** {@inheritDoc} */
    @Override
    public void setPigServer(LipstickPigServer ps) {
        this.ps = ps;
    }
    /** {@inheritDoc} */
    @Override
    public void setPigContext(PigContext context) {
        this.context = context;
    }
    /** {@inheritDoc} */
    @Override
    public void setPlanId(String planId) {
        this.planId = planId;
    }
    /** {@inheritDoc} */
    @Override
    public String getPlanId() {
        return planId;
    }
    /**
     * Translates the MROperPlan into optimized/unoptimized P2j plans and saves
     * them through the status client. On any failure (or missing prerequisites)
     * the client is marked invalid, which disables all further Lipstick calls.
     */
    @Override
    @SuppressWarnings("unused")
    public void createPlan(OperatorPlan<?> plan) {
        if (plan != null && unopPlanGenerator != null && opPlanGenerator != null && context != null) {
            Configuration conf = null;
            // Only the first operator is used to seed the Configuration; the
            // loop breaks immediately after the first iteration.
            for (org.apache.pig.impl.plan.Operator<?> op : plan) {
                if (conf == null) {
                    conf = new Configuration();
                    MRScriptState.get().addSettingsToConf((MapReduceOper)op, conf);
                    break;
                }
            }
            try {
                // Invert Pig's logical->physical map so physical operators can
                // be mapped back to logical ones.
                Map<PhysicalOperator, Operator> p2lMap = Maps.newHashMap();
                Map<Operator, PhysicalOperator> l2pMap = ((HExecutionEngine)context.getExecutionEngine()).getLogToPhyMap();
                for (Entry<Operator, PhysicalOperator> i : l2pMap.entrySet()) {
                    p2lMap.put(i.getValue(), i.getKey());
                }
                String script = null;
                // suppress getting script from conf for now - do something smarter later
                // (the `&& false` below intentionally disables this branch)
                if (conf != null && false) {
                    script = new String(Base64.decodeBase64(conf.get("pig.script")));
                }
                // Fall back to the pig server's script cache when the conf
                // didn't supply a script.
                if ((script == null || script.length() == 0) && (ps != null)) {
                    script = StringUtils.join(ps.getScriptCache(), '\n');
                }
                MRPlanCalculator opPlan = new MRPlanCalculator(opPlanGenerator.getP2jPlan(), (MROperPlan)plan, p2lMap, opPlanGenerator.getReverseMap());
                MRPlanCalculator unopPlan = new MRPlanCalculator(unopPlanGenerator.getP2jPlan(), (MROperPlan)plan, p2lMap, unopPlanGenerator.getReverseMap());
                P2jPlanPackage plans = new P2jPlanPackage(opPlan.getP2jPlan(), unopPlan.getP2jPlan(), script, planId);
                Properties props = context.getProperties();
                plans.setUserName(UserGroupInformation.getCurrentUser().getUserName());
                if (props.containsKey(JOB_NAME_PROP)) {
                    plans.setJobName(props.getProperty(JOB_NAME_PROP));
                } else {
                    plans.setJobName("unknown");
                }
                // Sample output may be disabled via property value "f" or "false"
                // (case-insensitive).
                if(props.containsKey(ENABLE_SAMPLE_OUTPUT_PROP)) {
                    String strProp = props.getProperty(ENABLE_SAMPLE_OUTPUT_PROP).toLowerCase();
                    if(strProp.equals("f") || strProp.equals("false")) {
                        enableSampleOutput = false;
                        LOG.warn("Sample Output has been disabled.");
                    }
                }
                plans.getStatus().setStartTime();
                plans.getStatus().setStatusText(StatusText.running);
                // A null response from savePlan means the server rejected the
                // plan; treat that as an invalid client.
                invalidClient = (psClient.savePlan(plans) == null);
            } catch (Exception e) {
                LOG.error("Caught unexpected exception generating json plan.", e);
                invalidClient = true;
            }
        } else {
            LOG.warn("Not saving plan, missing necessary objects to do so");
            invalidClient = true;
        }
        if(invalidClient) {
            LOG.error("Failed to properly create lipstick client and save plan. Lipstick will be disabled.");
        }
    }
@Override
public void updateProgress(int progress) {
if(invalidClient) {
return;
}
P2jPlanStatus planStatus = new P2jPlanStatus();
planStatus.setProgress(progress);
// toArray() done to avoid concurrent access errors during iteration
for (String jobId : runningJobIds.toArray(new String[0])) {
updatePlanStatusForJobId(planStatus, jobId);
}
psClient.saveStatus(planId, planStatus);
}
@Override
public void jobStarted(String jobId) {
if(invalidClient) {
return;
}
PigStats.JobGraph jobGraph = PigStats.get().getJobGraph();
LOG.debug("jobStartedNotification - jobId " + jobId + ", jobGraph:\n" + jobGraph);
// for each job in the graph, check if the stats for a job with this
// name is found. If so, look up it's scope and bind the jobId to
// the DAGNode with the same scope.
for (JobStats jobStats : jobGraph) {
if (jobStats != null && jobId.equals(jobStats.getJobId())) {
LOG.info("jobStartedNotification - scope " + jobStats.getName() + " is jobId " + jobId);
P2jJobStatus jobStatus = new P2jJobStatus();
jobStatus.setJobId(jobId);
jobStatus.setStartTime(System.currentTimeMillis());
jobStatus.setScope(jobStats.getName());
jobIdToJobStatusMap.put(jobId, jobStatus);
runningJobIds.add(jobId);
//
// Hack to get the configuration associated with the job to know
// whether it's been converted to local mode or not
//
try {
java.lang.reflect.Field f = jobStats.getClass().getSuperclass().getDeclaredField("conf");
f.setAccessible(true);
Configuration c = (Configuration)f.get(jobStats);
jobModeMap.put(jobId, c.getBoolean(PigImplConstants.CONVERTED_TO_LOCAL, false));
} catch (Exception e) {
e.printStackTrace();
}
}
}
P2jPlanStatus planStatus = new P2jPlanStatus();
updatePlanStatusForJobId(planStatus, jobId);
psClient.saveStatus(planId, planStatus);
}
    /**
     * Records that a map/reduce job has finished: updates its status (finish
     * time, bytes/records written, warnings), pushes the plan status, and
     * optionally samples the job's output.
     *
     * NOTE(review): assumes jobStarted() previously ran for this jobId —
     * jobIdToJobStatusMap.get(jobId) would NPE otherwise; TODO confirm the
     * Pig listener contract guarantees that ordering.
     */
    @Override
    public void jobFinished(JobStats jobStats) {
        if(invalidClient) {
            return;
        }
        // Remove jobId from runningSet b/c it's now complete
        String jobId = jobStats.getJobId();
        if (!runningJobIds.remove(jobId)) {
            LOG.error("Internal Error. Job finished with no record of running jobId: " + jobId);
        }
        // Update the status of this job
        JobClient jobClient = PigStats.get().getJobClient();
        P2jPlanStatus planStatus = new P2jPlanStatus();
        jobIdToJobStatusMap.get(jobId).setFinishTime(System.currentTimeMillis());
        if (isLocalMode(jobId)) {
            // Local-mode jobs report no progress, so force them to 100%.
            jobIdToJobStatusMap.get(jobId).setMapProgress(1);
            jobIdToJobStatusMap.get(jobId).setReduceProgress(1);
        }
        jobIdToJobStatusMap.get(jobId).setBytesWritten(jobStats.getBytesWritten());
        // "getRecordWrittern" is the (misspelled) Pig JobStats API method name.
        jobIdToJobStatusMap.get(jobId).setRecordsWritten(jobStats.getRecordWrittern());
        updatePlanStatusForCompletedJobId(planStatus, jobId);
        /* Set the completed job warnings after calling updatePlanStatusForCompletedJobId() otherwise
           we end up overwriting the warning field with the running job warnings (which are included
           with the completed job warnings). */
        jobIdToJobStatusMap.get(jobId).setWarnings(getCompletedJobWarnings(jobClient, jobStats));
        psClient.saveStatus(planId, planStatus);
        if(enableSampleOutput) {
            // Get sample output for the job
            try {
                P2jSampleOutputList sampleOutputList = new P2jSampleOutputList();
                OutputSampler os = new OutputSampler(jobStats);
                // The 10 & 1024 params (maxRows and maxBytes)
                // should be configurable via properties
                for (SampleOutput schemaOutputPair : os.getSampleOutputs(10, 1024)) {
                    P2jSampleOutput sampleOutput = new P2jSampleOutput();
                    sampleOutput.setSchemaString(schemaOutputPair.getSchema());
                    sampleOutput.setSampleOutput(schemaOutputPair.getOutput());
                    sampleOutputList.add(sampleOutput);
                }
                psClient.saveSampleOutput(planId,
                                          jobIdToJobStatusMap.get(jobStats.getJobId()).getScope(),
                                          sampleOutputList);
            } catch (Exception e) {
                LOG.error("Unable to get sample output from job with id [" + jobStats.getJobId() + "]. ", e);
            }
        }
    }
@Override
public void jobFailed(JobStats jobStats) {
if(invalidClient) {
return;
}
planFailed = true;
}
@Override
public void planCompleted() {
if(invalidClient) {
return;
}
if (planFailed) {
planEndedWithStatusText(StatusText.failed);
} else {
planEndedWithStatusText(StatusText.finished);
}
}
/**
* Set the planStatus as ended with status statusText and saveStatus to the client.
*
* @param statusText job state at completition
*/
protected void planEndedWithStatusText(StatusText statusText) {
P2jPlanStatus planStatus = new P2jPlanStatus();
planStatus.setEndTime();
planStatus.setStatusText(statusText);
psClient.saveStatus(planId, planStatus);
}
/**
* Update planStatus with status for a map/reduce job.
*
* @param planStatus the P2jPlanStatus object to update
* @param jobId the map/reduce job id
*/
protected void updatePlanStatusForJobId(P2jPlanStatus planStatus, String jobId) {
P2jJobStatus status = buildJobStatusMap(jobId);
if (status != null) {
planStatus.updateWith(status);
}
}
    /**
     * Updates planStatus for a completed job and refines the job's start/finish
     * times by scanning its task reports (min task start, max task finish).
     *
     * @param planStatus the P2jPlanStatus object to update
     * @param jobId the completed map/reduce job id
     */
    protected void updatePlanStatusForCompletedJobId(P2jPlanStatus planStatus, String jobId) {
        LOG.info("Updating plan status for completed job " + jobId);
        updatePlanStatusForJobId(planStatus, jobId);
        JobClient jobClient = PigStats.get().getJobClient();
        JobID jobID = JobID.forName(jobId);
        long startTime = Long.MAX_VALUE;
        long finishTime = Long.MIN_VALUE;
        /* The JobClient doesn't expose a way to get the Start and Finish time
           of the over all job[1] sadly, so we're pulling out the min task start
           time and max task finish time and using these to approximate.
           [1] - Which is really dumb. The data obviously exists, it gets rendered
           in the job tracker via the JobInProgress but sadly this is internal
           to the remote job tracker so we don't have access to this
           information. */
        try {
            // Task-report scanning can be expensive; allow opting out via conf.
            if (!jobClient.getConf().getBoolean("pig.stats.notaskreport", false)) {
                List<TaskReport> reports = Lists.newArrayList();
                reports.addAll(Arrays.asList(jobClient.getMapTaskReports(jobID)));
                reports.addAll(Arrays.asList(jobClient.getReduceTaskReports(jobID)));
                for(TaskReport rpt : reports) {
                    /* rpt.getStartTime() sometimes returns zero meaning it does
                       not know what time it started so we need to prevent using
                       this or we'll lose the actual lowest start time */
                    long taskStartTime = rpt.getStartTime();
                    if (0 != taskStartTime) {
                        startTime = Math.min(startTime, taskStartTime);
                    }
                    finishTime = Math.max(finishTime, rpt.getFinishTime());
                }
                // NOTE(review): assumes jobStarted() populated this map entry;
                // a missing entry would NPE below — TODO confirm.
                P2jJobStatus jobStatus = jobIdToJobStatusMap.get(jobId);
                // Only overwrite the approximations when at least one task
                // actually reported a usable time.
                if (startTime < Long.MAX_VALUE) {
                    jobStatus.setStartTime(startTime);
                }
                if (finishTime > Long.MIN_VALUE) {
                    jobStatus.setFinishTime(finishTime);
                }
                LOG.info("Determined start and finish times for job " + jobId);
            } else {
                LOG.info("Skipping determining start and finish times for job " + jobId);
            }
        } catch (IOException e) {
            LOG.error("Error getting job info.", e);
        }
    }
    /**
     * Build a P2jJobStatus object for the map/reduce job with id jobId.
     *
     * Returns the cached (partially filled) status when the job cannot be found,
     * and null when querying the job tracker fails with an IOException.
     *
     * @param jobId the id of the map/reduce job
     * @return the newly created P2jJobStatus
     */
    @SuppressWarnings("deprecation")
    protected P2jJobStatus buildJobStatusMap(String jobId) {
        JobClient jobClient = PigStats.get().getJobClient();
        P2jJobStatus js = jobIdToJobStatusMap.get(jobId);
        try {
            RunningJob rj = jobClient.getJob(jobId);
            if (rj == null) {
                LOG.warn("Couldn't find job status for jobId=" + jobId);
                return js;
            }
            JobID jobID = rj.getID();
            Counters counters = rj.getCounters();
            js.setCounters(buildCountersMap(counters));
            js.setWarnings(getRunningJobWarnings(jobClient, jobID.toString()));
            js.setJobName(rj.getJobName());
            js.setTrackingUrl(rj.getTrackingURL());
            js.setIsComplete(rj.isComplete());
            js.setIsSuccessful(rj.isSuccessful());
            js.setMapProgress(rj.mapProgress());
            js.setReduceProgress(rj.reduceProgress());
            // Launched-task counters are the closest available proxy for the
            // total mapper/reducer counts (deprecated JobInProgress API).
            js.setTotalMappers((int)counters.findCounter(JobInProgress.Counter.TOTAL_LAUNCHED_MAPS).getCounter());
            js.setTotalReducers((int)counters.findCounter(JobInProgress.Counter.TOTAL_LAUNCHED_REDUCES).getCounter());
            return js;
        } catch (IOException e) {
            LOG.error("Error getting job info.", e);
        }
        return null;
    }
public Map<String, P2jCounters> buildCountersMap(Counters counters) {
Map<String, P2jCounters> cMap = Maps.newHashMap();
for (Group g : counters) {
P2jCounters countersObj = new P2jCounters();
cMap.put(g.getDisplayName(), countersObj);
for (Counter c : g) {
countersObj.getCounters().put(c.getDisplayName(), c.getValue());
}
}
return cMap;
}
public Map<String, P2jWarning> getCompletedJobWarnings(JobClient jobClient, JobStats jobStats) {
if (isLocalMode(jobStats.getJobId())) {
Map<String, P2jWarning> warnings = Maps.newHashMap();
return warnings;
} else {
JobWarnings jw = new JobWarnings();
return jw.findCompletedJobWarnings(jobClient, jobStats);
}
}
public Map<String, P2jWarning> getRunningJobWarnings(JobClient jobClient, String jobId) {
if (isLocalMode(jobId)) {
Map<String, P2jWarning> warnings = Maps.newHashMap();
return warnings;
} else {
JobWarnings jw = new JobWarnings();
return jw.findRunningJobWarnings(jobClient, jobId);
}
}
public boolean isLocalMode(String jobId) {
return (context.getExecType() == org.apache.pig.ExecType.LOCAL ||
jobModeMap.get(jobId)
);
}
}
| 368 |
0 | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick | Create_ds/Lipstick/lipstick-console/src/main/java/com/netflix/lipstick/pigtolipstick/P2LClient.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.lipstick.pigtolipstick;
import org.apache.pig.impl.PigContext;
import org.apache.pig.LipstickPigServer;
import org.apache.pig.impl.plan.OperatorPlan;
import org.apache.pig.tools.pigstats.JobStats;
import com.netflix.lipstick.P2jPlanGenerator;
/**
 * Interface to manage translation of Pig to Lipstick model objects.
 *
 * @author nbates
 *
 */
public interface P2LClient {
    /**
     * Creates a Serializable Internal Plan from a MROperPlan.
     * Some implementations assume setPlanGenerators method has already been called.
     *
     * @param plan the operator plan to translate and persist
     */
    void createPlan(OperatorPlan<?> plan);
    /**
     * Sets the unoptimized and optimized plan generators.
     * Some implementations assume this is called before createPlan.
     *
     * @param unopPlanGenerator generator for the unoptimized plan
     * @param opPlanGenerator generator for the optimized plan
     */
    void setPlanGenerators(P2jPlanGenerator unopPlanGenerator, P2jPlanGenerator opPlanGenerator);
    /**
     * Sets the LipstickPigServer which is used to provide context information.
     *
     * @param ps the pig server to pull context from
     */
    void setPigServer(LipstickPigServer ps);
    /**
     * Sets the PigContext which is used to provide context information.
     *
     * @param context the pig context to pull context from
     */
    public void setPigContext(PigContext context);
    /**
     * Sets the id of the plan which should be globally unique.
     *
     * @param planId the globally unique plan id
     */
    void setPlanId(String planId);
    /**
     * Returns the id of the plan which should be globally unique.
     *
     * @return the globally unique plan id
     */
    String getPlanId();
    /**
     * Signals that progress has been made in at least one job.
     * Implementations should use this as a trigger to update state about the job(s).
     *
     * @param progress overall completion percentage
     */
    void updateProgress(int progress);
    /**
     * Signals that a job has started.
     *
     * @param jobId the started map/reduce job id
     */
    void jobStarted(String jobId);
    /**
     * Signals that a job has finished and provides stats about the job.
     * Implementations should use this as a trigger to update state about the job.
     *
     * @param jobStats stats for the finished job
     */
    void jobFinished(JobStats jobStats);
    /**
     * Signals that a job has failed and provides stats about the job.
     * Implementations should use this as a trigger to update state about the job
     * as well as to update failure of the overall plan.
     *
     * @param jobStats stats for the failed job
     */
    void jobFailed(JobStats jobStats);
    /**
     * Signals that a plan has completed.
     * Implementations should update the state of the plan accordingly.
     */
    void planCompleted();
}
| 369 |
0 | Create_ds/Lipstick/clients/java/src/test/java/com/netflix/lipstick | Create_ds/Lipstick/clients/java/src/test/java/com/netflix/lipstick/graph/GraphTest.java | package com.netflix.lipstick.graph;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.Map;
import org.json.JSONException;
import org.skyscreamer.jsonassert.JSONAssert;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.io.CharStreams;
public class GraphTest {
ObjectMapper mapper = new ObjectMapper();
@Test
public void testSerialization() {
Map<String, Integer> counters = Maps.newHashMap();
counters.put("num_records", 2);
Graph graph = new Graph("0", "test")
.user("testuser")
.property("something", "else")
.status(new Status(20, 1412354951l, 1412354796l, "running"))
.node(new Node("a").property("alias","one").property("operation","start"))
.node(new Node("b").property("alias", "two").property("operation","hop"))
.node(new Node("c").property("alias", "three").property("operation","skip"))
.node(new Node("d").property("alias", "four").property("operation","join"))
.node(new Node("e").property("alias", "five").property("operation","read"))
.node(new Node("f").property("alias", "six").property("operation","fly"))
.node(new Node("g").property("alias", "seven").property("operation","dance"))
.node(new Node("h").property("alias", "eight").property("operation","rollerskate"))
.node(new Node("i").property("alias", "nine").property("operation","roll"))
.node(new Node("j").property("alias", "ten").property("operation","dive"))
.node(
new Node("k").type("Plain").url("http://www.netflix.com")
.property("alias", "eleven").property("operation", "tuck")
)
.node(new Node("l").property("alias", "twelve").property("operation","pushup"))
.node(new Node("m").property("alias", "thirteen").property("operation","twist"))
.node(new Node("n").property("alias", "fourteen").property("operation","kick"))
.node(new Node("o").property("alias", "fifteen").property("operation","shout"))
.node(new Node("p").property("alias", "sixteen").property("operation","finish"))
.node(new Node("job1").child("1"))
.node(new Node("job2").child("2"))
.node(new Node("job3").child("3"))
.node(new Node("job4").child("4"))
.node(new Node("job5").child("5"))
.edge(new Edge("a","d"))
.edge(new Edge("b","d"))
.edge(new Edge("c","e").label("testlabel").type("Something").property("edgeWeight",2))
.edge(new Edge("d","f"))
.edge(new Edge("f","g"))
.edge(new Edge("g","h"))
.edge(new Edge("h","i"))
.edge(new Edge("i","j"))
.edge(new Edge("j","k"))
.edge(new Edge("k","l"))
.edge(new Edge("l","m"))
.edge(new Edge("m","n"))
.edge(new Edge("n","o"))
.edge(new Edge("o","p"))
.nodeGroup(
new NodeGroup("1")
.child("a").child("b")
.status(new Status(10, 1412354951l, 1412354796l, "failed"))
)
.nodeGroup(new NodeGroup("2").child("c").child("d")
.stage(new Stage("map", new Status(100,0l,0l,"finished"))
))
.nodeGroup(new NodeGroup("3").child("e").child("f").child("g"))
.nodeGroup(new NodeGroup("4").child("h").child("i").child("j"))
.nodeGroup(
new NodeGroup("5")
.children(Lists.newArrayList("k","l","m","n","o","p"))
.url("http://localhost:8080")
.property("counters", counters)
.status(
new Status()
.progress(30)
.startTime(1412354951l)
.heartbeatTime(1412354796l)
.statusText("running")
)
);
try {
String ser = CharStreams.toString(new InputStreamReader(GraphTest.class.getResourceAsStream("/graph.json")));
JSONAssert.assertEquals(graph.toString(), ser, false);
} catch (IOException e) {
// err.
e.printStackTrace();
} catch (JSONException e) {
// err.
e.printStackTrace();
}
}
@Test
public void testDeserialization() {
Graph graph = Graph.fromJson(GraphTest.class.getResourceAsStream("/graph.json"));
Status expectedGraphStatus = new Status()
.progress(20).startTime(1412354951l)
.heartbeatTime(1412354796l).statusText("running");
Map<String, Object> expectedGraphProperties = Maps.newHashMap();
expectedGraphProperties.put("something", "else");
Assert.assertEquals(graph.id, "0");
Assert.assertEquals(graph.name, "test");
Assert.assertEquals(graph.user, "testuser");
Assert.assertEquals(graph.status, expectedGraphStatus);
Assert.assertEquals(graph.properties, expectedGraphProperties);
// Look at a specificNode node in the graph
Node expectedNode = new Node()
.id("k").type("Plain").url("http://www.netflix.com")
.property("alias", "eleven")
.property("operation", "tuck");
Assert.assertEquals(graph.node("k"), expectedNode);
Assert.assertEquals(graph.node("job5").child, "5");
// Look at a specific edge in the graph
Edge expectedEdge = new Edge("c","e")
.label("testlabel").type("Something")
.property("edgeWeight", 2);
Assert.assertEquals(graph.edge("c","e"), expectedEdge);
// Look at a specific node group in the graph
Map<String, Integer> counters = Maps.newHashMap();
counters.put("num_records", 2);
NodeGroup expectedGroup = new NodeGroup("5")
.children(Lists.newArrayList("k","l","m","n","o","p"))
.url("http://localhost:8080")
.property("counters", counters)
.status(
new Status()
.progress(30)
.startTime(1412354951l)
.heartbeatTime(1412354796l)
.statusText("running")
);
Assert.assertEquals(graph.nodeGroup("5"), expectedGroup);
Assert.assertEquals(graph.numNodes(), 21);
Assert.assertEquals(graph.numEdges(), 14);
Assert.assertEquals(graph.numNodeGroups(), 5);
}
}
| 370 |
0 | Create_ds/Lipstick/clients/java/src/main/java/com/netflix/lipstick | Create_ds/Lipstick/clients/java/src/main/java/com/netflix/lipstick/graph/Status.java | package com.netflix.lipstick.graph;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
@JsonInclude(value=JsonInclude.Include.NON_EMPTY)
@JsonIgnoreProperties(ignoreUnknown=true)
public class Status {
    public Integer progress;
    public Long startTime;
    public Long endTime;
    public Long heartbeatTime;
    public String statusText;
    /** Creates a status at 0% progress with start/heartbeat set to "now". */
    public Status() {
        Long now = System.currentTimeMillis();
        progress = 0;
        startTime = now;
        heartbeatTime = now;
    }
    public Status(Integer progress, Long startTime, Long heartbeatTime, String statusText) {
        this.progress = progress;
        this.startTime = startTime;
        this.heartbeatTime = heartbeatTime;
        this.statusText = statusText;
    }
    public Status progress(Integer progress) {
        this.progress = progress;
        return this;
    }
    public Status startTime(Long startTime) {
        this.startTime = startTime;
        return this;
    }
    public Status endTime(Long endTime) {
        this.endTime = endTime;
        return this;
    }
    public Status heartbeatTime(Long heartbeatTime) {
        this.heartbeatTime = heartbeatTime;
        return this;
    }
    public Status statusText(String statusText) {
        this.statusText = statusText;
        return this;
    }
    /**
     * Field-by-field, null-safe equality. The previous version chained
     * unparenthesized ternaries with {@code &&}; since {@code &&} binds tighter
     * than {@code ?:}, whenever endTime was null the heartbeatTime and
     * statusText fields were silently skipped, and null fields could NPE.
     */
    @Override
    public boolean equals(Object other) {
        if (this == other) return true;
        if (!(other instanceof Status)) return false;
        Status s = (Status)other;
        return eq(this.progress, s.progress)
            && eq(this.startTime, s.startTime)
            && eq(this.endTime, s.endTime)
            && eq(this.heartbeatTime, s.heartbeatTime)
            && eq(this.statusText, s.statusText);
    }
    /** Added alongside equals() to honor the equals/hashCode contract. */
    @Override
    public int hashCode() {
        int result = progress == null ? 0 : progress.hashCode();
        result = 31 * result + (startTime == null ? 0 : startTime.hashCode());
        result = 31 * result + (endTime == null ? 0 : endTime.hashCode());
        result = 31 * result + (heartbeatTime == null ? 0 : heartbeatTime.hashCode());
        result = 31 * result + (statusText == null ? 0 : statusText.hashCode());
        return result;
    }
    /** Null-safe equality for boxed fields. */
    private static boolean eq(Object a, Object b) {
        return a == null ? b == null : a.equals(b);
    }
}
| 371 |
0 | Create_ds/Lipstick/clients/java/src/main/java/com/netflix/lipstick | Create_ds/Lipstick/clients/java/src/main/java/com/netflix/lipstick/graph/Stage.java | package com.netflix.lipstick.graph;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
@JsonInclude(value=JsonInclude.Include.NON_EMPTY)
@JsonIgnoreProperties(ignoreUnknown=true)
public class Stage {
    public String name;
    public Status status;
    public Stage() {
    }
    public Stage(String name) {
        this(name, new Status());
    }
    public Stage(String name, Status status) {
        this.name = name;
        this.status = status;
    }
    public Stage name(String name) {
        this.name = name;
        return this;
    }
    public Stage status(Status status) {
        this.status = status;
        return this;
    }
    /**
     * Null-safe equality over both fields. The previous version's
     * unparenthesized ternaries mis-associated with {@code &&}, so when name
     * was null the status field was never compared.
     */
    @Override
    public boolean equals(Object other) {
        if (this == other) return true;
        if (!(other instanceof Stage)) return false;
        Stage s = (Stage)other;
        return (this.name == null ? s.name == null : this.name.equals(s.name))
            && (this.status == null ? s.status == null : this.status.equals(s.status));
    }
    /**
     * Added alongside equals(). Deliberately based on name only: equal Stages
     * always have equal names, so the equals/hashCode contract holds even if
     * Status instances hash by identity.
     */
    @Override
    public int hashCode() {
        return name == null ? 0 : name.hashCode();
    }
}
| 372 |
0 | Create_ds/Lipstick/clients/java/src/main/java/com/netflix/lipstick | Create_ds/Lipstick/clients/java/src/main/java/com/netflix/lipstick/graph/Node.java | package com.netflix.lipstick.graph;
import java.util.Map;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.google.common.collect.Maps;
@JsonInclude(value=JsonInclude.Include.NON_EMPTY)
@JsonIgnoreProperties(ignoreUnknown=true)
public class Node {
    public String id;
    public String url;
    public String type;
    public String child;
    public Status status;
    public Map<String, Object> properties;
    public Node() {
        this.properties = Maps.newHashMap();
    }
    public Node(String id) {
        this.id = id;
        this.properties = Maps.newHashMap();
    }
    public Node id(String id) {
        this.id = id;
        return this;
    }
    public Node url(String url) {
        this.url = url;
        return this;
    }
    public Node type(String type) {
        this.type = type;
        return this;
    }
    public Node child(String child) {
        this.child = child;
        return this;
    }
    public Node status(Status status) {
        this.status = status;
        return this;
    }
    public Object property(String key) {
        return this.properties.get(key);
    }
    public Node property(String key, Object value) {
        this.properties.put(key, value);
        return this;
    }
    public Node properties(Map<String, Object> properties) {
        this.properties = properties;
        return this;
    }
    /**
     * Null-safe, field-by-field equality. The previous version chained
     * unparenthesized ternaries with {@code &&}; as soon as one field was null
     * the remaining fields were silently skipped (e.g. two nodes with null urls
     * compared equal regardless of type/child/status/properties).
     */
    @Override
    public boolean equals(Object other) {
        if (this == other) return true;
        if (!(other instanceof Node)) return false;
        Node n = (Node)other;
        return (this.id == null ? n.id == null : this.id.equals(n.id))
            && (this.url == null ? n.url == null : this.url.equals(n.url))
            && (this.type == null ? n.type == null : this.type.equals(n.type))
            && (this.child == null ? n.child == null : this.child.equals(n.child))
            && (this.status == null ? n.status == null : this.status.equals(n.status))
            && this.properties.equals(n.properties);
    }
    /**
     * Added alongside equals(). Status is deliberately excluded: equal Nodes
     * always agree on the remaining fields, so the equals/hashCode contract
     * holds even if Status instances hash by identity.
     */
    @Override
    public int hashCode() {
        int result = id == null ? 0 : id.hashCode();
        result = 31 * result + (url == null ? 0 : url.hashCode());
        result = 31 * result + (type == null ? 0 : type.hashCode());
        result = 31 * result + (child == null ? 0 : child.hashCode());
        result = 31 * result + (properties == null ? 0 : properties.hashCode());
        return result;
    }
}
| 373 |
0 | Create_ds/Lipstick/clients/java/src/main/java/com/netflix/lipstick | Create_ds/Lipstick/clients/java/src/main/java/com/netflix/lipstick/graph/Graph.java | package com.netflix.lipstick.graph;
import java.io.IOException;
import java.io.InputStream;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.Maps;
@JsonInclude(value=JsonInclude.Include.NON_EMPTY)
@JsonIgnoreProperties(ignoreUnknown=true)
public class Graph {
private static final Log LOG = LogFactory.getLog(Graph.class);
public String id;
public String name;
public String user;
public Status status;
public Map<String, Object> properties;
@JsonIgnore
private Map<String, Node> nodeMap;
@JsonIgnore
private Map<String, Edge> edgeMap;
@JsonIgnore
private Map<String, NodeGroup> nodeGroupMap;
public Graph() {
this.status = new Status();
this.nodeMap = Maps.newHashMap();
this.edgeMap = Maps.newHashMap();
this.nodeGroupMap = Maps.newHashMap();
this.properties = Maps.newHashMap();
}
public Graph(String id, String name) {
this(id, name, System.getProperty("user.name"));
}
public Graph(String id, String name, String user) {
this.id = id;
this.name = name;
this.user = user;
this.status = new Status();
this.nodeMap = Maps.newHashMap();
this.edgeMap = Maps.newHashMap();
this.nodeGroupMap = Maps.newHashMap();
this.properties = Maps.newHashMap();
}
public int numNodes() {
return this.nodeMap.size();
}
public int numNodeGroups() {
return this.nodeGroupMap.size();
}
public int numEdges() {
return this.edgeMap.size();
}
public Graph(String id) {
this(id, "workflow-"+id);
}
    /** Fluent setter for the graph id. */
    public Graph id(String id) {
        this.id = id;
        return this;
    }
    /** Fluent setter for the graph name. */
    public Graph name(String name) {
        this.name = name;
        return this;
    }
    /** Fluent setter for the owning user. */
    public Graph user(String user) {
        this.user = user;
        return this;
    }
    /** Fluent setter for the graph status. */
    public Graph status(Status status) {
        this.status = status;
        return this;
    }
    /** Looks up a single graph property; null when absent. */
    public Object property(String key) {
        return this.properties.get(key);
    }
    /** Adds or replaces a single graph property. */
    public Graph property(String key, Object value) {
        this.properties.put(key, value);
        return this;
    }
    /** Replaces the whole property map (no defensive copy is made). */
    public Graph properties(Map<String, Object> properties) {
        this.properties = properties;
        return this;
    }
public Graph node(Node node) {
this.nodeMap.put(node.id, node);
return this;
}
public Node node(String nodeId) {
return this.nodeMap.get(nodeId);
}
public Graph edge(Edge edge) {
String edgeId = edge.u + edge.v;
this.edgeMap.put(edgeId, edge);
return this;
}
public Edge edge(String u, String v) {
String edgeId = u + v;
return this.edgeMap.get(edgeId);
}
public Graph nodeGroup(NodeGroup nodeGroup) {
this.nodeGroupMap.put(nodeGroup.id, nodeGroup);
return this;
}
public NodeGroup nodeGroup(String nodeGroupId) {
return this.nodeGroupMap.get(nodeGroupId);
}
public Collection<Node> getNodes() {
return this.nodeMap.values();
}
public void setNodes(List<Node> nodes) {
nodes(nodes);
}
public Graph nodes(List<Node> nodes) {
for (Node node : nodes) {
nodeMap.put(node.id, node);
}
return this;
}
public void setEdges(List<Edge> edges) {
edges(edges);
}
public Collection<Edge> getEdges() {
return this.edgeMap.values();
}
public Graph edges(List<Edge> edges) {
for (Edge edge : edges) {
String edgeId = edge.u + edge.v;
this.edgeMap.put(edgeId, edge);
}
return this;
}
@JsonProperty("node_groups")
public void setNodeGroups(List<NodeGroup> nodeGroups) {
nodeGroups(nodeGroups);
}
@JsonProperty("node_groups")
public Collection<NodeGroup> getNodeGroups() {
return this.nodeGroupMap.values();
}
public Graph nodeGroups(List<NodeGroup> nodeGroups) {
for (NodeGroup nodeGroup : nodeGroups) {
this.nodeGroupMap.put(nodeGroup.id, nodeGroup);
}
return this;
}
public boolean equals(Object other) {
if (this == other) return true;
if (!(other instanceof Graph)) return false;
Graph g = (Graph)other;
return
this.id == null ? g.id == null : this.id.equals(g.id) &&
this.name == null ? g.name == null : this.name.equals(g.name) &&
this.status == null ? g.status == null : this.status.equals(g.status) &&
this.getNodes().equals(g.getNodes()) &&
this.getEdges().equals(g.getEdges()) &&
this.getNodeGroups().equals(g.getNodeGroups()) &&
this.properties.equals(g.properties);
}
public static Graph fromJson(InputStream is) {
try {
Graph g = (new ObjectMapper()).readValue(is, Graph.class);
return g;
} catch (IOException e) {
LOG.error("Error deserializing Graph", e);
}
return null;
}
public static Graph fromJson(String json) {
try {
Graph g = (new ObjectMapper()).readValue(json, Graph.class);
return g;
} catch (IOException e) {
LOG.error("Error deserializing Graph", e);
}
return null;
}
public String toString() {
String result = null;
try {
result = (new ObjectMapper()).writeValueAsString(this);
} catch (IOException e) {
throw new RuntimeException(e);
}
return result;
}
}
| 374 |
0 | Create_ds/Lipstick/clients/java/src/main/java/com/netflix/lipstick | Create_ds/Lipstick/clients/java/src/main/java/com/netflix/lipstick/graph/Edge.java | package com.netflix.lipstick.graph;
import java.util.Map;
import jersey.repackaged.com.google.common.collect.Maps;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
/**
 * A directed edge from node {@code u} to node {@code v}, with an optional
 * type, label, and free-form properties. Serialized to/from JSON with Jackson.
 */
@JsonInclude(value = JsonInclude.Include.NON_EMPTY)
@JsonIgnoreProperties(ignoreUnknown = true)
public class Edge {
    // Source node id.
    public String u;
    // Destination node id.
    public String v;
    public String type;
    public String label;
    public Map<String, Object> properties;

    /** No-arg constructor for JSON deserialization. */
    public Edge() {
        this.properties = Maps.newHashMap();
    }

    /** Creates an edge from node {@code u} to node {@code v}. */
    public Edge(String u, String v) {
        this.u = u;
        this.v = v;
        this.properties = Maps.newHashMap();
    }

    /** Fluent setter for the source node id. */
    public Edge u(String u) {
        this.u = u;
        return this;
    }

    /** Fluent setter for the destination node id. */
    public Edge v(String v) {
        this.v = v;
        return this;
    }

    /** Fluent setter for the edge type. */
    public Edge type(String type) {
        this.type = type;
        return this;
    }

    /** Fluent setter for the edge label. */
    public Edge label(String label) {
        this.label = label;
        return this;
    }

    /** Returns the property stored under {@code key}, or null. */
    public Object property(String key) {
        return this.properties.get(key);
    }

    /** Fluent setter for a single property. */
    public Edge property(String key, Object value) {
        this.properties.put(key, value);
        return this;
    }

    /** Replaces the whole property map. */
    public Edge properties(Map<String, Object> properties) {
        this.properties = properties;
        return this;
    }

    /** Structural equality on endpoints, type, label, and properties. */
    @Override
    public boolean equals(Object other) {
        if (this == other) return true;
        if (!(other instanceof Edge)) return false;
        Edge e = (Edge) other;
        // Each null-safe comparison must be parenthesized: without the parens
        // the mixed ?: and && operators associated so that a null field made
        // the result skip all remaining comparisons.
        return (this.u == null ? e.u == null : this.u.equals(e.u))
                && (this.v == null ? e.v == null : this.v.equals(e.v))
                && (this.type == null ? e.type == null : this.type.equals(e.type))
                && (this.label == null ? e.label == null : this.label.equals(e.label))
                && this.properties.equals(e.properties);
    }

    /** Companion to {@link #equals(Object)}: equal edges share a hash code. */
    @Override
    public int hashCode() {
        return java.util.Objects.hash(u, v, type, label, properties);
    }
}
| 375 |
0 | Create_ds/Lipstick/clients/java/src/main/java/com/netflix/lipstick | Create_ds/Lipstick/clients/java/src/main/java/com/netflix/lipstick/graph/NodeGroup.java | package com.netflix.lipstick.graph;
import java.util.List;
import java.util.Map;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
/**
 * A group of graph nodes (children, referenced by id) with execution stages,
 * a status, an optional url, and free-form properties. Serialized to/from
 * JSON with Jackson.
 */
@JsonInclude(value = JsonInclude.Include.NON_EMPTY)
@JsonIgnoreProperties(ignoreUnknown = true)
public class NodeGroup {
    public String id;
    public String url;
    public Status status;
    public List<Stage> stages;
    // Ids of the member nodes; kept duplicate-free by child(String).
    public List<String> children;
    public Map<String, Object> properties;

    /** No-arg constructor for JSON deserialization. */
    public NodeGroup() {
        this.stages = Lists.newArrayList();
        this.properties = Maps.newHashMap();
        this.children = Lists.newArrayList();
    }

    /** Creates an empty group with the given id. */
    public NodeGroup(String id) {
        this.id = id;
        this.stages = Lists.newArrayList();
        this.properties = Maps.newHashMap();
        this.children = Lists.newArrayList();
    }

    /** Fluent setter for the group id. */
    public NodeGroup id(String id) {
        this.id = id;
        return this;
    }

    /** Fluent setter for the group url. */
    public NodeGroup url(String url) {
        this.url = url;
        return this;
    }

    /** Fluent setter for the group status. */
    public NodeGroup status(Status status) {
        this.status = status;
        return this;
    }

    /** Appends a stage. */
    public NodeGroup stage(Stage stage) {
        this.stages.add(stage);
        return this;
    }

    /** Replaces the stage list. */
    public NodeGroup stages(List<Stage> stages) {
        this.stages = stages;
        return this;
    }

    /** Returns true if {@code child} is already a member of this group. */
    public Boolean hasChild(String child) {
        return this.children.contains(child);
    }

    /** Adds a child id if not already present. */
    public NodeGroup child(String child) {
        if (!hasChild(child)) {
            this.children.add(child);
        }
        return this;
    }

    /** Replaces the child id list. */
    public NodeGroup children(List<String> children) {
        this.children = children;
        return this;
    }

    /** Returns the property stored under {@code key}, or null. */
    public Object property(String key) {
        return this.properties.get(key);
    }

    /** Fluent setter for a single property. */
    public NodeGroup property(String key, Object value) {
        this.properties.put(key, value);
        return this;
    }

    /** Replaces the whole property map. */
    public NodeGroup properties(Map<String, Object> properties) {
        this.properties = properties;
        return this;
    }

    /** Structural equality on id, url, status, stages, children, properties. */
    @Override
    public boolean equals(Object other) {
        if (this == other) return true;
        if (!(other instanceof NodeGroup)) return false;
        NodeGroup ng = (NodeGroup) other;
        // Each null-safe comparison must be parenthesized: without the parens
        // the mixed ?: and && operators associated so that a null id made the
        // result depend only on ng.id, skipping every other field.
        return (this.id == null ? ng.id == null : this.id.equals(ng.id))
                && (this.url == null ? ng.url == null : this.url.equals(ng.url))
                && (this.status == null ? ng.status == null : this.status.equals(ng.status))
                && this.stages.equals(ng.stages)
                && this.children.equals(ng.children)
                && this.properties.equals(ng.properties);
    }

    /** Companion to {@link #equals(Object)}: equal groups share a hash code. */
    @Override
    public int hashCode() {
        return java.util.Objects.hash(id, url, status, stages, children, properties);
    }
}
| 376 |
0 | Create_ds/Lipstick/clients/java/src/main/java/com/netflix/lipstick | Create_ds/Lipstick/clients/java/src/main/java/com/netflix/lipstick/template/Template.java | package com.netflix.lipstick.template;
import java.io.File;
import java.io.IOException;
import java.nio.charset.Charset;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.io.Files;
/**
 * A named rendering template with a view and a template body, both loadable
 * from files. Serialized to/from JSON with Jackson.
 */
public class Template {
    private static final Log LOG = LogFactory.getLog(Template.class);

    public String name;
    public String view;
    public String template;

    /** No-arg constructor for JSON deserialization. */
    public Template() {
    }

    /** Creates a template with the given name. */
    public Template(String name) {
        this.name = name;
    }

    /** Fluent setter for the template name. */
    public Template name(String name) {
        this.name = name;
        return this;
    }

    /** Fluent setter for the view source. */
    public Template view(String view) {
        this.view = view;
        return this;
    }

    /** Fluent setter for the template body. */
    public Template template(String template) {
        this.template = template;
        return this;
    }

    /** Reads the file at {@code uri} into a string using the default charset. */
    protected String readFile(String uri) throws IOException {
        return Files.toString(new File(uri), Charset.defaultCharset());
    }

    /** Loads the view source from the file at {@code viewUri}. */
    public Template loadView(String viewUri) throws IOException {
        view = readFile(viewUri);
        return this;
    }

    /** Loads the template body from the file at {@code templateUri}. */
    public Template loadTemplate(String templateUri) throws IOException {
        template = readFile(templateUri);
        return this;
    }

    /** Deserializes a Template from JSON; logs and returns null on failure. */
    public static Template fromJson(String json) {
        try {
            return new ObjectMapper().readValue(json, Template.class);
        } catch (IOException e) {
            LOG.error("Error deserializing Template", e);
            return null;
        }
    }

    /** Serializes this template to JSON. */
    @Override
    public String toString() {
        try {
            return new ObjectMapper().writeValueAsString(this);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
}
| 377 |
0 | Create_ds/Lipstick/clients/java/src/main/java/com/netflix/lipstick | Create_ds/Lipstick/clients/java/src/main/java/com/netflix/lipstick/client/Client.java | package com.netflix.lipstick.client;
import com.netflix.lipstick.graph.Graph;
import com.netflix.lipstick.template.Template;
/**
 * High-level Lipstick client: CRUD operations for job graphs and templates
 * on top of {@link BaseClient}'s failover request machinery.
 */
public class Client extends BaseClient {
    protected static String JOB_PATH = "/v1/job";
    protected static String TEMPLATE_PATH = "/template";

    /** Creates a client with default timeouts. */
    public Client(String serviceUrl) {
        super(serviceUrl);
    }

    /** Creates a client with explicit connect/read timeouts (milliseconds). */
    public Client(String serviceUrl, int connectTimeout, int readTimeout) {
        super(serviceUrl, connectTimeout, readTimeout);
    }

    /** Fetches the graph with the given id, or null if the request failed. */
    public Graph get(String graphId) {
        String body = makeRequest(JOB_PATH + "/" + graphId, null, RequestVerb.GET);
        if (body == null) {
            return null;
        }
        return Graph.fromJson(body);
    }

    /** Lists all jobs as a raw JSON string. */
    public String list() {
        return makeRequest(JOB_PATH, null, RequestVerb.GET);
    }

    /** Persists a new graph. */
    public String save(Graph graph) {
        return makeRequest(JOB_PATH, graph, RequestVerb.POST);
    }

    /** Updates an existing graph, addressed by its id. */
    public String update(Graph graph) {
        return makeRequest(JOB_PATH + "/" + graph.id, graph, RequestVerb.PUT);
    }

    /** Fetches the template with the given name, or null if the request failed. */
    public Template getTemplate(String name) {
        String body = makeRequest(TEMPLATE_PATH + "/" + name, null, RequestVerb.GET);
        if (body == null) {
            return null;
        }
        return Template.fromJson(body);
    }

    /** Persists a template under its name. */
    public String saveTemplate(Template template) {
        return makeRequest(TEMPLATE_PATH + "/" + template.name, template, RequestVerb.POST);
    }

    /** Lists all templates as a raw JSON string. */
    public String listTemplates() {
        return makeRequest(TEMPLATE_PATH, null, RequestVerb.GET);
    }
}
| 378 |
0 | Create_ds/Lipstick/clients/java/src/main/java/com/netflix/lipstick | Create_ds/Lipstick/clients/java/src/main/java/com/netflix/lipstick/client/BaseClient.java | package com.netflix.lipstick.client;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.PriorityQueue;
import javax.ws.rs.client.ClientBuilder;
import javax.ws.rs.client.Entity;
import javax.ws.rs.client.WebTarget;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import org.glassfish.jersey.client.ClientConfig;
import org.glassfish.jersey.client.ClientProperties;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.fasterxml.jackson.databind.ObjectMapper;
/**
 * Base HTTP client that load-balances requests across a comma-separated list
 * of Lipstick servers. Servers are kept in a priority queue ordered by a
 * penalty score; a failing server has its penalty doubled and is retried
 * later. Request bodies are serialized to JSON with Jackson.
 */
public class BaseClient {
    protected enum RequestVerb {
        POST, PUT, GET
    }

    private static final Log LOG = LogFactory.getLog(BaseClient.class);

    // Default connect/read timeout in milliseconds.
    private static final int DEFAULT_TIMEOUT = 1500;

    protected String serviceUrl;
    protected ClientConfig config;
    protected ObjectMapper om = new ObjectMapper();

    /**
     * A candidate server with an associated penalty. Lower penalty sorts
     * first in the retry queue.
     */
    public static class Server implements Comparable<Server> {
        public String url;
        public Long penalty;

        public Server(String url, Long penalty) {
            this.url = url;
            this.penalty = penalty;
        }

        /** Doubles this server's penalty, pushing it later in the retry order. */
        public void penalize() {
            this.penalty = penalty * 2l; // Double penalty each time
        }

        @Override
        public int compareTo(Server other) {
            return penalty.compareTo(other.penalty);
        }
    }

    protected PriorityQueue<Server> servers = null;

    public BaseClient(String serviceUrls) {
        this(serviceUrls, DEFAULT_TIMEOUT, DEFAULT_TIMEOUT);
    }

    /**
     * @param serviceUrls    comma-separated list of candidate server urls
     * @param connectTimeout connection timeout in milliseconds
     * @param readTimeout    read timeout in milliseconds
     */
    public BaseClient(String serviceUrls, int connectTimeout, int readTimeout) {
        LOG.info("Initializing " + this.getClass() + " with serviceUrls: " + serviceUrls);
        initializeServers(serviceUrls);
        config = new ClientConfig();
        config.property(ClientProperties.CONNECT_TIMEOUT, connectTimeout);
        config.property(ClientProperties.READ_TIMEOUT, readTimeout);
    }

    /** Seeds the priority queue with one Server (penalty 1) per url. */
    protected void initializeServers(String serviceUrls) {
        String[] urls = serviceUrls.split(",");
        servers = new PriorityQueue<Server>(urls.length);
        for (String url : urls) {
            servers.add(new Server(url, 1l));
        }
    }

    /** Returns penalized servers to the queue so they can be retried later. */
    protected void rebuildServers(List<Server> servers) {
        this.servers.addAll(servers);
    }

    /** Peeks the url of the best (lowest-penalty) server without removing it. */
    protected String getServiceUrl() {
        Server s = servers.peek();
        return s.url;
    }

    /**
     * Attempts the request against each known server in penalty order until
     * one succeeds; failing servers are penalized and restored afterwards.
     *
     * @return the response body on success, or null if every server failed
     */
    protected String makeRequest(String resource, Object requestObj, RequestVerb verb) {
        List<Server> penalized = new ArrayList<Server>();
        javax.ws.rs.client.Client c = ClientBuilder.newClient(config);
        try {
            while (servers.size() > 0) {
                String serviceUrl = getServiceUrl();
                LOG.info("Trying Lipstick server " + serviceUrl);
                WebTarget target = c.target(serviceUrl).path(resource);
                Response response = sendRequest(target, requestObj, verb);
                if (response != null) {
                    rebuildServers(penalized);
                    return response.readEntity(String.class);
                } else {
                    // Move the failing server aside with an increased penalty.
                    Server s = servers.poll();
                    s.penalize();
                    penalized.add(s);
                }
            }
            rebuildServers(penalized);
            return null;
        } finally {
            // Release the JAX-RS client's connection resources; previously the
            // client was never closed and leaked on every request.
            c.close();
        }
    }

    /**
     * Issues a single request. Returns the (buffered) response on HTTP 200;
     * logs and returns null on any error or exception.
     */
    protected Response sendRequest(WebTarget target, Object requestObj, RequestVerb verb) {
        Response response = null;
        try {
            switch (verb) {
            case POST:
                response = target.request().post(Entity.entity(om.writeValueAsString(requestObj), MediaType.APPLICATION_JSON_TYPE));
                break;
            case PUT:
                response = target.request().put(Entity.entity(om.writeValueAsString(requestObj), MediaType.APPLICATION_JSON_TYPE));
                break;
            case GET:
                response = target.request().get();
                break;
            }
            // Buffer so the entity can be read more than once (here and by the caller).
            response.bufferEntity();
            if (response.getStatus() == 200) {
                return response;
            } else {
                handleStatus(response.getStatus(), response.readEntity(String.class));
            }
        } catch (Exception e) {
            if (response != null) {
                LOG.error(String.format("Error contacting Lipstick server. code: [%d], message: [%s]", response.getStatus(), response.readEntity(String.class)));
            } else {
                LOG.error(String.format("Error contacting Lipstick server."));
            }
            LOG.debug("Stacktrace", e);
        }
        return null;
    }

    /**
     * Maps well-known HTTP error codes to typed exceptions; anything else is
     * logged at debug level and ignored.
     *
     * @throws NoSuchResourceException   on 404
     * @throws MalformedRequestException on 400
     * @throws RemoteServerException     on 500
     */
    protected void handleStatus(int status, String message) throws IOException {
        switch (status) {
        case 404:
            throw new NoSuchResourceException(String.format("%d not found, message: [%s]", status, message));
        case 400:
            throw new MalformedRequestException(String.format("%d bad request, message: [%s]", status, message));
        case 500:
            throw new RemoteServerException(String.format("%d internal server error, message: [%s]", status, message));
        default:
            LOG.debug(String.format("failed: %s message: %s", status, message));
        }
    }

    /** Thrown when the server reports 404 for the requested resource. */
    public static class NoSuchResourceException extends IOException {
        public NoSuchResourceException(String message) {
            super(message);
        }
    }

    /** Thrown when the server rejects the request as malformed (400). */
    public static class MalformedRequestException extends IOException {
        public MalformedRequestException(String message) {
            super(message);
        }
    }

    /** Thrown when the server reports an internal error (500). */
    public static class RemoteServerException extends IOException {
        public RemoteServerException(String message) {
            super(message);
        }
    }
}
| 379 |
0 | Create_ds/random-cut-forest-by-aws/Java/benchmark/src/main/java/com/amazon | Create_ds/random-cut-forest-by-aws/Java/benchmark/src/main/java/com/amazon/randomcutforest/RandomCutForestShingledBenchmark.java | /*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazon.randomcutforest;
import java.util.List;
import java.util.Random;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.Fork;
import org.openjdk.jmh.annotations.Level;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.OperationsPerInvocation;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.Warmup;
import org.openjdk.jmh.infra.Blackhole;
import com.amazon.randomcutforest.returntypes.DensityOutput;
import com.amazon.randomcutforest.returntypes.DiVector;
import com.amazon.randomcutforest.returntypes.Neighbor;
import com.amazon.randomcutforest.testutils.ShingledMultiDimDataWithKeys;
/**
 * JMH benchmarks for RandomCutForest operations (update, scoring,
 * attribution, density, nearest neighbors, imputation, extrapolation) on
 * internally shingled multi-dimensional data.
 */
@Warmup(iterations = 2)
@Measurement(iterations = 5)
@Fork(value = 1)
@State(Scope.Thread)
public class RandomCutForestShingledBenchmark {
    // Number of points processed by each benchmark invocation.
    public final static int DATA_SIZE = 50_000;
    // Number of leading points used only to warm up the forest.
    public final static int INITIAL_DATA_SIZE = 25_000;

    @State(Scope.Benchmark)
    public static class BenchmarkState {
        @Param({ "5" })
        int baseDimensions;

        @Param({ "8" })
        int shingleSize;

        @Param({ "30" })
        int numberOfTrees;

        // Fraction of bounding boxes cached inside the forest.
        @Param({ "1.0", "0.9", "0.8", "0.7", "0.6", "0.5", "0.4", "0.3", "0.2", "0.1", "0.0" })
        double boundingBoxCacheFraction;

        @Param({ "false", "true" })
        boolean parallel;

        double[][] data;
        RandomCutForest forest;

        @Setup(Level.Trial)
        public void setUpData() {
            // Generate enough points for the warm-up prefix plus the benchmark run.
            data = ShingledMultiDimDataWithKeys.getMultiDimData(DATA_SIZE + INITIAL_DATA_SIZE, 50, 100, 5, 17,
                    baseDimensions).data;
        }

        @Setup(Level.Invocation)
        public void setUpForest() {
            forest = RandomCutForest.builder().numberOfTrees(numberOfTrees).dimensions(baseDimensions * shingleSize)
                    .internalShinglingEnabled(true).shingleSize(shingleSize).parallelExecutionEnabled(parallel)
                    .boundingBoxCacheFraction(boundingBoxCacheFraction).randomSeed(99).build();
            // Warm the forest with the initial prefix only. The benchmark
            // methods consume data[INITIAL_DATA_SIZE..]; previously this loop
            // iterated that same benchmark slice, so every measurement ran
            // against a forest already trained on the exact points being
            // scored or updated.
            for (int i = 0; i < INITIAL_DATA_SIZE; i++) {
                forest.update(data[i]);
            }
        }
    }

    private RandomCutForest forest;

    /** Measures update throughput alone. */
    @Benchmark
    @OperationsPerInvocation(DATA_SIZE)
    public RandomCutForest updateOnly(BenchmarkState state) {
        double[][] data = state.data;
        forest = state.forest;
        for (int i = INITIAL_DATA_SIZE; i < data.length; i++) {
            forest.update(data[i]);
        }
        return forest;
    }

    /** Measures scoring throughput with only occasional (~1%) updates. */
    @Benchmark
    @OperationsPerInvocation(DATA_SIZE)
    public RandomCutForest scoreOnly(BenchmarkState state, Blackhole blackhole) {
        double[][] data = state.data;
        forest = state.forest;
        double score = 0.0;
        Random rnd = new Random(0);
        for (int i = INITIAL_DATA_SIZE; i < data.length; i++) {
            score += forest.getAnomalyScore(data[i]);
            if (rnd.nextDouble() < 0.01) {
                forest.update(data[i]); // this should execute sparingly
            }
        }
        blackhole.consume(score);
        return forest;
    }

    /** Measures the common production pattern: score each point, then update. */
    @Benchmark
    @OperationsPerInvocation(DATA_SIZE)
    public RandomCutForest scoreAndUpdate(BenchmarkState state, Blackhole blackhole) {
        double[][] data = state.data;
        forest = state.forest;
        double score = 0.0;
        for (int i = INITIAL_DATA_SIZE; i < data.length; i++) {
            score = forest.getAnomalyScore(data[i]);
            forest.update(data[i]);
        }
        blackhole.consume(score);
        return forest;
    }

    /** Measures per-dimension anomaly attribution plus update. */
    @Benchmark
    @OperationsPerInvocation(DATA_SIZE)
    public RandomCutForest attributionAndUpdate(BenchmarkState state, Blackhole blackhole) {
        double[][] data = state.data;
        forest = state.forest;
        DiVector vector = new DiVector(forest.getDimensions());
        for (int i = INITIAL_DATA_SIZE; i < data.length; i++) {
            vector = forest.getAnomalyAttribution(data[i]);
            forest.update(data[i]);
        }
        blackhole.consume(vector);
        return forest;
    }

    /** Measures simple density estimation plus update. */
    @Benchmark
    @OperationsPerInvocation(DATA_SIZE)
    public RandomCutForest basicDensityAndUpdate(BenchmarkState state, Blackhole blackhole) {
        double[][] data = state.data;
        forest = state.forest;
        DensityOutput output = new DensityOutput(forest.getDimensions(), forest.getSampleSize());
        for (int i = INITIAL_DATA_SIZE; i < data.length; i++) {
            output = forest.getSimpleDensity(data[i]);
            forest.update(data[i]);
        }
        blackhole.consume(output);
        return forest;
    }

    /** Measures in-sample nearest-neighbor queries plus update. */
    @Benchmark
    @OperationsPerInvocation(DATA_SIZE)
    public RandomCutForest neighborAndUpdate(BenchmarkState state, Blackhole blackhole) {
        double[][] data = state.data;
        forest = state.forest;
        List<Neighbor> output = null;
        for (int i = INITIAL_DATA_SIZE; i < data.length; i++) {
            output = forest.getNearNeighborsInSample(data[i]);
            forest.update(data[i]);
        }
        blackhole.consume(output);
        return forest;
    }

    /** Measures imputing the last base dimension of each point plus update. */
    @Benchmark
    @OperationsPerInvocation(DATA_SIZE)
    public RandomCutForest imputeAndUpdate(BenchmarkState state, Blackhole blackhole) {
        double[][] data = state.data;
        forest = state.forest;
        double[] output = null;
        for (int i = INITIAL_DATA_SIZE; i < data.length; i++) {
            output = forest.imputeMissingValues(data[i], 1, new int[] { state.baseDimensions - 1 });
            forest.update(data[i]);
        }
        blackhole.consume(output);
        return forest;
    }

    /** Measures one-step extrapolation plus update. */
    @Benchmark
    @OperationsPerInvocation(DATA_SIZE)
    public RandomCutForest extrapolateAndUpdate(BenchmarkState state, Blackhole blackhole) {
        double[][] data = state.data;
        forest = state.forest;
        double[] output = null;
        for (int i = INITIAL_DATA_SIZE; i < data.length; i++) {
            output = forest.extrapolate(1);
            forest.update(data[i]);
        }
        blackhole.consume(output);
        return forest;
    }
}
| 380 |
0 | Create_ds/random-cut-forest-by-aws/Java/benchmark/src/main/java/com/amazon | Create_ds/random-cut-forest-by-aws/Java/benchmark/src/main/java/com/amazon/randomcutforest/StateMapperShingledBenchmark.java | /*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazon.randomcutforest;
import static java.lang.Math.PI;
import static java.lang.Math.cos;
import java.util.Random;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.Fork;
import org.openjdk.jmh.annotations.Level;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.OperationsPerInvocation;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.annotations.Warmup;
import org.openjdk.jmh.infra.Blackhole;
import com.amazon.randomcutforest.config.Precision;
import com.amazon.randomcutforest.profilers.OutputSizeProfiler;
import com.amazon.randomcutforest.state.RandomCutForestMapper;
import com.amazon.randomcutforest.state.RandomCutForestState;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import io.protostuff.LinkedBuffer;
import io.protostuff.ProtostuffIOUtil;
import io.protostuff.Schema;
import io.protostuff.runtime.RuntimeSchema;
/**
 * JMH benchmarks comparing three ways of round-tripping RandomCutForest state
 * built from shingled time-series data: the in-memory state object, JSON via
 * Jackson, and protostuff binary encoding. Each round trip deserializes the
 * forest, scores and updates it with one test point, then re-serializes it.
 */
@Warmup(iterations = 2)
@Measurement(iterations = 5)
@Fork(value = 1)
@State(Scope.Benchmark)
public class StateMapperShingledBenchmark {
    // Points used to train the forest during setup (before measurement).
    public static final int NUM_TRAIN_SAMPLES = 2048;
    // Serialize/deserialize round trips performed per benchmark invocation.
    public static final int NUM_TEST_SAMPLES = 50;

    @State(Scope.Thread)
    public static class BenchmarkState {
        // Shingle size; the forest is built with shingleSize(dimensions) below,
        // so each point carries this many consecutive series values.
        @Param({ "10" })
        int dimensions;

        @Param({ "50" })
        int numberOfTrees;

        @Param({ "256" })
        int sampleSize;

        // Whether tree structure is serialized along with sampler data.
        @Param({ "false", "true" })
        boolean saveTreeState;

        @Param({ "FLOAT_32", "FLOAT_64" })
        Precision precision;

        double[][] trainingData;
        double[][] testData;
        // The same trained forest captured in each serialized form below.
        RandomCutForestState forestState;
        String json;
        byte[] protostuff;

        @Setup(Level.Trial)
        public void setUpData() {
            // Distinct seeds (0 vs 1) keep training and test streams independent.
            trainingData = genShingledData(NUM_TRAIN_SAMPLES, dimensions, 0);
            testData = genShingledData(NUM_TEST_SAMPLES, dimensions, 1);
        }

        /**
         * Trains a fresh forest and materializes its state in all three
         * serialized forms so every benchmark starts from identical input.
         */
        @Setup(Level.Invocation)
        public void setUpForest() throws JsonProcessingException {
            RandomCutForest forest = RandomCutForest.builder().compact(true).dimensions(dimensions)
            .numberOfTrees(numberOfTrees).sampleSize(sampleSize).precision(precision).shingleSize(dimensions)
            .build();
            for (int i = 0; i < NUM_TRAIN_SAMPLES; i++) {
            forest.update(trainingData[i]);
            }
            RandomCutForestMapper mapper = new RandomCutForestMapper();
            mapper.setSaveExecutorContextEnabled(true);
            mapper.setSaveTreeStateEnabled(saveTreeState);
            forestState = mapper.toState(forest);
            ObjectMapper jsonMapper = new ObjectMapper();
            json = jsonMapper.writeValueAsString(forestState);
            Schema<RandomCutForestState> schema = RuntimeSchema.getSchema(RandomCutForestState.class);
            LinkedBuffer buffer = LinkedBuffer.allocate(512);
            try {
            protostuff = ProtostuffIOUtil.toByteArray(forestState, schema, buffer);
            } finally {
            // Protostuff buffers must be cleared before reuse.
            buffer.clear();
            }
        }
    }

    // Last serialized payload, reported by OutputSizeProfiler after each
    // iteration. NOTE(review): only the JSON and protostuff benchmarks assign
    // this field, so the profiler sees stale (or null) data when
    // roundTripFromState runs — confirm this is intentional.
    private byte[] bytes;

    @TearDown(Level.Iteration)
    public void tearDown() {
        OutputSizeProfiler.setTestArray(bytes);
    }

    /** Round trip through the in-memory state object (no byte encoding). */
    @Benchmark
    @OperationsPerInvocation(NUM_TEST_SAMPLES)
    public RandomCutForestState roundTripFromState(BenchmarkState state, Blackhole blackhole) {
        RandomCutForestState forestState = state.forestState;
        double[][] testData = state.testData;
        for (int i = 0; i < NUM_TEST_SAMPLES; i++) {
        RandomCutForestMapper mapper = new RandomCutForestMapper();
        mapper.setSaveExecutorContextEnabled(true);
        mapper.setSaveTreeStateEnabled(state.saveTreeState);
        RandomCutForest forest = mapper.toModel(forestState);
        double score = forest.getAnomalyScore(testData[i]);
        blackhole.consume(score);
        forest.update(testData[i]);
        forestState = mapper.toState(forest);
        }
        return forestState;
    }

    /** Round trip through a JSON string (Jackson). */
    @Benchmark
    @OperationsPerInvocation(NUM_TEST_SAMPLES)
    public String roundTripFromJson(BenchmarkState state, Blackhole blackhole) throws JsonProcessingException {
        String json = state.json;
        double[][] testData = state.testData;
        for (int i = 0; i < NUM_TEST_SAMPLES; i++) {
        ObjectMapper jsonMapper = new ObjectMapper();
        RandomCutForestState forestState = jsonMapper.readValue(json, RandomCutForestState.class);
        RandomCutForestMapper mapper = new RandomCutForestMapper();
        mapper.setSaveExecutorContextEnabled(true);
        mapper.setSaveTreeStateEnabled(state.saveTreeState);
        RandomCutForest forest = mapper.toModel(forestState);
        double score = forest.getAnomalyScore(testData[i]);
        blackhole.consume(score);
        forest.update(testData[i]);
        json = jsonMapper.writeValueAsString(mapper.toState(forest));
        }
        // Record the final payload size for the profiler.
        bytes = json.getBytes();
        return json;
    }

    /** Round trip through protostuff binary encoding. */
    @Benchmark
    @OperationsPerInvocation(NUM_TEST_SAMPLES)
    public byte[] roundTripFromProtostuff(BenchmarkState state, Blackhole blackhole) {
        bytes = state.protostuff;
        double[][] testData = state.testData;
        for (int i = 0; i < NUM_TEST_SAMPLES; i++) {
        Schema<RandomCutForestState> schema = RuntimeSchema.getSchema(RandomCutForestState.class);
        RandomCutForestState forestState = schema.newMessage();
        ProtostuffIOUtil.mergeFrom(bytes, forestState, schema);
        RandomCutForestMapper mapper = new RandomCutForestMapper();
        mapper.setSaveExecutorContextEnabled(true);
        mapper.setSaveTreeStateEnabled(state.saveTreeState);
        RandomCutForest forest = mapper.toModel(forestState);
        double score = forest.getAnomalyScore(testData[i]);
        blackhole.consume(score);
        forest.update(testData[i]);
        forestState = mapper.toState(forest);
        LinkedBuffer buffer = LinkedBuffer.allocate(512);
        try {
        bytes = ProtostuffIOUtil.toByteArray(forestState, schema, buffer);
        } finally {
        buffer.clear();
        }
        }
        return bytes;
    }

    /**
     * Builds {@code size} shingled points of width {@code dimensions} by
     * sliding a window over a noisy cosine series (see getDataD). A ring
     * buffer collects the last {@code dimensions} values; points are emitted
     * only once the buffer has filled at least once.
     */
    private static double[][] genShingledData(int size, int dimensions, long seed) {
        double[][] answer = new double[size][];
        int entryIndex = 0;
        boolean filledShingleAtleastOnce = false;
        double[] history = new double[dimensions];
        int count = 0;
        double[] data = getDataD(size + dimensions - 1, 100, 5, seed);
        for (int j = 0; j < size + dimensions - 1; ++j) { // we stream here ....
        history[entryIndex] = data[j];
        entryIndex = (entryIndex + 1) % dimensions;
        if (entryIndex == 0) {
        filledShingleAtleastOnce = true;
        }
        if (filledShingleAtleastOnce) {
        // System.out.println("Adding " + j);
        answer[count++] = getShinglePoint(history, entryIndex, dimensions);
        }
        }
        return answer;
    }

    /**
     * Unrolls the ring buffer into a flat shingle, starting from the oldest
     * entry so values appear in chronological order.
     */
    private static double[] getShinglePoint(double[] recentPointsSeen, int indexOfOldestPoint, int shingleLength) {
        double[] shingledPoint = new double[shingleLength];
        int i = 0;
        for (int j = 0; j < shingleLength; ++j) {
        double point = recentPointsSeen[(j + indexOfOldestPoint) % shingleLength];
        shingledPoint[i++] = point;
        }
        return shingledPoint;
    }

    /**
     * Generates {@code num} samples of a cosine wave (period 1000, given
     * amplitude) plus uniform noise, deterministically seeded.
     */
    private static double[] getDataD(int num, double amplitude, double noise, long seed) {
        double[] data = new double[num];
        Random noiseprg = new Random(seed);
        for (int i = 0; i < num; i++) {
        data[i] = amplitude * cos(2 * PI * (i + 50) / 1000) + noise * noiseprg.nextDouble();
        }
        return data;
    }
}
| 381 |
0 | Create_ds/random-cut-forest-by-aws/Java/benchmark/src/main/java/com/amazon | Create_ds/random-cut-forest-by-aws/Java/benchmark/src/main/java/com/amazon/randomcutforest/StateMapperBenchmark.java | /*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazon.randomcutforest;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.Fork;
import org.openjdk.jmh.annotations.Level;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.OperationsPerInvocation;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.annotations.Warmup;
import org.openjdk.jmh.infra.Blackhole;
import com.amazon.randomcutforest.config.Precision;
import com.amazon.randomcutforest.profilers.ObjectGraphSizeProfiler;
import com.amazon.randomcutforest.profilers.OutputSizeProfiler;
import com.amazon.randomcutforest.state.RandomCutForestMapper;
import com.amazon.randomcutforest.state.RandomCutForestState;
import com.amazon.randomcutforest.testutils.NormalMixtureTestData;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import io.protostuff.LinkedBuffer;
import io.protostuff.ProtostuffIOUtil;
import io.protostuff.Schema;
import io.protostuff.runtime.RuntimeSchema;
/**
 * JMH benchmark measuring the cost of round-tripping a trained RandomCutForest
 * through its serialized state in three formats: the in-memory
 * {@link RandomCutForestState} object, JSON (Jackson), and Protostuff binary.
 * Each round trip deserializes to a model, scores and updates with one test
 * point, then serializes back.
 */
@Warmup(iterations = 2)
@Measurement(iterations = 5)
@Fork(value = 1)
@State(Scope.Benchmark)
public class StateMapperBenchmark {

    // Number of points used to train the forest before it is serialized.
    public static final int NUM_TRAIN_SAMPLES = 2048;
    // Number of score/update/serialize round trips per benchmark invocation.
    public static final int NUM_TEST_SAMPLES = 50;

    @State(Scope.Thread)
    public static class BenchmarkState {
        @Param({ "10" })
        int dimensions;

        @Param({ "50" })
        int numberOfTrees;

        @Param({ "256" })
        int sampleSize;

        // Whether the mapper also serializes tree structure (not just samplers).
        @Param({ "false", "true" })
        boolean saveTreeState;

        @Param({ "FLOAT_32", "FLOAT_64" })
        Precision precision;

        double[][] trainingData;
        double[][] testData;

        // The same trained forest, pre-encoded once in each of the three formats
        // consumed by the benchmark methods below.
        RandomCutForestState forestState;
        String json;
        byte[] protostuff;

        @Setup(Level.Trial)
        public void setUpData() {
            NormalMixtureTestData gen = new NormalMixtureTestData();
            trainingData = gen.generateTestData(NUM_TRAIN_SAMPLES, dimensions);
            testData = gen.generateTestData(NUM_TEST_SAMPLES, dimensions);
        }

        /**
         * Rebuilt before every invocation so each measurement starts from an
         * identically trained forest and freshly encoded state.
         */
        @Setup(Level.Invocation)
        public void setUpForest() throws JsonProcessingException {
            RandomCutForest forest = RandomCutForest.builder().compact(true).dimensions(dimensions)
                    .numberOfTrees(numberOfTrees).sampleSize(sampleSize).precision(precision)
                    .boundingBoxCacheFraction(0.0).build();
            for (int i = 0; i < NUM_TRAIN_SAMPLES; i++) {
                forest.update(trainingData[i]);
            }
            RandomCutForestMapper mapper = new RandomCutForestMapper();
            mapper.setSaveExecutorContextEnabled(true);
            mapper.setSaveTreeStateEnabled(saveTreeState);
            forestState = mapper.toState(forest);
            ObjectMapper jsonMapper = new ObjectMapper();
            json = jsonMapper.writeValueAsString(forestState);
            Schema<RandomCutForestState> schema = RuntimeSchema.getSchema(RandomCutForestState.class);
            LinkedBuffer buffer = LinkedBuffer.allocate(512);
            try {
                protostuff = ProtostuffIOUtil.toByteArray(forestState, schema, buffer);
            } finally {
                // LinkedBuffer is reusable; clear it after writing
                buffer.clear();
            }
        }
    }

    // Captured by tearDown so the size profilers can report on the last
    // serialized output and reconstructed forest of each iteration.
    private RandomCutForest forest;
    private byte[] bytes;

    @TearDown(Level.Iteration)
    public void tearDown() {
        OutputSizeProfiler.setTestArray(bytes);
        ObjectGraphSizeProfiler.setObject(forest);
    }

    /**
     * Round trip via the in-memory state object. Note: a new mapper is
     * constructed on every sample, so mapper construction cost is part of the
     * measurement.
     */
    @Benchmark
    @OperationsPerInvocation(NUM_TEST_SAMPLES)
    public RandomCutForestState roundTripFromState(BenchmarkState state, Blackhole blackhole) {
        RandomCutForestState forestState = state.forestState;
        double[][] testData = state.testData;
        for (int i = 0; i < NUM_TEST_SAMPLES; i++) {
            RandomCutForestMapper mapper = new RandomCutForestMapper();
            mapper.setSaveExecutorContextEnabled(true);
            mapper.setSaveTreeStateEnabled(state.saveTreeState);
            forest = mapper.toModel(forestState);
            double score = forest.getAnomalyScore(testData[i]);
            blackhole.consume(score);
            forest.update(testData[i]);
            forestState = mapper.toState(forest);
        }
        return forestState;
    }

    /**
     * Round trip via JSON text. A fresh ObjectMapper per sample keeps this
     * symmetric with {@link #roundTripFromState}.
     */
    @Benchmark
    @OperationsPerInvocation(NUM_TEST_SAMPLES)
    public String roundTripFromJson(BenchmarkState state, Blackhole blackhole) throws JsonProcessingException {
        String json = state.json;
        double[][] testData = state.testData;
        for (int i = 0; i < NUM_TEST_SAMPLES; i++) {
            ObjectMapper jsonMapper = new ObjectMapper();
            RandomCutForestState forestState = jsonMapper.readValue(json, RandomCutForestState.class);
            RandomCutForestMapper mapper = new RandomCutForestMapper();
            mapper.setSaveExecutorContextEnabled(true);
            mapper.setSaveTreeStateEnabled(state.saveTreeState);
            forest = mapper.toModel(forestState);
            double score = forest.getAnomalyScore(testData[i]);
            blackhole.consume(score);
            forest.update(testData[i]);
            json = jsonMapper.writeValueAsString(mapper.toState(forest));
        }
        // expose the final payload size to OutputSizeProfiler via tearDown
        bytes = json.getBytes();
        return json;
    }

    /**
     * Round trip via Protostuff binary encoding.
     */
    @Benchmark
    @OperationsPerInvocation(NUM_TEST_SAMPLES)
    public byte[] roundTripFromProtostuff(BenchmarkState state, Blackhole blackhole) {
        bytes = state.protostuff;
        double[][] testData = state.testData;
        for (int i = 0; i < NUM_TEST_SAMPLES; i++) {
            Schema<RandomCutForestState> schema = RuntimeSchema.getSchema(RandomCutForestState.class);
            RandomCutForestState forestState = schema.newMessage();
            ProtostuffIOUtil.mergeFrom(bytes, forestState, schema);
            RandomCutForestMapper mapper = new RandomCutForestMapper();
            mapper.setSaveExecutorContextEnabled(true);
            mapper.setSaveTreeStateEnabled(state.saveTreeState);
            forest = mapper.toModel(forestState);
            double score = forest.getAnomalyScore(testData[i]);
            blackhole.consume(score);
            forest.update(testData[i]);
            forestState = mapper.toState(forest);
            LinkedBuffer buffer = LinkedBuffer.allocate(512);
            try {
                bytes = ProtostuffIOUtil.toByteArray(forestState, schema, buffer);
            } finally {
                buffer.clear();
            }
        }
        return bytes;
    }
}
| 382 |
0 | Create_ds/random-cut-forest-by-aws/Java/benchmark/src/main/java/com/amazon | Create_ds/random-cut-forest-by-aws/Java/benchmark/src/main/java/com/amazon/randomcutforest/RandomCutForestBenchmark.java | /*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazon.randomcutforest;
import java.util.List;
import java.util.Random;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.Fork;
import org.openjdk.jmh.annotations.Level;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.OperationsPerInvocation;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.Warmup;
import org.openjdk.jmh.infra.Blackhole;
import com.amazon.randomcutforest.returntypes.DensityOutput;
import com.amazon.randomcutforest.returntypes.DiVector;
import com.amazon.randomcutforest.returntypes.Neighbor;
import com.amazon.randomcutforest.testutils.NormalMixtureTestData;
/**
 * JMH benchmark for the core RandomCutForest operations (update, score,
 * attribution, density, near-neighbor, impute) across bounding-box cache
 * fractions and parallel/sequential execution.
 */
@Warmup(iterations = 2)
@Measurement(iterations = 5)
@Fork(value = 1)
@State(Scope.Thread)
public class RandomCutForestBenchmark {

    // Number of points processed per benchmark invocation.
    public final static int DATA_SIZE = 50_000;
    // Number of points used to warm the forest before measuring.
    public final static int INITIAL_DATA_SIZE = 25_000;

    @State(Scope.Benchmark)
    public static class BenchmarkState {
        @Param({ "40" })
        int baseDimensions;

        @Param({ "1" })
        int shingleSize;

        @Param({ "30" })
        int numberOfTrees;

        // Sweeps the bounding-box cache from fully cached to uncached.
        @Param({ "1.0", "0.9", "0.8", "0.7", "0.6", "0.5", "0.4", "0.3", "0.2", "0.1", "0.0" })
        double boundingBoxCacheFraction;

        @Param({ "false", "true" })
        boolean parallel;

        double[][] data;
        RandomCutForest forest;

        @Setup(Level.Trial)
        public void setUpData() {
            int dimensions = baseDimensions * shingleSize;
            NormalMixtureTestData gen = new NormalMixtureTestData();
            // one pool of data covering both the warm-up prefix and the measured suffix
            data = gen.generateTestData(INITIAL_DATA_SIZE + DATA_SIZE, dimensions);
        }

        /**
         * Rebuilt before every invocation so each measurement starts from a
         * forest pre-filled with the same INITIAL_DATA_SIZE points.
         */
        @Setup(Level.Invocation)
        public void setUpForest() {
            forest = RandomCutForest.builder().numberOfTrees(numberOfTrees).dimensions(baseDimensions * shingleSize)
                    .internalShinglingEnabled(true).shingleSize(shingleSize).parallelExecutionEnabled(parallel)
                    .boundingBoxCacheFraction(boundingBoxCacheFraction).randomSeed(99).build();
            for (int i = 0; i < INITIAL_DATA_SIZE; i++) {
                forest.update(data[i]);
            }
        }
    }

    // Held in a field (and returned) so the JIT cannot eliminate the work.
    private RandomCutForest forest;

    /** Update throughput only: no scoring. */
    @Benchmark
    @OperationsPerInvocation(DATA_SIZE)
    public RandomCutForest updateOnly(BenchmarkState state) {
        double[][] data = state.data;
        forest = state.forest;
        for (int i = INITIAL_DATA_SIZE; i < data.length; i++) {
            forest.update(data[i]);
        }
        return forest;
    }

    /** Scoring throughput with only occasional (~1%) updates. */
    @Benchmark
    @OperationsPerInvocation(DATA_SIZE)
    public RandomCutForest scoreOnly(BenchmarkState state, Blackhole blackhole) {
        double[][] data = state.data;
        forest = state.forest;
        double score = 0.0;
        Random rnd = new Random(0);
        for (int i = INITIAL_DATA_SIZE; i < data.length; i++) {
            score += forest.getAnomalyScore(data[i]);
            if (rnd.nextDouble() < 0.01) {
                forest.update(data[i]); // this should execute sparingly
            }
        }
        blackhole.consume(score);
        return forest;
    }

    /** The typical streaming pattern: score each point, then update with it. */
    @Benchmark
    @OperationsPerInvocation(DATA_SIZE)
    public RandomCutForest scoreAndUpdate(BenchmarkState state, Blackhole blackhole) {
        double[][] data = state.data;
        forest = state.forest;
        double score = 0.0;
        for (int i = INITIAL_DATA_SIZE; i < data.length; i++) {
            score = forest.getAnomalyScore(data[i]);
            forest.update(data[i]);
        }
        blackhole.consume(score);
        return forest;
    }

    /** Per-dimension anomaly attribution followed by update. */
    @Benchmark
    @OperationsPerInvocation(DATA_SIZE)
    public RandomCutForest attributionAndUpdate(BenchmarkState state, Blackhole blackhole) {
        double[][] data = state.data;
        forest = state.forest;
        DiVector vector = new DiVector(forest.getDimensions());
        for (int i = INITIAL_DATA_SIZE; i < data.length; i++) {
            vector = forest.getAnomalyAttribution(data[i]);
            forest.update(data[i]);
        }
        blackhole.consume(vector);
        return forest;
    }

    /** Simple density estimation followed by update. */
    @Benchmark
    @OperationsPerInvocation(DATA_SIZE)
    public RandomCutForest basicDensityAndUpdate(BenchmarkState state, Blackhole blackhole) {
        double[][] data = state.data;
        forest = state.forest;
        DensityOutput output = new DensityOutput(forest.getDimensions(), forest.getSampleSize());
        for (int i = INITIAL_DATA_SIZE; i < data.length; i++) {
            output = forest.getSimpleDensity(data[i]);
            forest.update(data[i]);
        }
        blackhole.consume(output);
        return forest;
    }

    /** Near-neighbor search in the sample followed by update. */
    @Benchmark
    @OperationsPerInvocation(DATA_SIZE)
    public RandomCutForest basicNeighborAndUpdate(BenchmarkState state, Blackhole blackhole) {
        double[][] data = state.data;
        forest = state.forest;
        List<Neighbor> output = null;
        for (int i = INITIAL_DATA_SIZE; i < data.length; i++) {
            output = forest.getNearNeighborsInSample(data[i]);
            forest.update(data[i]);
        }
        blackhole.consume(output);
        return forest;
    }

    /** Imputation of the last coordinate followed by update. */
    @Benchmark
    @OperationsPerInvocation(DATA_SIZE)
    public RandomCutForest imputeAndUpdate(BenchmarkState state, Blackhole blackhole) {
        double[][] data = state.data;
        forest = state.forest;
        double[] output = null;
        for (int i = INITIAL_DATA_SIZE; i < data.length; i++) {
            // treats the final coordinate as missing and asks the forest to fill it in
            output = forest.imputeMissingValues(data[i], 1, new int[] { forest.dimensions - 1 });
            forest.update(data[i]);
        }
        blackhole.consume(output);
        return forest;
    }
}
| 383 |
0 | Create_ds/random-cut-forest-by-aws/Java/benchmark/src/main/java/com/amazon/randomcutforest | Create_ds/random-cut-forest-by-aws/Java/benchmark/src/main/java/com/amazon/randomcutforest/profilers/OutputSizeProfiler.java | /*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazon.randomcutforest.profilers;
import java.util.Collection;
import java.util.Collections;
import org.openjdk.jmh.infra.BenchmarkParams;
import org.openjdk.jmh.infra.IterationParams;
import org.openjdk.jmh.profile.InternalProfiler;
import org.openjdk.jmh.results.AggregationPolicy;
import org.openjdk.jmh.results.IterationResult;
import org.openjdk.jmh.results.Result;
import org.openjdk.jmh.results.ScalarResult;
/**
* This simple profile outputs the size of a provided byte array or string as
* part of the JMH metrics. We use it to measure the size of output in
* {@link com.amazon.randomcutforest.StateMapperBenchmark}.
*/
public class OutputSizeProfiler implements InternalProfiler {

    // Payload handed over by the benchmark (via a @TearDown hook); read once
    // and cleared in afterIteration. Static because JMH instantiates profilers
    // independently of the benchmark object.
    private static byte[] bytes;

    /**
     * Record a string whose encoded size should be reported for the current
     * iteration.
     *
     * @param s the string to measure; encoded with the platform default charset
     *          (NOTE(review): consider an explicit charset if non-ASCII payloads
     *          are expected)
     */
    public static void setTestString(String s) {
        bytes = s.getBytes();
    }

    /**
     * Record a byte array whose length should be reported for the current
     * iteration.
     *
     * @param bytes the serialized output to measure; may be null
     */
    public static void setTestArray(byte[] bytes) {
        OutputSizeProfiler.bytes = bytes;
    }

    @Override
    public void beforeIteration(BenchmarkParams benchmarkParams, IterationParams iterationParams) {
        // nothing to set up; measurement happens entirely in afterIteration
    }

    @Override
    public Collection<? extends Result> afterIteration(BenchmarkParams benchmarkParams, IterationParams iterationParams,
            IterationResult iterationResult) {
        int length = 0;
        if (bytes != null) {
            length = bytes.length;
            // clear so a benchmark that sets nothing reports 0 instead of stale data
            bytes = null;
        }
        ScalarResult result = new ScalarResult("+output-size.bytes", length, "bytes", AggregationPolicy.AVG);
        return Collections.singleton(result);
    }

    @Override
    public String getDescription() {
        // Previously returned null; JMH displays this string when listing
        // profilers, so provide a real description.
        return "Reports the size in bytes of the serialized output captured by the benchmark";
    }
}
| 384 |
0 | Create_ds/random-cut-forest-by-aws/Java/benchmark/src/main/java/com/amazon/randomcutforest | Create_ds/random-cut-forest-by-aws/Java/benchmark/src/main/java/com/amazon/randomcutforest/profilers/ObjectGraphSizeProfiler.java | /*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazon.randomcutforest.profilers;
import java.util.Collection;
import java.util.Collections;
import org.github.jamm.MemoryMeter;
import org.openjdk.jmh.infra.BenchmarkParams;
import org.openjdk.jmh.infra.IterationParams;
import org.openjdk.jmh.profile.InternalProfiler;
import org.openjdk.jmh.results.AggregationPolicy;
import org.openjdk.jmh.results.IterationResult;
import org.openjdk.jmh.results.Result;
import org.openjdk.jmh.results.ScalarResult;
/**
* A profiler that uses the JAMM memory meter to measure the size of an object
* graph.
*/
public class ObjectGraphSizeProfiler implements InternalProfiler {

    // Object handed over by the benchmark (via a @TearDown hook); measured once
    // and cleared in afterIteration. Static because JMH instantiates profilers
    // independently of the benchmark object.
    private static Object object;
    private static MemoryMeter meter = new MemoryMeter();

    /**
     * Record the object whose deep (retained-graph) size should be reported for
     * the current iteration.
     *
     * @param object the root of the object graph to measure; may be null
     */
    public static void setObject(Object object) {
        ObjectGraphSizeProfiler.object = object;
    }

    @Override
    public void beforeIteration(BenchmarkParams benchmarkParams, IterationParams iterationParams) {
        // nothing to set up; measurement happens entirely in afterIteration
    }

    @Override
    public Collection<? extends Result> afterIteration(BenchmarkParams benchmarkParams, IterationParams iterationParams,
            IterationResult iterationResult) {
        long size = 0;
        if (object != null) {
            size = meter.measureDeep(object);
            // release the reference so the measured graph can be collected
            object = null;
        }
        ScalarResult result = new ScalarResult("+object-graph-size.bytes", size, "bytes", AggregationPolicy.AVG);
        return Collections.singleton(result);
    }

    @Override
    public String getDescription() {
        // Previously returned null; JMH displays this string when listing
        // profilers, so provide a real description.
        return "Reports the deep object-graph size in bytes measured with the JAMM MemoryMeter";
    }
}
| 385 |
0 | Create_ds/random-cut-forest-by-aws/Java/core/src/test/java/com/amazon | Create_ds/random-cut-forest-by-aws/Java/core/src/test/java/com/amazon/randomcutforest/ConditionalFieldTest.java | /*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazon.randomcutforest;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.util.Random;
import org.junit.jupiter.api.Test;
import com.amazon.randomcutforest.returntypes.SampleSummary;
import com.amazon.randomcutforest.testutils.NormalMixtureTestData;
/**
 * Functional test for conditional field summaries
 * ({@code RandomCutForest.getConditionalFieldSummary}).
 *
 * <p>NOTE(review): the original version used bare Java {@code assert}
 * statements, which are silently skipped unless the JVM runs with {@code -ea};
 * they are replaced here with JUnit assertions so the checks always execute.
 * A block of unused mutable static fields (copied from another test class) was
 * also removed; the values actually used are now locals.
 */
public class ConditionalFieldTest {

    /**
     * Builds a forest over points whose coordinates are shrunk toward zero
     * except the first, which is shifted to +5 or -5 at random. Summarizing
     * with the first coordinate marked missing should then recover both
     * clusters, each carrying a substantial fraction of the weight.
     */
    @Test
    public void SimpleTest() {
        int newDimensions = 30;
        int randomSeed = 101;
        int sampleSize = 256;
        RandomCutForest newForest = RandomCutForest.builder().numberOfTrees(100).sampleSize(sampleSize)
                .dimensions(newDimensions).randomSeed(randomSeed).compact(true).boundingBoxCacheFraction(0.0).build();

        int dataSize = 2000 + 5;
        double baseMu = 0.0;
        double baseSigma = 1.0;
        double anomalyMu = 0.0;
        double anomalySigma = 1.0;
        double transitionToAnomalyProbability = 0.0;
        // ignoring anomaly cluster for now
        double transitionToBaseProbability = 1.0;
        Random prg = new Random(0);
        NormalMixtureTestData generator = new NormalMixtureTestData(baseMu, baseSigma, anomalyMu, anomalySigma,
                transitionToAnomalyProbability, transitionToBaseProbability);
        double[][] data = generator.generateTestData(dataSize, newDimensions, 100);
        for (int i = 0; i < 2000; i++) {
            // shrink, shift at random
            for (int j = 0; j < newDimensions; j++) {
                data[i][j] *= 0.01;
            }
            if (prg.nextDouble() < 0.5) {
                data[i][0] += 5.0;
            } else {
                data[i][0] -= 5.0;
            }
            newForest.update(data[i]);
        }

        float[] queryOne = new float[newDimensions];
        float[] queryTwo = new float[newDimensions];
        queryTwo[1] = 1;

        // Conditioning on a missing first coordinate should recover the two
        // clusters centered near +5 and -5.
        SampleSummary summary = newForest.getConditionalFieldSummary(queryOne, 1, new int[] { 0 }, 1);
        assertEquals(2, summary.summaryPoints.length);
        assertEquals(2, summary.relativeWeight.length);
        assertTrue(Math.abs(summary.summaryPoints[0][0] - 5.0) < 0.01
                || Math.abs(summary.summaryPoints[0][0] + 5.0) < 0.01);
        assertTrue(Math.abs(summary.summaryPoints[1][0] - 5.0) < 0.01
                || Math.abs(summary.summaryPoints[1][0] + 5.0) < 0.01);
        // both clusters carry a substantial fraction of the mass
        assertTrue(summary.relativeWeight[0] > 0.25);
        assertTrue(summary.relativeWeight[1] > 0.25);

        summary = newForest.getConditionalFieldSummary(queryTwo, 1, new int[] { 0 }, 1);
        assertEquals(2, summary.summaryPoints.length);
        assertEquals(2, summary.relativeWeight.length);
        // the known (non-missing) second coordinate is preserved in the summary
        assertEquals(1, summary.summaryPoints[0][1], 1e-6);
        assertEquals(1, summary.summaryPoints[1][1], 1e-6);
        assertTrue(Math.abs(summary.summaryPoints[0][0] - 5.0) < 0.01
                || Math.abs(summary.summaryPoints[0][0] + 5.0) < 0.01);
        assertTrue(Math.abs(summary.summaryPoints[1][0] - 5.0) < 0.01
                || Math.abs(summary.summaryPoints[1][0] + 5.0) < 0.01);
        assertTrue(summary.relativeWeight[0] > 0.25);
        assertTrue(summary.relativeWeight[1] > 0.25);
    }
}
| 386 |
0 | Create_ds/random-cut-forest-by-aws/Java/core/src/test/java/com/amazon | Create_ds/random-cut-forest-by-aws/Java/core/src/test/java/com/amazon/randomcutforest/TestUtils.java | /*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazon.randomcutforest;
import java.util.ArrayList;
import java.util.List;
import java.util.function.Function;
import java.util.stream.Collector;
import com.amazon.randomcutforest.returntypes.ConvergingAccumulator;
import com.amazon.randomcutforest.tree.INodeView;
import com.amazon.randomcutforest.tree.RandomCutTree;
/**
 * Shared helpers for unit tests: no-op visitor factories, a sorted-list
 * collector, and a configurable converging accumulator.
 */
public class TestUtils {

    // Utility class; not meant to be instantiated.
    private TestUtils() {
    }

    public static final double EPSILON = 1e-6;

    /**
     * Return a visitor that does nothing.
     */
    public static final VisitorFactory<Double> DUMMY_GENERIC_VISITOR_FACTORY = new VisitorFactory<Double>(
            (tree, x) -> new Visitor<Double>() {
                @Override
                public void accept(INodeView node, int depthOfNode) {
                }

                @Override
                public Double getResult() {
                    return Double.NaN;
                }
            });

    /**
     * Return a multi-visitor that does nothing.
     */
    public static final Function<RandomCutTree, MultiVisitor<Double>> DUMMY_MULTI_VISITOR_FACTORY = tree -> new MultiVisitor<Double>() {
        @Override
        public void accept(INodeView node, int depthOfNode) {
        }

        @Override
        public Double getResult() {
            return Double.NaN;
        }

        @Override
        public boolean trigger(INodeView node) {
            return false;
        }

        @Override
        public MultiVisitor<Double> newCopy() {
            return null;
        }

        @Override
        public void combine(MultiVisitor<Double> other) {
        }
    };

    /**
     * A collector that accumulates values into a sorted list.
     */
    public static final Collector<Double, List<Double>, List<Double>> SORTED_LIST_COLLECTOR = Collector
            .of(ArrayList::new, List::add, (left, right) -> {
                left.addAll(right);
                return left;
            }, list -> {
                // finisher: sort once after all values are accumulated
                list.sort(Double::compare);
                return list;
            });

    /**
     * Return a converging accumulator that converges after seeing numberOfEntries
     * values. The returned value is the sum of all accepted values.
     *
     * @param numberOfEntries The number of entries that need to be accepted for
     *                        this accumulator to converge.
     * @return a new converging accumulator that converges after seeing
     *         numberOfEntries values.
     */
    public static ConvergingAccumulator<Double> convergeAfter(int numberOfEntries) {
        return new ConvergingAccumulator<Double>() {
            private int valuesAccepted = 0;
            private double total = 0.0;

            @Override
            public void accept(Double value) {
                valuesAccepted++;
                total += value;
            }

            @Override
            public boolean isConverged() {
                return valuesAccepted >= numberOfEntries;
            }

            @Override
            public int getValuesAccepted() {
                return valuesAccepted;
            }

            @Override
            public Double getAccumulatedValue() {
                return total;
            }
        };
    }

    /**
     * Return a multi-visitor that does nothing.
     */
    public static final MultiVisitorFactory<Double> DUMMY_GENERIC_MULTI_VISITOR_FACTORY = new MultiVisitorFactory<>(
            (tree, y) -> new MultiVisitor<Double>() {
                @Override
                public void accept(INodeView node, int depthOfNode) {
                }

                @Override
                public Double getResult() {
                    return Double.NaN;
                }

                @Override
                public boolean trigger(INodeView node) {
                    return false;
                }

                @Override
                public MultiVisitor<Double> newCopy() {
                    return null;
                }

                @Override
                public void combine(MultiVisitor<Double> other) {
                }
            });
}
| 387 |
0 | Create_ds/random-cut-forest-by-aws/Java/core/src/test/java/com/amazon | Create_ds/random-cut-forest-by-aws/Java/core/src/test/java/com/amazon/randomcutforest/RandomCutForestFunctionalTest.java | /*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazon.randomcutforest;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.when;
import java.util.Random;
import java.util.stream.IntStream;
import java.util.stream.Stream;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtensionContext;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.ArgumentsProvider;
import org.junit.jupiter.params.provider.ArgumentsSource;
import org.junit.jupiter.params.provider.CsvSource;
import com.amazon.randomcutforest.returntypes.DensityOutput;
import com.amazon.randomcutforest.returntypes.DiVector;
import com.amazon.randomcutforest.testutils.NormalMixtureTestData;
@Tag("functional")
public class RandomCutForestFunctionalTest {
private static int numberOfTrees;
private static int sampleSize;
private static int dimensions;
private static int randomSeed;
private static RandomCutForest parallelExecutionForest;
private static RandomCutForest singleThreadedForest;
private static RandomCutForest forestSpy;
private static double baseMu;
private static double baseSigma;
private static double anomalyMu;
private static double anomalySigma;
private static double transitionToAnomalyProbability;
private static double transitionToBaseProbability;
private static int dataSize;
/**
 * Builds two forests over the same stochastic mixture dataset and seed — one
 * with parallel execution, one single-threaded — so every test can be run
 * against both via {@link TestForestProvider}.
 */
@BeforeAll
public static void oneTimeSetUp() { // this is a stochastic dataset and will have different values for different
    // runs
    numberOfTrees = 100;
    sampleSize = 256;
    dimensions = 3;
    randomSeed = 123;
    parallelExecutionForest = RandomCutForest.builder().numberOfTrees(numberOfTrees).sampleSize(sampleSize)
            .dimensions(dimensions).randomSeed(randomSeed).centerOfMassEnabled(true)
            .storeSequenceIndexesEnabled(true).build();
    singleThreadedForest = RandomCutForest.builder().numberOfTrees(numberOfTrees).sampleSize(sampleSize)
            .dimensions(dimensions).randomSeed(randomSeed).centerOfMassEnabled(true)
            .storeSequenceIndexesEnabled(true).parallelExecutionEnabled(false).build();
    dataSize = 10_000;
    // base cluster near the origin; anomaly cluster near (5, 5, 5)
    baseMu = 0.0;
    baseSigma = 1.0;
    anomalyMu = 5.0;
    anomalySigma = 1.5;
    transitionToAnomalyProbability = 0.01;
    transitionToBaseProbability = 0.4;
    NormalMixtureTestData generator = new NormalMixtureTestData(baseMu, baseSigma, anomalyMu, anomalySigma,
            transitionToAnomalyProbability, transitionToBaseProbability);
    double[][] data = generator.generateTestData(dataSize, dimensions);
    // feed both forests identical data so their scores are comparable
    for (int i = 0; i < dataSize; i++) {
        parallelExecutionForest.update(data[i]);
        singleThreadedForest.update(data[i]);
    }
}
// Use this ArgumentsProvider to run a test on both single-threaded and
// multi-threaded forests
/**
 * Supplies both the single-threaded and the parallel forest to parameterized
 * tests, so each test exercises both execution modes.
 */
static class TestForestProvider implements ArgumentsProvider {
    @Override
    public Stream<? extends Arguments> provideArguments(ExtensionContext context) throws Exception {
        return Stream.of(singleThreadedForest, parallelExecutionForest).map(Arguments::of);
    }
}
// displacement scoring (multiplied by the normalizer log_2(treesize)) on the
// fly !!
// as introduced in Robust Random Cut Forest Based Anomaly Detection in Streams
// @ICML 2016. This does not address co-displacement (duplicity).
// seen function is (x,y) -> 1 which basically ignores everything
// unseen function is (x,y) -> y which corresponds to mass of sibling
// damp function is (x,y) -> 1 which is no dampening
/** Exact dynamic displacement score (see the comment block above for the choice of functions). */
public static double getDisplacementScore(RandomCutForest forest, float[] point) {
    return forest.getDynamicScore(point, 0, (x, y) -> 1.0, (x, y) -> y, (x, y) -> 1.0);
}

/** Approximate (early-converging) variant of {@link #getDisplacementScore}, with the given precision. */
public double getDisplacementScoreApproximate(RandomCutForest forest, float[] point, double precision) {
    return forest.getApproximateDynamicScore(point, precision, true, 0, (x, y) -> 1.0, (x, y) -> y, (x, y) -> 1.0);
}
// Expected height (multiplied by the normalizer log_2(treesize) ) scoring on
// the fly !!
// seen function is (x,y) -> x+log(Y)/log(2) which depth + duplicity converted
// to depth
// unseen function is (x,y) -> x which is depth
// damp function is (x,y) -> 1 which is no dampening
// note that this is *NOT* anything like the expected height in
// Isolation Forest/Random Forest algorithms, because here
// the Expected height takes into account the contrafactual
// that "what would have happened had the point been available during
// the construction of the forest"
/** Exact dynamic expected-height score (see the comment block above for the choice of functions). */
public static double getHeightScore(RandomCutForest forest, float[] point) {
    return forest.getDynamicScore(point, 0, (x, y) -> 1.0 * (x + Math.log(y)), (x, y) -> 1.0 * x, (x, y) -> 1.0);
}

/** Approximate (early-converging) variant of {@link #getHeightScore}, with the given precision. */
public double getHeightScoreApproximate(RandomCutForest forest, float[] point, double precision) {
    return forest.getApproximateDynamicScore(point, precision, false, 0, (x, y) -> 1.0 * (x + Math.log(y)),
            (x, y) -> 1.0 * x, (x, y) -> 1.0);
}
/**
 * Verifies anomaly scores for a clearly inlying point (the origin, inside the
 * base cluster) and a clearly outlying point, through the exact scorer, the
 * approximate (early-converging) scorer, and dynamic scoring with
 * caller-supplied seen/unseen/damp functions.
 *
 * <p>NOTE(review): this method was declared {@code private}. JUnit 5 requires
 * test methods to be non-private, so this parameterized test was never
 * executed; it is now {@code public} like the other tests in this class.
 */
@ParameterizedTest
@ArgumentsSource(TestForestProvider.class)
public void testGetAnomalyScore(RandomCutForest forest) {
    float[] point = { 0.0f, 0.0f, 0.0f };
    // an inlier should score below 1 on both exact and approximate paths
    double score = forest.getAnomalyScore(point);
    assertTrue(score < 1);
    assertTrue(forest.getApproximateAnomalyScore(point) < 1);
    /**
     * This part demonstrates testing of dynamic scoring where score functions are
     * changed on the fly.
     */
    // displacement scoring on the fly!!
    score = getDisplacementScore(forest, point);
    assertTrue(score < 25);
    // testing that the leaf exclusion does not affect anything
    // tests the masking effect
    assertTrue(forest.getDynamicScore(point, 1, (x, y) -> 1.0, (x, y) -> y, (x, y) -> 1.0) < 25);
    double newScore = getDisplacementScoreApproximate(forest, point, 0);
    // precision 0 must agree with the exact score
    assertEquals(score, newScore, 1E-10);
    double otherScore = getDisplacementScoreApproximate(forest, point, 0.1);
    assertTrue(otherScore < 25);
    // the approximation bound is increased to accomodate the
    // larger variance of the probabilistic test
    // adjust the parameters in early convergence to
    // get 0.1*score+0.1
    assertEquals(otherScore, newScore, 0.3 * score + 0.1);
    /**
     * Using expected height -- note that this height is not the same as the height
     * in a random forest, because it accounts for the contrafactual of having
     * constructed the forest with the knowledge of the point.
     */
    score = getHeightScore(forest, point);
    assertTrue(score > 50);
    newScore = getHeightScoreApproximate(forest, point, 0);
    assertEquals(score, newScore, 1E-10);
    otherScore = getHeightScoreApproximate(forest, point, 0.1);
    assertTrue(otherScore > 50);
    // the approximation bound is increased to accomodate the
    // larger variance of the probabilistic test
    assertEquals(score, otherScore, 0.3 * score + 0.1);

    // an outlier far from both clusters should score above 1
    point = new float[] { 8.0f, 8.0f, 8.0f };
    score = forest.getAnomalyScore(point);
    assertTrue(score > 1);
    assertTrue(forest.getApproximateAnomalyScore(point) > 1);
    // displacement scoring on the fly !!
    score = getDisplacementScore(forest, point);
    assertTrue(score > 100);
    // testing masking
    assertTrue(forest.getDynamicScore(point, 1, (x, y) -> 1.0, (x, y) -> y, (x, y) -> 1.0) > 100);
    newScore = getDisplacementScoreApproximate(forest, point, 0);
    assertEquals(score, newScore, 1E-10);
    otherScore = getDisplacementScoreApproximate(forest, point, 0.1);
    assertTrue(otherScore > 100);
    // the approximation bound is increased to accomodate the
    // larger variance of the probabilistic test
    assertEquals(score, otherScore, 0.3 * score + 0.1);
    // Expected height scoring on the fly !!
    score = getHeightScore(forest, point);
    assertTrue(score < 30);
    newScore = getHeightScoreApproximate(forest, point, 0);
    assertEquals(score, newScore, 1E-10);
    otherScore = getHeightScoreApproximate(forest, point, 0.1);
    assertTrue(otherScore < 30);
    // the approximation bound is increased to accomodate the
    // larger variance of the probabilistic test
    assertEquals(score, otherScore, 0.3 * score + 0.1);
}
/**
 * Scoring must be side-effect free: the score of a fixed point should be
 * bit-identical after many unrelated getAnomalyScore calls.
 */
@ParameterizedTest
@ArgumentsSource(TestForestProvider.class)
public void testSideEffectsA(RandomCutForest forest) {
    double score = forest.getAnomalyScore(new double[] { 0.0, 0.0, 0.0 });
    NormalMixtureTestData generator2 = new NormalMixtureTestData(baseMu, baseSigma, anomalyMu, anomalySigma,
            transitionToAnomalyProbability, transitionToBaseProbability);
    double[][] newData = generator2.generateTestData(dataSize, dimensions);
    // score (but never update with) a large batch of fresh points
    for (int i = 0; i < dataSize; i++) {
        forest.getAnomalyScore(newData[i]);
    }
    double newScore = forest.getAnomalyScore(new double[] { 0.0, 0.0, 0.0 });
    assertEquals(score, newScore, 10E-10);
}
/**
 * Attribution must also be side-effect free, and must stay in sync with the
 * scalar score: the per-dimension attribution of a fixed point is unchanged
 * after many unrelated getAnomalyAttribution calls, and its high/low sum
 * equals the anomaly score.
 */
@ParameterizedTest
@ArgumentsSource(TestForestProvider.class)
public void testSideEffectsB(RandomCutForest forest) {
    /* the changes to score and attribution should be in sync */
    DiVector initial = forest.getAnomalyAttribution(new double[] { 0.0, 0.0, 0.0 });
    NormalMixtureTestData generator2 = new NormalMixtureTestData(baseMu, baseSigma, anomalyMu, anomalySigma,
            transitionToAnomalyProbability, transitionToBaseProbability);
    double[][] newData = generator2.generateTestData(dataSize, dimensions);
    // attribute (but never update with) a large batch of fresh points
    for (int i = 0; i < dataSize; i++) {
        forest.getAnomalyAttribution(newData[i]);
    }
    double newScore = forest.getAnomalyScore(new double[] { 0.0, 0.0, 0.0 });
    DiVector newVector = forest.getAnomalyAttribution(new double[] { 0.0, 0.0, 0.0 });
    assertEquals(initial.getHighLowSum(), newVector.getHighLowSum(), 10E-10);
    assertEquals(initial.getHighLowSum(), newScore, 1E-10);
    assertArrayEquals(initial.high, newVector.high, 1E-10);
    assertArrayEquals(initial.low, newVector.low, 1E-10);
}
/**
 * For an inlying point, every per-dimension contribution should be small, the
 * total should be below 1, and the exact and approximate attributions should
 * agree with their corresponding scores.
 */
@ParameterizedTest
@ArgumentsSource(TestForestProvider.class)
public void testGetAnomalyAttribution(RandomCutForest forest) {
    /* This method checks that the scores and attributions are consistent */
    double[] point = { 0.0, 0.0, 0.0 };
    DiVector seenResult = forest.getAnomalyAttribution(point);
    double seenScore = forest.getAnomalyScore(point);
    assertTrue(seenResult.getHighLowSum(0) < 0.5);
    assertTrue(seenResult.getHighLowSum(1) < 0.5);
    assertTrue(seenResult.getHighLowSum(2) < 0.5);
    assertTrue(seenScore < 1.0);
    // attribution components must sum (near-exactly) to the score
    assertEquals(seenScore, seenResult.getHighLowSum(), 1E-10);
    DiVector likelyResult = forest.getApproximateAnomalyAttribution(point);
    double score = forest.getApproximateAnomalyScore(point);
    assertTrue(likelyResult.getHighLowSum(0) < 0.5);
    assertTrue(likelyResult.getHighLowSum(1) < 0.5);
    assertTrue(likelyResult.getHighLowSum(2) < 0.5);
    // looser tolerance for the early-converging approximate path
    assertEquals(score, likelyResult.getHighLowSum(), 0.1);
    assertEquals(seenResult.getHighLowSum(), likelyResult.getHighLowSum(), 0.1);
}
@ParameterizedTest
@ArgumentsSource(TestForestProvider.class)
public void testMultipleAttributions(RandomCutForest forest) {
    /**
     * We will test the attribution over random runs. Narrow tests can fail -- we
     * will keep track of the aggregate number of narrow tests and test for large
     * characterization that would be misleading in failure.
     */
    // FIX(review): the original evaluated assertTrue(result.getHighLowSum() > 1.0)
    // for each new query point BEFORE recomputing the attribution, so those checks
    // silently re-tested the previous point's result. Each such assertion now
    // follows the corresponding getAnomalyAttribution call.
    int hardPass = 0;
    int causal = 0;
    double[] point = { 6.0, 0.0, 0.0 };
    DiVector result = forest.getAnomalyAttribution(point);
    assertTrue(result.low[0] < 0.2);
    if (result.getHighLowSum(1) < 0.5)
        ++hardPass;
    if (result.getHighLowSum(2) < 0.5)
        ++hardPass;
    assertTrue(result.getHighLowSum(1) + result.getHighLowSum(2) < 1.0);
    assertTrue(result.high[0] > forest.getAnomalyScore(point) / 3);
    if (result.high[0] > 0.5 * forest.getAnomalyScore(point))
        ++causal;
    // the last line states that first coordinate was high and was a majority
    // contributor to the score
    // the previous test states that the contribution is twice the average of the 12
    // possible contributors.
    // these tests all subparts of the score at once
    point = new double[] { -6.0, 0.0, 0.0 };
    result = forest.getAnomalyAttribution(point);
    assertTrue(result.getHighLowSum() > 1.0);
    assertTrue(result.high[0] < 0.5);
    if (result.getHighLowSum(1) < 0.5)
        ++hardPass;
    if (result.getHighLowSum(2) < 0.5)
        ++hardPass;
    assertTrue(result.low[0] > forest.getAnomalyScore(point) / 3);
    if (result.low[0] > 0.5 * forest.getAnomalyScore(point))
        ++causal;
    point = new double[] { 0.0, 6.0, 0.0 };
    result = forest.getAnomalyAttribution(point);
    assertTrue(result.getHighLowSum() > 1.0);
    if (result.getHighLowSum(0) < 0.5)
        ++hardPass;
    if (result.getHighLowSum(2) < 0.5)
        ++hardPass;
    assertTrue(result.low[1] < 0.5);
    assertTrue(result.high[1] > forest.getAnomalyScore(point) / 3);
    if (result.high[1] > 0.5 * forest.getAnomalyScore(point))
        ++causal;
    point = new double[] { 0.0, -6.0, 0.0 };
    result = forest.getAnomalyAttribution(point);
    assertTrue(result.getHighLowSum() > 1.0);
    if (result.getHighLowSum(0) < 0.5)
        ++hardPass;
    if (result.getHighLowSum(2) < 0.5)
        ++hardPass;
    assertTrue(result.high[1] < 0.5);
    assertTrue(result.low[1] > forest.getAnomalyScore(point) / 3);
    if (result.low[1] > 0.5 * forest.getAnomalyScore(point))
        ++causal;
    point = new double[] { 0.0, 0.0, 6.0 };
    result = forest.getAnomalyAttribution(point);
    assertTrue(result.getHighLowSum() > 1.0);
    if (result.getHighLowSum(0) < 0.5)
        ++hardPass;
    if (result.getHighLowSum(1) < 0.5)
        ++hardPass;
    assertTrue(result.low[2] < 0.5);
    assertTrue(result.high[2] > forest.getAnomalyScore(point) / 3);
    if (result.high[2] > 0.5 * forest.getAnomalyScore(point))
        ++causal;
    point = new double[] { 0.0, 0.0, -6.0 };
    result = forest.getAnomalyAttribution(point);
    assertTrue(result.getHighLowSum() > 1.0);
    if (result.getHighLowSum(0) < 0.5)
        ++hardPass;
    if (result.getHighLowSum(1) < 0.5)
        ++hardPass;
    assertTrue(result.high[2] < 0.5);
    assertTrue(result.low[2] > forest.getAnomalyScore(point) / 3);
    if (result.low[2] > 0.5 * forest.getAnomalyScore(point))
        ++causal;
    assertTrue(causal >= 5); // maximum is 6; there can be skew in one direction
    point = new double[] { -3.0, 0.0, 0.0 };
    result = forest.getAnomalyAttribution(point);
    assertTrue(result.high[0] < 0.5);
    if (result.getHighLowSum(1) < 0.5)
        ++hardPass;
    if (result.getHighLowSum(2) < 0.5)
        ++hardPass;
    assertTrue(result.low[0] > forest.getAnomalyScore(point) / 3);
    /*
     * For multiple causes, the relationship of scores only hold for larger
     * distances.
     */
    point = new double[] { -3.0, 6.0, 0.0 };
    result = forest.getAnomalyAttribution(point);
    assertTrue(result.getHighLowSum() > 1.0);
    if (result.low[0] > 0.5)
        ++hardPass;
    assertTrue(result.high[0] < 0.5);
    assertTrue(result.low[1] < 0.5);
    assertTrue(result.high[1] > 0.5);
    if (result.high[1] > 0.9)
        ++hardPass;
    assertTrue(result.getHighLowSum(2) < 0.5);
    assertTrue(result.high[1] + result.low[0] > 0.8 * forest.getAnomalyScore(point));
    point = new double[] { 6.0, -3.0, 0.0 };
    result = forest.getAnomalyAttribution(point);
    assertTrue(result.getHighLowSum() > 1.0);
    assertTrue(result.low[0] < 0.5);
    assertTrue(result.high[0] > 0.5);
    if (result.high[0] > 0.9)
        ++hardPass;
    if (result.low[1] > 0.5)
        ++hardPass;
    assertTrue(result.high[1] < 0.5);
    assertTrue(result.getHighLowSum(2) < 0.5);
    assertTrue(result.high[0] + result.low[1] > 0.8 * forest.getAnomalyScore(point));
    point = new double[] { 20.0, -10.0, 0.0 };
    result = forest.getAnomalyAttribution(point);
    assertTrue(result.getHighLowSum() > 1.0);
    assertTrue(result.high[0] + result.low[1] > 0.8 * forest.getAnomalyScore(point));
    if (result.high[0] > 1.8 * result.low[1])
        ++hardPass;
    if (result.low[1] > result.high[0] / 2.2)
        ++hardPass;
    assertTrue(hardPass >= 15); // maximum is 20
}
@Test
public void testUpdateWithSignedZeros() {
    // Regression check: updating with 0.0 and -0.0, then scoring both signed
    // zeros, must complete without throwing (IEEE-754 signed zero handling).
    RandomCutForest forest = RandomCutForest.builder().numberOfTrees(numberOfTrees).sampleSize(2).dimensions(1)
            .randomSeed(randomSeed).centerOfMassEnabled(true).storeSequenceIndexesEnabled(true).build();
    double[][] signedZeros = { { 0.0 }, { -0.0 } };
    for (double[] zero : signedZeros) {
        forest.update(zero);
        forest.getAnomalyScore(new double[] { 0.0 });
        forest.getAnomalyScore(new double[] { -0.0 });
    }
}
@Test
public void testShadowBuffer() {
    /**
     * This test checks that the attribution *DOES NOT* change as a ratio as more
     * copies of the points are added. The shadowbox in
     * the @DirectionalAttributionVisitor allows us to simulate a deletion without
     * performing a deletion.
     *
     * The goal is to measure the attribution and have many copies of the same point
     * and eventually the attribution will become uniform in all directions.
     *
     * we create a new forest so that other tests are unaffected.
     */
    numberOfTrees = 100;
    sampleSize = 256;
    dimensions = 3;
    randomSeed = 123;
    RandomCutForest newForest = RandomCutForest.builder().numberOfTrees(numberOfTrees).sampleSize(sampleSize)
            .dimensions(dimensions).randomSeed(randomSeed).centerOfMassEnabled(true).timeDecay(1e-5)
            .storeSequenceIndexesEnabled(true).build();
    dataSize = 10_000;
    baseMu = 0.0;
    baseSigma = 1.0;
    anomalyMu = 5.0;
    anomalySigma = 1.5;
    transitionToAnomalyProbability = 0.01;
    transitionToBaseProbability = 0.4;
    NormalMixtureTestData generator = new NormalMixtureTestData(baseMu, baseSigma, anomalyMu, anomalySigma,
            transitionToAnomalyProbability, transitionToBaseProbability);
    double[][] data = generator.generateTestData(dataSize, dimensions);
    for (int i = 0; i < dataSize; i++) {
        newForest.update(data[i]);
    }
    double[] point = new double[] { -8.0, -8.0, 0.0 };
    DiVector result = newForest.getAnomalyAttribution(point);
    double score = newForest.getAnomalyScore(point);
    assertEquals(score, result.getHighLowSum(), 1E-5);
    assertTrue(score > 2);
    assertTrue(result.getHighLowSum(2) < 0.2);
    // the third dimension has little influence in classification
    // this is going to add {-8,-8,0} into the forest
    // (comment fixed: the original said {8,8,0}, but the point is negative)
    // but not enough to cause large scale changes
    // note the probability of a tree seeing a change is
    // 256/10_000
    for (int i = 0; i < 5; i++) {
        newForest.update(point);
    }
    DiVector newResult = newForest.getAnomalyAttribution(point);
    double newScore = newForest.getAnomalyScore(point);
    assertEquals(newScore, newResult.getHighLowSum(), 1E-5);
    assertTrue(newScore < score);
    for (int j = 0; j < 3; j++) {
        // relationship holds at larger values
        if (result.high[j] > 0.2) {
            assertEquals(score * newResult.high[j], newScore * result.high[j], 0.1 * score);
        } else {
            assertTrue(newResult.high[j] < 0.2);
        }
        if (result.low[j] > 0.2) {
            assertEquals(score * newResult.low[j], newScore * result.low[j], 0.1 * score);
        } else {
            assertTrue(newResult.low[j] < 0.2);
        }
    }
    // this will make the point an inlier
    for (int i = 0; i < 5000; i++) {
        newForest.update(point);
    }
    DiVector finalResult = newForest.getAnomalyAttribution(point);
    double finalScore = newForest.getAnomalyScore(point);
    assertTrue(finalScore < 1);
    assertEquals(finalScore, finalResult.getHighLowSum(), 1E-5);
    for (int j = 0; j < 3; j++) {
        // relationship holds at larger values
        if (finalResult.high[j] > 0.2) {
            assertEquals(score * finalResult.high[j], finalScore * result.high[j], 0.1 * score);
        } else {
            // FIX(review): the original asserted newResult.high[j] here -- a
            // copy-paste from the previous loop. The final attribution is the
            // quantity under test, mirroring the low[] branch below.
            assertTrue(finalResult.high[j] < 0.2);
        }
        if (finalResult.low[j] > 0.2) {
            assertEquals(score * finalResult.low[j], finalScore * result.low[j], 0.1 * score);
        } else {
            assertTrue(finalResult.low[j] < 0.2);
        }
    }
}
@ParameterizedTest
@ArgumentsSource(TestForestProvider.class)
public void testSimpleDensity(RandomCutForest forest) {
    // Density should fall off as the query point moves away from the bulk of
    // the training data near the origin; a closer point beats a farther one.
    DensityOutput atCenter = forest.getSimpleDensity(new double[] { 0.0, 0.0, 0.0 });
    DensityOutput farPositive = forest.getSimpleDensity(new double[] { 6.0, 6.0, 0.0 });
    DensityOutput nearNegative = forest.getSimpleDensity(new double[] { -4.0, -4.0, 0.0 });
    DensityOutput farNegative = forest.getSimpleDensity(new double[] { -6.0, -6.0, 0.0 });
    assertTrue(atCenter.getDensity(0.001, 3) > farPositive.getDensity(0.001, 3));
    assertTrue(atCenter.getDensity(0.001, 3) > nearNegative.getDensity(0.001, 3));
    assertTrue(atCenter.getDensity(0.001, 3) > farNegative.getDensity(0.001, 3));
    assertTrue(nearNegative.getDensity(0.001, 3) > farNegative.getDensity(0.001, 3));
}
@ParameterizedTest
@ArgumentsSource(TestForestProvider.class)
public void testSimpleDensityWhenSamplerNotFullThenDensityIsZero(RandomCutForest forest) {
    // While the samplers are not yet full, the density estimate is defined to
    // be zero; simulate that state by stubbing samplersFull().
    RandomCutForest partiallyFilled = spy(forest);
    when(partiallyFilled.samplersFull()).thenReturn(false);
    DensityOutput density = partiallyFilled.getSimpleDensity(new double[] { 0.0, 0.0, 0.0 });
    assertEquals(0, density.getDensity(0.001, 3));
}
@ParameterizedTest
@ArgumentsSource(TestForestProvider.class)
public void testImputeMissingValues(RandomCutForest forest) {
    // Imputing the single NaN coordinate must leave the known coordinates
    // untouched and produce a near-zero value for the missing one (the data
    // is centered at the origin).
    double[] queryPoint = { Double.NaN, 0.02, 0.01 };
    int[] missingIndexes = { 0 };
    double[] imputed = forest.imputeMissingValues(queryPoint, missingIndexes.length, missingIndexes);
    assertEquals(queryPoint[1], imputed[1], 1e-5);
    assertTrue(Math.abs(imputed[0]) < 0.5);
}
@Test
public void getTotalUpdates_returnExpectedSize() {
    // Both shared fixtures were fed exactly dataSize points during setup, so
    // their update counters must equal dataSize regardless of execution mode.
    assertEquals(dataSize, singleThreadedForest.getTotalUpdates());
    assertEquals(dataSize, parallelExecutionForest.getTotalUpdates());
}
/**
 * Verifies that the bounding-box cache fraction is purely an execution-speed
 * knob: a non-compact forest and a compact forest built from the same seed
 * must produce identical scores even while each forest's cache fraction is
 * re-randomized mid-stream (at offsets 1000 and 0 of every 2000 points).
 */
@ParameterizedTest(name = "{index} => numDims={0}, numTrees={1}, numSamples={2}, numTrainSamples={3}, "
        + "numTestSamples={4}, enableParallel={5}, numThreads={6}")
@CsvSource({ "10, 50, 256, 50000, 0, 0, 0" })
public void dynamicCachingChangeTest(int numDims, int numTrees, int numSamples, int numTrainSamples,
        int numTestSamples, int enableParallel, int numThreads) {
    RandomCutForest.Builder<?> forestBuilder = RandomCutForest.builder().dimensions(numDims).numberOfTrees(numTrees)
            .sampleSize(numSamples).randomSeed(0).boundingBoxCacheFraction(1.0).compact(false);
    if (enableParallel == 0) {
        forestBuilder.parallelExecutionEnabled(false);
    }
    if (numThreads > 0) {
        forestBuilder.threadPoolSize(numThreads);
    }
    RandomCutForest forest = forestBuilder.build();
    // same seed, but compact representation -- scores must match exactly
    RandomCutForest anotherForest = RandomCutForest.builder().dimensions(numDims).numberOfTrees(numTrees)
            .sampleSize(numSamples).randomSeed(0).compact(true).boundingBoxCacheFraction(1.0).build();
    int count = 0;
    for (double[] point : generate(numTrainSamples, numDims, 0)) {
        ++count;
        // score BEFORE update so both forests see identical state
        double score = forest.getAnomalyScore(point);
        double anotherScore = anotherForest.getAnomalyScore(point);
        assertEquals(score, anotherScore, 1E-10);
        forest.update(point);
        anotherForest.update(point);
        // NOTE(review): Math.random() is unseeded, so the fractions chosen here
        // differ between runs; scores must agree regardless, but a seeded Random
        // would make any failure reproducible.
        if (count % 2000 == 1000) {
            double fraction = Math.random();
            // System.out.println(" second forest fraction " + fraction);
            anotherForest.setBoundingBoxCacheFraction(fraction);
        }
        if (count % 2000 == 0) {
            double fraction = Math.random();
            // System.out.println(" first forest fraction " + fraction);
            forest.setBoundingBoxCacheFraction(fraction);
        }
    }
}
/**
 * Same compact-vs-non-compact score-equality check as dynamicCachingChangeTest,
 * but with a much larger sample size (30000 against a 50000-point stream) so
 * the samplers spend a long stretch of the test only partially full; the cache
 * fraction is not varied here.
 */
@ParameterizedTest(name = "{index} => numDims={0}, numTrees={1}, numSamples={2}, numTrainSamples={3}, "
        + "numTestSamples={4}, enableParallel={5}, numThreads={6}")
@CsvSource({ "10, 10, 30000, 50000, 0, 0, 0" })
public void dynamicCachingChangeTestLarge(int numDims, int numTrees, int numSamples, int numTrainSamples,
        int numTestSamples, int enableParallel, int numThreads) {
    RandomCutForest.Builder<?> forestBuilder = RandomCutForest.builder().dimensions(numDims).numberOfTrees(numTrees)
            .sampleSize(numSamples).randomSeed(0).boundingBoxCacheFraction(1.0).compact(false);
    if (enableParallel == 0) {
        forestBuilder.parallelExecutionEnabled(false);
    }
    if (numThreads > 0) {
        forestBuilder.threadPoolSize(numThreads);
    }
    RandomCutForest forest = forestBuilder.build();
    RandomCutForest anotherForest = RandomCutForest.builder().dimensions(numDims).numberOfTrees(numTrees)
            .sampleSize(numSamples).randomSeed(0).compact(true).boundingBoxCacheFraction(1.0).build();
    int count = 0;
    for (double[] point : generate(numTrainSamples, numDims, 0)) {
        ++count;
        // score BEFORE update so both forests see identical state
        double score = forest.getAnomalyScore(point);
        double anotherScore = anotherForest.getAnomalyScore(point);
        assertEquals(score, anotherScore, 1E-10);
        forest.update(point);
        anotherForest.update(point);
    }
}
/**
 * Generates deterministic pseudo-random test data: row i is drawn from
 * new Random(seed + i), so the output depends only on the arguments.
 */
private double[][] generate(int numSamples, int numDimensions, int seed) {
    double[][] samples = new double[numSamples][];
    for (int i = 0; i < numSamples; i++) {
        samples[i] = new Random(seed + i).doubles(numDimensions).toArray();
    }
    return samples;
}
}
| 388 |
0 | Create_ds/random-cut-forest-by-aws/Java/core/src/test/java/com/amazon | Create_ds/random-cut-forest-by-aws/Java/core/src/test/java/com/amazon/randomcutforest/RandomCutForestShingledFunctionalTest.java | /*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazon.randomcutforest;
import static com.amazon.randomcutforest.CommonUtils.toDoubleArray;
import static com.amazon.randomcutforest.testutils.ShingledMultiDimDataWithKeys.generateShingledData;
import static java.lang.Math.PI;
import static java.lang.Math.cos;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.util.List;
import java.util.Random;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.ValueSource;
import com.amazon.randomcutforest.config.Precision;
import com.amazon.randomcutforest.state.RandomCutForestMapper;
import com.amazon.randomcutforest.state.RandomCutForestState;
import com.amazon.randomcutforest.store.PointStore;
import com.amazon.randomcutforest.summarization.ICluster;
import com.amazon.randomcutforest.summarization.Summarizer;
import com.amazon.randomcutforest.testutils.MultiDimDataWithKey;
import com.amazon.randomcutforest.testutils.NormalMixtureTestData;
import com.amazon.randomcutforest.testutils.ShingledMultiDimDataWithKeys;
import com.amazon.randomcutforest.util.ShingleBuilder;
@Tag("functional")
public class RandomCutForestShingledFunctionalTest {
private static int numberOfTrees;
private static int sampleSize;
private static int dimensions;
private static int randomSeed;
private static int shingleSize;
private static ShingleBuilder shingleBuilder;
private static RandomCutForest forest;
private static double baseMu;
private static double baseSigma;
private static double anomalyMu;
private static double anomalySigma;
private static double transitionToAnomalyProbability;
private static double transitionToBaseProbability;
private static int dataSize;
/**
 * Builds the shared fixture: a forest whose dimension is the shingled point
 * size (2 base dimensions x shingle size 3), fed with 10k points from a
 * two-regime normal mixture, shingled externally before each update.
 */
@BeforeAll
public static void oneTimeSetUp() {
    numberOfTrees = 100;
    sampleSize = 256;
    dimensions = 2;
    randomSeed = 123;
    shingleSize = 3;
    shingleBuilder = new ShingleBuilder(dimensions, shingleSize);
    // the forest consumes full shingles, hence getShingledPointSize()
    forest = RandomCutForest.builder().numberOfTrees(numberOfTrees).sampleSize(sampleSize)
            .dimensions(shingleBuilder.getShingledPointSize()).randomSeed(randomSeed).centerOfMassEnabled(true)
            .initialAcceptFraction(0.5).storeSequenceIndexesEnabled(true).build();
    dataSize = 10_000;
    // base regime ~ N(0, 1); anomaly regime ~ N(5, 1.5); rare transitions in,
    // frequent transitions back to the base regime
    baseMu = 0.0;
    baseSigma = 1.0;
    anomalyMu = 5.0;
    anomalySigma = 1.5;
    transitionToAnomalyProbability = 0.01;
    transitionToBaseProbability = 0.4;
    NormalMixtureTestData generator = new NormalMixtureTestData(baseMu, baseSigma, anomalyMu, anomalySigma,
            transitionToAnomalyProbability, transitionToBaseProbability);
    double[][] data = generator.generateTestData(dataSize, dimensions);
    for (int i = 0; i < dataSize; i++) {
        shingleBuilder.addPoint(data[i]);
        // skip updates until the shingle window has filled once
        if (shingleBuilder.isFull()) {
            forest.update(shingleBuilder.getShingle());
        }
    }
}
@Test
public void testExtrapolateBasic() {
    // Every extrapolation variant must return horizon * dimensions values.
    int horizon = 4;
    double[] result = forest.extrapolateBasic(shingleBuilder.getShingle(), horizon, dimensions, false);
    assertEquals(horizon * dimensions, result.length);
    result = forest.extrapolateBasic(shingleBuilder.getShingle(), horizon, dimensions, true, 2);
    assertEquals(horizon * dimensions, result.length);
    result = forest.extrapolateBasic(shingleBuilder, horizon);
    assertEquals(horizon * dimensions, result.length);
    // a block size too large for the shingle must be rejected
    assertThrows(IllegalArgumentException.class,
            () -> forest.extrapolateBasic(shingleBuilder.getShingle(), horizon, 4, true, 2));
}
/**
 * Internal shingling (with and without rotation) must produce exactly the
 * same scores as feeding externally shingled points to equivalent forests,
 * both with shingleSize declared and with shingleSize 1. Afterwards the
 * point-store summarizations of all three forests are cross-checked.
 */
@ParameterizedTest
@ValueSource(booleans = { true, false })
public void InternalShinglingTest(boolean rotation) {
    int sampleSize = 256;
    int baseDimensions = 2;
    int shingleSize = 2;
    int dimensions = baseDimensions * shingleSize;
    // the seed is chosen at random and printed so a failing run can be replayed
    long seed = new Random().nextLong();
    System.out.println(seed);
    Random rng = new Random(seed);
    int numTrials = 3; // test is exact equality, reducing the number of trials
    int length = 40 * sampleSize;
    for (int i = 0; i < numTrials; i++) {
        int outputAfter = 1 + rng.nextInt(10 * sampleSize);
        long newSeed = rng.nextLong();
        // first: internal shingling; outputAfter is shifted by shingleSize - 1
        // so its output lines up with the externally shingled forests below
        RandomCutForest first = new RandomCutForest.Builder<>().compact(true).dimensions(dimensions)
                .precision(Precision.FLOAT_32).randomSeed(newSeed).internalShinglingEnabled(true)
                .outputAfter(outputAfter + shingleSize - 1).internalRotationEnabled(rotation)
                .shingleSize(shingleSize).build();
        // second: external shingling with the shingle size declared
        RandomCutForest second = new RandomCutForest.Builder<>().compact(true).dimensions(dimensions)
                .precision(Precision.FLOAT_32).randomSeed(newSeed).internalShinglingEnabled(false)
                .outputAfter(outputAfter).shingleSize(shingleSize).build();
        // third: external shingling, shingle size 1 (plain multivariate points)
        RandomCutForest third = new RandomCutForest.Builder<>().compact(true).dimensions(dimensions)
                .precision(Precision.FLOAT_32).randomSeed(newSeed).internalShinglingEnabled(false).shingleSize(1)
                .outputAfter(outputAfter).build();
        MultiDimDataWithKey dataWithKeys = ShingledMultiDimDataWithKeys.getMultiDimData(length, 50, 100, 5,
                seed + i, baseDimensions);
        double[][] shingledData = generateShingledData(dataWithKeys.data, shingleSize, baseDimensions, rotation);
        assertEquals(shingledData.length, dataWithKeys.data.length - shingleSize + 1);
        int count = shingleSize - 1;
        // insert initial points
        for (int j = 0; j < shingleSize - 1; j++) {
            first.update(dataWithKeys.data[j]);
        }
        for (int j = 0; j < shingledData.length; j++) {
            // validate equality of points
            for (int y = 0; y < baseDimensions; y++) {
                int position = (rotation) ? (count % shingleSize) : shingleSize - 1;
                assertEquals(dataWithKeys.data[count][y], shingledData[j][position * baseDimensions + y], 1e-10);
            }
            double firstResult = first.getAnomalyScore(dataWithKeys.data[count]);
            first.update(dataWithKeys.data[count]);
            ++count;
            double secondResult = second.getAnomalyScore(shingledData[j]);
            second.update(shingledData[j]);
            double thirdResult = third.getAnomalyScore(shingledData[j]);
            third.update(shingledData[j]);
            // exact agreement is expected: same seed, same effective input
            assertEquals(firstResult, secondResult, 1e-10);
            assertEquals(secondResult, thirdResult, 1e-10);
        }
        // the three point stores should summarize into clusters with matching
        // weights and average radii
        PointStore store = (PointStore) first.getUpdateCoordinator().getStore();
        assertEquals(store.getCurrentStoreCapacity() * dimensions, store.getStore().length);
        List<ICluster<float[]>> firstSummary = store.summarize(5, 0.5, 3, 0.8, Summarizer::L2distance, null);
        store = (PointStore) second.getUpdateCoordinator().getStore();
        assertEquals(store.getCurrentStoreCapacity() * dimensions, store.getStore().length);
        List<ICluster<float[]>> secondSummary = store.summarize(5, 0.5, 3, 0.8, Summarizer::L2distance, null);
        assert (secondSummary.size() == firstSummary.size());
        for (int j = 0; j < firstSummary.size(); j++) {
            assertEquals(firstSummary.get(j).getWeight(), secondSummary.get(j).getWeight(), 1e-3);
            assertEquals(firstSummary.get(j).averageRadius(), secondSummary.get(j).averageRadius(), 1e-3);
        }
        store = (PointStore) third.getUpdateCoordinator().getStore();
        assertEquals(store.getCurrentStoreCapacity() * dimensions, store.getStore().length);
        List<ICluster<float[]>> thirdSummary = store.summarize(5, 0.5, 3, 0.8, Summarizer::L2distance, null);
        assert (thirdSummary.size() == firstSummary.size());
        for (int j = 0; j < firstSummary.size(); j++) {
            assertEquals(firstSummary.get(j).getWeight(), thirdSummary.get(j).getWeight(), 1e-3);
            assertEquals(firstSummary.get(j).averageRadius(), thirdSummary.get(j).averageRadius(), 1e-3);
        }
    }
}
/**
 * Extrapolation must agree across three equivalent configurations: external
 * shingling with shingleSize declared, external shingling with shingleSize 1,
 * and internal shingling. The 200-step forecast is then compared against the
 * underlying period-120 cosine from getDataA.
 */
@Test
public void testExtrapolateShingleAwareSinglePrecision() {
    int numberOfTrees = 100;
    int sampleSize = 256;
    int shinglesize = 10;
    long randomSeed = 123;
    RandomCutForest newforest = RandomCutForest.builder().numberOfTrees(numberOfTrees).sampleSize(sampleSize)
            .dimensions(shinglesize).randomSeed(randomSeed).compact(true).shingleSize(shinglesize)
            .precision(Precision.FLOAT_32).build();
    RandomCutForest anotherforest = RandomCutForest.builder().numberOfTrees(numberOfTrees).sampleSize(sampleSize)
            .dimensions(shinglesize).randomSeed(randomSeed).compact(true).shingleSize(1)
            .precision(Precision.FLOAT_32).build();
    RandomCutForest yetAnotherforest = RandomCutForest.builder().numberOfTrees(numberOfTrees).sampleSize(sampleSize)
            .dimensions(shinglesize).randomSeed(randomSeed).compact(true).shingleSize(shinglesize)
            .internalShinglingEnabled(true).precision(Precision.FLOAT_32).build();
    double amplitude = 50.0;
    double noise = 2.0;
    int entryIndex = 0;
    boolean filledShingleAtleastOnce = false;
    double[] history = new double[shinglesize];
    int num = 850;
    double[] data = getDataA(amplitude, noise);
    double[] answer = null;
    double error = 0;
    double[] record = null;
    for (int j = 0; j < num; ++j) { // we stream here ....
        history[entryIndex] = data[j];
        entryIndex = (entryIndex + 1) % shinglesize;
        if (entryIndex == 0) {
            filledShingleAtleastOnce = true;
        }
        // input is always double[], internal representation is float[]
        // input is 1 dimensional for internal shingling (for 1 dimensional sequences)
        yetAnotherforest.update(new double[] { data[j] });
        if (filledShingleAtleastOnce) {
            record = getShinglePoint(history, entryIndex, shinglesize);
            newforest.update(record);
            anotherforest.update(record);
        }
    }
    answer = newforest.extrapolateBasic(record, 200, 1, false);
    double[] anotherAnswer = anotherforest.extrapolateBasic(record, 200, 1, false);
    double[] yetAnotherAnswer = yetAnotherforest.extrapolate(200);
    // all three must agree to numerical precision
    assertArrayEquals(anotherAnswer, answer, 1e-10);
    assertArrayEquals(yetAnotherAnswer, answer, 1e-10);
    error = 0;
    for (int j = 0; j < 200; j++) {
        double prediction = amplitude * cos((j + 850 - 50) * 2 * PI / 120);
        error += Math.abs(prediction - answer[j]);
    }
    error = error / 200;
    // mean absolute forecast error should stay within a few noise widths
    assertTrue(error < 4 * noise);
}
/**
 * Extrapolation with internally rotated shingles must match extrapolateBasic
 * applied to cyclic (rotated) external shingles; the forecast is also checked
 * against the underlying period-120 cosine from getDataA.
 */
@Test
public void testExtrapolateInternalRotationSinglePrecision() {
    int numberOfTrees = 100;
    int sampleSize = 256;
    int shinglesize = 120;
    long randomSeed = 123;
    RandomCutForest newforestA = RandomCutForest.builder().numberOfTrees(numberOfTrees).sampleSize(sampleSize)
            .dimensions(shinglesize).randomSeed(randomSeed).compact(true).precision(Precision.FLOAT_32).build();
    RandomCutForest newforestB = RandomCutForest.builder().numberOfTrees(numberOfTrees).sampleSize(sampleSize)
            .dimensions(shinglesize).randomSeed(randomSeed).internalShinglingEnabled(true)
            .internalRotationEnabled(true).compact(true).shingleSize(shinglesize).precision(Precision.FLOAT_32)
            .build();
    RandomCutForest newforestC = RandomCutForest.builder().numberOfTrees(numberOfTrees).sampleSize(sampleSize)
            .dimensions(shinglesize).randomSeed(randomSeed).compact(true).shingleSize(shinglesize)
            .precision(Precision.FLOAT_32).build();
    double amplitude = 50.0;
    double noise = 2.0;
    // FIX(review): removed an unused local "Random noiseprg = new Random(72)";
    // the noise is already baked into the data by getDataA.
    int entryIndex = 0;
    boolean filledShingleAtleastOnce = false;
    double[] history = new double[shinglesize];
    int num = 850;
    double[] data = getDataA(amplitude, noise);
    double[] answer = null;
    double error = 0;
    double[] record = null;
    for (int j = 0; j < num; ++j) { // we stream here ....
        history[entryIndex] = data[j];
        entryIndex = (entryIndex + 1) % shinglesize;
        if (entryIndex == 0) {
            filledShingleAtleastOnce = true;
        }
        newforestB.update(new double[] { data[j] });
        if (filledShingleAtleastOnce) {
            // produce cyclic vectors
            record = getShinglePoint(history, 0, shinglesize);
            newforestA.update(record);
            newforestC.update(record);
        }
    }
    answer = newforestA.extrapolateBasic(record, 200, 1, true, entryIndex);
    double[] anotherAnswer = newforestB.extrapolate(200);
    double[] yetAnotherAnswer = newforestC.extrapolateBasic(record, 200, 1, true, entryIndex);
    assertArrayEquals(answer, yetAnotherAnswer, 1e-10);
    // the internally rotated forest must hold the same last shingle and be at
    // the same rotation offset
    double[] othershingle = toDoubleArray(newforestB.lastShingledPoint());
    assertEquals(entryIndex, newforestB.nextSequenceIndex() % shinglesize);
    assertArrayEquals(record, othershingle, 1e-5);
    assertArrayEquals(answer, anotherAnswer, 1e-5);
    error = 0;
    for (int j = 0; j < 200; j++) {
        double prediction = amplitude * cos((j + 850 - 50) * 2 * PI / 120);
        error += Math.abs(prediction - answer[j]);
    }
    error = error / 200;
    // mean absolute forecast error should stay within a few noise widths
    assertTrue(error < 4 * noise);
}
/**
 * Tests adaptation to stream evolution: two identical forests with time decay
 * are trained on the same stream, then fed two different continuations (a
 * phase shift and a reshaped waveform); each forecast must track its own
 * continuation within the noise level.
 */
@Test
public void testExtrapolateC() {
    int numberOfTrees = 100;
    int sampleSize = 256;
    int shinglesize = 20;
    long randomSeed = 124;
    // build two identical copies; we will be giving them different
    // subsequent inputs and test adaptation to stream evolution
    RandomCutForest newforestC = RandomCutForest.builder().numberOfTrees(numberOfTrees).sampleSize(sampleSize)
            .dimensions(shinglesize).randomSeed(randomSeed).compact(true).timeDecay(1.0 / 300).build();
    RandomCutForest newforestD = RandomCutForest.builder().numberOfTrees(numberOfTrees).sampleSize(sampleSize)
            .dimensions(shinglesize).randomSeed(randomSeed).compact(true).timeDecay(1.0 / 300).build();
    double amplitude = 50.0;
    double noise = 2.0;
    Random noiseprg = new Random(72);
    int entryIndex = 0;
    boolean filledShingleAtleastOnce = false;
    double[] history = new double[shinglesize];
    int num = 1330;
    double[] data = getDataB(amplitude, noise);
    double[] answer = null;
    double error = 0;
    double[] record = null;
    for (int j = 0; j < num; ++j) { // we stream here ....
        history[entryIndex] = data[j];
        entryIndex = (entryIndex + 1) % shinglesize;
        if (entryIndex == 0) {
            filledShingleAtleastOnce = true;
        }
        if (filledShingleAtleastOnce) {
            record = getShinglePoint(history, entryIndex, shinglesize);
            newforestC.update(record);
            newforestD.update(record);
        }
    }
    /**
     * the two forests are identical up to this point we will now provide two
     * different input to each num+2*expLife=1930, but since the shape of the
     * pattern remains the same in a phase shift, the prediction comes back to
     * "normal" fairly quickly.
     */
    for (int j = num; j < 1630; ++j) { // we stream here ....
        double t = cos(2 * PI * (j - 50) / 240);
        // FIX(review): removed a stray empty statement (lone ';') that followed
        // this assignment in the original.
        history[entryIndex] = amplitude * t + noise * noiseprg.nextDouble();
        entryIndex = (entryIndex + 1) % shinglesize;
        if (entryIndex == 0) {
            filledShingleAtleastOnce = true;
        }
        if (filledShingleAtleastOnce) {
            record = getShinglePoint(history, entryIndex, shinglesize);
            newforestC.update(record);
        }
    }
    answer = newforestC.extrapolateBasic(record, 200, 1, false);
    error = 0;
    for (int j = 0; j < 200; j++) {
        double t = cos(2 * PI * (1630 + j - 50) / 240);
        double prediction = amplitude * t;
        error += Math.abs(prediction - answer[j]);
    }
    error = error / 200;
    assertTrue(error < 2 * noise);
    /**
     * Here num+2*expLife=1930 for a small explife such as 300, num+expLife is
     * already sufficient increase the factor for larger expLife or increase the
     * sampleSize to absorb the longer range dependencies of a larger expLife
     */
    for (int j = num; j < 1630; ++j) { // we stream here ....
        double t = cos(2 * PI * (j + 50) / 120);
        int sign = (t > 0) ? 1 : -1;
        // cube-root reshaping flattens the peaks of the cosine
        history[entryIndex] = amplitude * sign * Math.pow(t * sign, 1.0 / 3) + noise * noiseprg.nextDouble();
        entryIndex = (entryIndex + 1) % shinglesize;
        if (entryIndex == 0) {
            filledShingleAtleastOnce = true;
        }
        if (filledShingleAtleastOnce) {
            record = getShinglePoint(history, entryIndex, shinglesize);
            newforestD.update(record);
        }
    }
    answer = newforestD.extrapolateBasic(record, 200, 1, false);
    error = 0;
    for (int j = 0; j < 200; j++) {
        double t = cos(2 * PI * (1630 + j + 50) / 120);
        int sign = (t > 0) ? 1 : -1;
        double prediction = amplitude * sign * Math.pow(t * sign, 1.0 / 3);
        error += Math.abs(prediction - answer[j]);
    }
    error = error / 200;
    assertTrue(error < 2 * noise);
}
/**
 * A 1-d cosine wave (period 120, offset -50) with additive noise, interrupted
 * by a short flat line at [510, 525) and ending in a burst of high-frequency
 * (period 12) oscillation from index 825 onward. Deterministic: seed 9000.
 */
double[] getDataA(double amplitude, double noise) {
    int num = 850;
    double[] data = new double[num];
    Random rng = new Random(9000);
    for (int i = 0; i < num; i++) {
        if (i >= 510 && i < 525) {
            data[i] = 0; // flat line; note: consumes no random draw
        } else {
            int period = (i < 825) ? 120 : 12; // high-frequency tail
            data[i] = amplitude * cos(2 * PI * (i - 50) / period) + noise * rng.nextDouble();
        }
    }
    return data;
}
/**
 * A 1-d cosine wave (period 240, offset +50) with additive noise, interrupted
 * by a short flat line at [990, 1005) and ending in a burst of high-frequency
 * (period 12) oscillation from index 1305 onward. Deterministic: seed 9001.
 */
double[] getDataB(double amplitude, double noise) {
    int num = 1330;
    double[] data = new double[num];
    Random rng = new Random(9001);
    for (int i = 0; i < num; i++) {
        if (i >= 990 && i < 1005) {
            data[i] = 0; // flat line; note: consumes no random draw
        } else {
            int period = (i < 1305) ? 240 : 12; // high-frequency tail
            data[i] = amplitude * cos(2 * PI * (i + 50) / period) + noise * rng.nextDouble();
        }
    }
    return data;
}
/**
 * Copies the circular history buffer into a flat shingle, starting at the
 * oldest entry so that the most recent value lands in the last slot.
 */
private static double[] getShinglePoint(double[] recentPointsSeen, int indexOfOldestPoint, int shingleLength) {
    double[] shingle = new double[shingleLength];
    for (int j = 0; j < shingleLength; j++) {
        shingle[j] = recentPointsSeen[(j + indexOfOldestPoint) % shingleLength];
    }
    return shingle;
}
/**
 * Round-trips the forest through the state mapper after every update and
 * verifies that the reconstituted forest keeps scoring and accepting points.
 */
@Test
public void testUpdate() {
    int dimensions = 10;
    RandomCutForest forest = RandomCutForest.builder().numberOfTrees(100).compact(true).dimensions(dimensions)
            .randomSeed(0).sampleSize(200).precision(Precision.FLOAT_32).build();
    // NOTE(review): the original also generated 1000 "training" points that were
    // never fed to the forest; that dead computation has been removed.
    double[][] testData = genShingledData(100, dimensions, 1);
    // mapper configuration is loop-invariant; create and configure it once
    RandomCutForestMapper mapper = new RandomCutForestMapper();
    mapper.setSaveExecutorContextEnabled(true);
    mapper.setSaveTreeStateEnabled(true);
    for (double[] point : testData) {
        forest.getAnomalyScore(point); // exercise the scoring path before update
        forest.update(point);
        RandomCutForestState forestState = mapper.toState(forest);
        forest = mapper.toModel(forestState);
    }
}
/**
 * Produces `size` externally shingled points of the given dimension from a
 * slow cosine signal (see getDataD); the first shingle is emitted only after
 * the circular buffer has filled once.
 */
private static double[][] genShingledData(int size, int dimensions, long seed) {
    double[][] shingles = new double[size][];
    double[] history = new double[dimensions];
    double[] raw = getDataD(size + dimensions - 1, 100, 5, seed);
    int oldest = 0;
    int produced = 0;
    boolean bufferFilled = false;
    for (double value : raw) {
        history[oldest] = value;
        oldest = (oldest + 1) % dimensions;
        if (oldest == 0) {
            bufferFilled = true;
        }
        if (bufferFilled) {
            shingles[produced++] = getShinglePoint(history, oldest, dimensions);
        }
    }
    return shingles;
}
/**
 * Generates a noisy cosine series of period 1000.
 *
 * @param num       number of values to generate
 * @param amplitude scale of the cosine component
 * @param noise     scale of the uniform noise component
 * @param seed      RNG seed, making the output deterministic per seed
 * @return the generated series
 */
private static double[] getDataD(int num, double amplitude, double noise, long seed) {
    double[] series = new double[num];
    Random rng = new Random(seed);
    for (int t = 0; t < num; t++) {
        series[t] = amplitude * cos(2 * PI * (t + 50) / 1000) + noise * rng.nextDouble();
    }
    return series;
}
}
| 389 |
0 | Create_ds/random-cut-forest-by-aws/Java/core/src/test/java/com/amazon | Create_ds/random-cut-forest-by-aws/Java/core/src/test/java/com/amazon/randomcutforest/CPUTest.java | /*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazon.randomcutforest;
import java.util.Arrays;
import java.util.concurrent.ForkJoinPool;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;
import com.amazon.randomcutforest.testutils.ShingledMultiDimDataWithKeys;
/**
* The following "test" is intended to provide an approximate estimate of the
* improvement from parallelization. At the outset, we remark that running the
* test from inside an IDE/environment may reflect more of the environment.
* Issues such as warming are not reflected in this test.
*
* Users who wish to obtain more calibrated estimates should use a benchmark --
* preferably using their own "typical" data and their end to end setup.
* Performance of RCF is data dependent. Such users may be invoking RCF
* functions differently from a standard "impute, score, update" process
* recommended for streaming time series data.
*
* Moreover, in the context of a large number of models, the rate at which the
* models require updates is also a factor and not controlled herein.
*
* The two tests should produce near identical sum of scores, and (root) mean
* squared error of the impute up to machine precision (since the order of the
* arithmetic operations would vary).
*
* To summarize the lessons, it appears that parallelism almost always helps
* (upto resource limitations). If an user is considering a single model -- say
* from a console or dashboard, they should consider having parallel threads
* enabled. For large number of models, it may be worthwhile to also investigate
* different ways of achieving parallelism and not just attempt to change the
* executor framework.
*
*/
@Tag("functional")
public class CPUTest {
int numberOfTrees = 30;
int DATA_SIZE = 10000;
int numberOfForests = 6;
int numberOfAttributes = 5;
int shingleSize = 30;
int sampleSize = 256;
// set numberOfThreads = 1 to turn off parallelism
int numberOfThreads = 3;
// change boundingBoxCacheFraction to see different memory consumption
// this would be germane for large number of models cache/memory contention
double boundingBoxCacheFraction = 1.0;
int dimensions = shingleSize * numberOfAttributes;
@Test
public void profileTestSync() {
double[] mse = new double[numberOfForests];
int[] mseCount = new int[numberOfForests];
double[] score = new double[numberOfForests];
double[][] data = ShingledMultiDimDataWithKeys.getMultiDimData(DATA_SIZE, 60, 100, 5, 0,
numberOfAttributes).data;
RandomCutForest[] forests = new RandomCutForest[numberOfForests];
for (int k = 0; k < numberOfForests; k++) {
forests[k] = RandomCutForest.builder().numberOfTrees(numberOfTrees).dimensions(dimensions)
.shingleSize(shingleSize).boundingBoxCacheFraction(boundingBoxCacheFraction).randomSeed(99 + k)
.outputAfter(10).parallelExecutionEnabled(true).threadPoolSize(numberOfThreads)
.internalShinglingEnabled(true).initialAcceptFraction(0.1).sampleSize(sampleSize).build();
}
for (int j = 0; j < data.length; j++) {
for (int k = 0; k < numberOfForests; k++) {
score[k] += forests[k].getAnomalyScore(data[j]);
if (j % 10 == 0 && j > 0) {
double[] result = forests[k].extrapolate(1);
double sum = 0;
for (int i = 0; i < result.length; i++) {
double t = result[i] - data[j][i];
sum += t * t;
}
sum = Math.sqrt(sum);
mse[k] += sum;
mseCount[k]++;
}
forests[k].update(data[j]);
}
}
for (int k = 0; k < numberOfForests; k++) {
System.out.println(" Forest " + k);
System.out.println(" MSE " + mse[k] / mseCount[k]);
System.out.println(" scoresum " + score[k] / data.length);
}
}
@Test
public void profileTestASync() {
double[] mse = new double[numberOfForests];
int[] mseCount = new int[numberOfForests];
double[] score = new double[numberOfForests];
double[][] data = ShingledMultiDimDataWithKeys.getMultiDimData(DATA_SIZE, 60, 100, 5, 0,
numberOfAttributes).data;
RandomCutForest[] forests = new RandomCutForest[numberOfForests];
for (int k = 0; k < numberOfForests; k++) {
forests[k] = RandomCutForest.builder().numberOfTrees(numberOfTrees).dimensions(dimensions)
.shingleSize(shingleSize).boundingBoxCacheFraction(boundingBoxCacheFraction).randomSeed(99 + k)
.outputAfter(10).parallelExecutionEnabled(false).internalShinglingEnabled(true)
.initialAcceptFraction(0.1).sampleSize(sampleSize).build();
}
ForkJoinPool forkJoinPool = new ForkJoinPool(numberOfThreads);
int[] indices = new int[numberOfForests];
for (int k = 0; k < numberOfForests; k++) {
indices[k] = k;
}
for (int j = 0; j < data.length; j++) {
int finalJ = j;
forkJoinPool.submit(() -> Arrays.stream(indices).parallel().forEach(k -> {
score[k] += forests[k].getAnomalyScore(data[finalJ]);
if (finalJ % 10 == 0 && finalJ > 0) {
double[] result = forests[k].extrapolate(1);
double sum = 0;
for (int i = 0; i < result.length; i++) {
double t = result[i] - data[finalJ][i];
sum += t * t;
}
sum = Math.sqrt(sum);
mse[k] += sum;
mseCount[k]++;
}
forests[k].update(data[finalJ]);
})).join();
}
for (int k = 0; k < numberOfForests; k++) {
System.out.println(" Forest " + k);
System.out.println(" MSE " + mse[k] / mseCount[k]);
System.out.println(" scoresum " + score[k] / data.length);
}
}
} | 390 |
0 | Create_ds/random-cut-forest-by-aws/Java/core/src/test/java/com/amazon | Create_ds/random-cut-forest-by-aws/Java/core/src/test/java/com/amazon/randomcutforest/RandomCutForestConsistencyFunctionalTest.java | /*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazon.randomcutforest;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.util.Random;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;
import com.amazon.randomcutforest.config.Precision;
import com.amazon.randomcutforest.testutils.NormalMixtureTestData;
/**
* This class validates that forests configured with different execution modes
* (sequential or parallel) or different internal data representations are
* executing the algorithm steps in the same way.
*/
@Tag("functional")
public class RandomCutForestConsistencyFunctionalTest {

    // Shared configuration for every forest built in this class; the common
    // randomSeed is what makes the different configurations comparable.
    private int dimensions = 5;
    private int sampleSize = 128;
    private long randomSeed = 123L;
    private int testSize = 2048;

    /**
     * Builds twelve forests that differ only in internal representation
     * (pointer-based vs compact), bounding-box cache fraction (0, 1, or
     * random) and execution mode (sequential vs parallel), then checks on
     * every point that all twelve produce the same anomaly score to within
     * 1e-10 before all twelve are updated with that point.
     */
    @Test
    public void testConsistentScoring() {
        RandomCutForest.Builder<?> builder = RandomCutForest.builder().dimensions(dimensions).sampleSize(sampleSize)
                .randomSeed(randomSeed);
        RandomCutForest pointerCachedSequential = builder.compact(false).boundingBoxCacheFraction(1.0)
                .parallelExecutionEnabled(false).build();
        RandomCutForest pointerCachedParallel = builder.compact(false).boundingBoxCacheFraction(1.0)
                .parallelExecutionEnabled(true).build();
        RandomCutForest pointerCachedRandomSequential = builder.compact(false)
                .boundingBoxCacheFraction(new Random().nextDouble()).parallelExecutionEnabled(false).build();
        RandomCutForest pointerCachedRandomParallel = builder.compact(false)
                .boundingBoxCacheFraction(new Random().nextDouble()).parallelExecutionEnabled(true).build();
        RandomCutForest pointerUncachedSequential = builder.compact(false).boundingBoxCacheFraction(0.0)
                .parallelExecutionEnabled(false).build();
        RandomCutForest pointerUncachedParallel = builder.compact(false).boundingBoxCacheFraction(0.0)
                .parallelExecutionEnabled(true).build();
        RandomCutForest compactCachedSequential = builder.compact(true).boundingBoxCacheFraction(1.0)
                .parallelExecutionEnabled(false).build();
        RandomCutForest compactCachedParallel = builder.compact(true).boundingBoxCacheFraction(1.0)
                .parallelExecutionEnabled(true).build();
        RandomCutForest compactUncachedSequential = builder.compact(true).boundingBoxCacheFraction(0.0)
                .parallelExecutionEnabled(false).build();
        RandomCutForest compactUncachedParallel = builder.compact(true).boundingBoxCacheFraction(0.0)
                .parallelExecutionEnabled(true).build();
        RandomCutForest compactCachedRandomSequential = builder.compact(true)
                .boundingBoxCacheFraction(new Random().nextDouble()).parallelExecutionEnabled(false).build();
        RandomCutForest compactCachedRandomParallel = builder.compact(true)
                .boundingBoxCacheFraction(new Random().nextDouble()).parallelExecutionEnabled(true).build();
        NormalMixtureTestData testData = new NormalMixtureTestData();
        double delta = 1e-10;
        int anomalies = 0;
        for (double[] point : testData.generateTestData(testSize, dimensions, 99)) {
            // the first forest provides the reference score
            double score = pointerCachedSequential.getAnomalyScore(point);
            if (score > 0) {
                anomalies++;
            }
            // every other configuration must agree with the reference
            assertEquals(score, pointerCachedParallel.getAnomalyScore(point), delta);
            assertEquals(score, pointerUncachedSequential.getAnomalyScore(point), delta);
            assertEquals(score, pointerUncachedParallel.getAnomalyScore(point), delta);
            assertEquals(score, compactCachedSequential.getAnomalyScore(point), delta);
            assertEquals(score, compactCachedParallel.getAnomalyScore(point), delta);
            assertEquals(score, compactUncachedSequential.getAnomalyScore(point), delta);
            assertEquals(score, compactUncachedParallel.getAnomalyScore(point), delta);
            assertEquals(score, pointerCachedRandomSequential.getAnomalyScore(point), delta);
            assertEquals(score, pointerCachedRandomParallel.getAnomalyScore(point), delta);
            assertEquals(score, compactCachedRandomSequential.getAnomalyScore(point), delta);
            assertEquals(score, compactCachedRandomParallel.getAnomalyScore(point), delta);
            // all forests are updated with the same point, keeping them in sync
            pointerCachedSequential.update(point);
            pointerCachedParallel.update(point);
            pointerUncachedSequential.update(point);
            pointerUncachedParallel.update(point);
            pointerCachedRandomSequential.update(point);
            pointerCachedRandomParallel.update(point);
            compactCachedSequential.update(point);
            compactCachedParallel.update(point);
            compactUncachedSequential.update(point);
            compactUncachedParallel.update(point);
            compactCachedRandomSequential.update(point);
            compactCachedRandomParallel.update(point);
        }
        // verify that the test is nontrivial
        assertTrue(anomalies > 0);
    }

    /**
     * Same consistency check restricted to compact forests built with single
     * precision (FLOAT_32): all FLOAT_32 variants must agree to 1e-10, while
     * the FLOAT_64 forest is only required to agree to 1e-2, since scores
     * computed in double precision legitimately diverge slightly.
     */
    @Test
    public void testConsistentScoringSinglePrecision() {
        RandomCutForest.Builder<?> builder = RandomCutForest.builder().dimensions(dimensions).sampleSize(sampleSize)
                .randomSeed(randomSeed).parallelExecutionEnabled(false).compact(true);
        RandomCutForest compactFloatCached = builder.boundingBoxCacheFraction(1.0).precision(Precision.FLOAT_32)
                .build();
        RandomCutForest compactFloatCachedParallel = builder.boundingBoxCacheFraction(1.0).precision(Precision.FLOAT_32)
                .parallelExecutionEnabled(true).build();
        RandomCutForest compactFloatUncached = builder.boundingBoxCacheFraction(0.0).precision(Precision.FLOAT_32)
                .build();
        RandomCutForest compactFloatCachedRandom = builder.boundingBoxCacheFraction(new Random().nextDouble())
                .precision(Precision.FLOAT_32).build();
        RandomCutForest compactFloatCachedRandomParallel = builder.boundingBoxCacheFraction(new Random().nextDouble())
                .precision(Precision.FLOAT_32).parallelExecutionEnabled(true).build();
        RandomCutForest compactFloatUncachedParallel = builder.boundingBoxCacheFraction(0.0)
                .precision(Precision.FLOAT_32).parallelExecutionEnabled(true).build();
        RandomCutForest compactDoubleCached = builder.boundingBoxCacheFraction(1.0).precision(Precision.FLOAT_64)
                .build();
        NormalMixtureTestData testData = new NormalMixtureTestData();
        int anomalies = 0;
        for (double[] point : testData.generateTestData(testSize, dimensions, 99)) {
            double score = compactFloatCached.getAnomalyScore(point);
            if (score > 0) {
                anomalies++;
            }
            assertEquals(score, compactFloatUncached.getAnomalyScore(point), 1e-10);
            assertEquals(score, compactFloatUncachedParallel.getAnomalyScore(point), 1e-10);
            assertEquals(score, compactFloatCachedRandom.getAnomalyScore(point), 1e-10);
            assertEquals(score, compactFloatCachedRandomParallel.getAnomalyScore(point), 1e-10);
            // we expect some loss of precision when comparing to the score computed as a
            // double
            assertEquals(score, compactDoubleCached.getAnomalyScore(point), 1e-2);
            compactFloatCached.update(point);
            compactFloatCachedParallel.update(point);
            compactFloatUncached.update(point);
            compactFloatUncachedParallel.update(point);
            compactFloatCachedRandom.update(point);
            compactFloatCachedRandomParallel.update(point);
            compactDoubleCached.update(point);
        }
        // verify that the test is nontrivial
        assertTrue(anomalies > 0);
    }
}
| 391 |
0 | Create_ds/random-cut-forest-by-aws/Java/core/src/test/java/com/amazon | Create_ds/random-cut-forest-by-aws/Java/core/src/test/java/com/amazon/randomcutforest/SampleSummaryTest.java | /*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazon.randomcutforest;
import static com.amazon.randomcutforest.CommonUtils.toFloatArray;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Random;
import java.util.function.BiFunction;
import java.util.function.Function;
import java.util.stream.Stream;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
import com.amazon.randomcutforest.returntypes.SampleSummary;
import com.amazon.randomcutforest.summarization.Center;
import com.amazon.randomcutforest.summarization.ICluster;
import com.amazon.randomcutforest.summarization.MultiCenter;
import com.amazon.randomcutforest.summarization.Summarizer;
import com.amazon.randomcutforest.testutils.NormalMixtureTestData;
import com.amazon.randomcutforest.util.Weighted;
@Tag("functional")
public class SampleSummaryTest {
// Test-data generation parameters. Only dataSize is assigned in the methods
// visible here (each test sets it before generating points); the remaining
// fields appear unused in this view — presumably configured elsewhere in the
// class or retained from an earlier fixture. TODO confirm.
private static double baseMu;
private static double baseSigma;
private static double anomalyMu;
private static double anomalySigma;
private static double transitionToAnomalyProbability;
private static double transitionToBaseProbability;
private static int dataSize;
/**
 * Validates argument checking across the Summarizer entry points
 * (summarize, iterativeClustering, assignAndRecompute): zero/negative/NaN/
 * infinite weights, inconsistent cluster counts, and empty inputs must all
 * throw IllegalArgumentException, while corrected inputs succeed. Also
 * verifies that absorb() merges a point into a cluster and that a distance
 * function returning a negative value is rejected.
 */
@Test
public void configAndAbsorbTest() {
    long seed = new Random().nextLong();
    Random random = new Random(seed);
    int newDimensions = random.nextInt(10) + 3;
    dataSize = 2000;
    // NOTE(review): this instance is never used; Summarizer is invoked statically below
    Summarizer summarizer = new Summarizer();
    float[][] points = getData(dataSize, newDimensions, random.nextInt(), Summarizer::L2distance);
    ArrayList<Weighted<float[]>> weighted = new ArrayList<>();
    ArrayList<Weighted<Integer>> refs = new ArrayList<>();
    int count = 0;
    for (float[] point : points) {
        // testing 0 weight
        weighted.add(new Weighted<>(point, 0.0f));
        refs.add(new Weighted<Integer>(count, 0.0f));
        ++count;
    }
    // all-zero weights: rejected
    assertThrows(IllegalArgumentException.class, () -> Summarizer.summarize(weighted, 500, 10 * newDimensions,
            false, Summarizer::L2distance, random.nextInt(), false));
    BiFunction<float[], Float, ICluster<float[]>> clusterInitializer = (a, b) -> MultiCenter.initialize(a, b, 0.8,
            3);
    Function<Integer, float[]> getPoint = (i) -> weighted.get(i).index;
    assertThrows(IllegalArgumentException.class, () -> Summarizer.summarize(weighted, 500, 10 * newDimensions, 1,
            false, 0.1, Summarizer::L2distance, clusterInitializer, 0, false, null));
    // maxAllowed below the requested cluster bound: rejected
    assertThrows(IllegalArgumentException.class,
            () -> Summarizer.summarize(weighted, 50, 10, false, Summarizer::L2distance, random.nextInt(), false));
    assertThrows(IllegalArgumentException.class, () -> Summarizer.summarize(weighted, 50, 10, 1, false, 0.1,
            Summarizer::L2distance, clusterInitializer, 0, false, null));
    // out-of-range "stopAt"/round parameters (0 and 100): rejected
    assertThrows(IllegalArgumentException.class, () -> Summarizer.summarize(weighted, 50, 10 * newDimensions, 0,
            false, 0.1, Summarizer::L2distance, clusterInitializer, 0, false, null));
    assertThrows(IllegalArgumentException.class, () -> Summarizer.summarize(weighted, 50, 10 * newDimensions, 100,
            false, 0.1, Summarizer::L2distance, clusterInitializer, 0, false, null));
    assertThrows(IllegalArgumentException.class, () -> Summarizer.iterativeClustering(5, 10 * newDimensions, 0,
            refs, getPoint, Summarizer::L2distance, clusterInitializer, 0, false, false, 0.1, null));
    assertThrows(IllegalArgumentException.class, () -> Summarizer.iterativeClustering(5, 10 * newDimensions, 7,
            refs, getPoint, Summarizer::L2distance, clusterInitializer, 0, false, false, 0.1, null));
    // empty reference list: rejected
    assertThrows(IllegalArgumentException.class,
            () -> Summarizer.iterativeClustering(5, 10 * newDimensions, 1, Collections.emptyList(), getPoint,
                    Summarizer::L2distance, clusterInitializer, 0, false, false, 0.1, null));
    assertThrows(IllegalArgumentException.class, () -> Summarizer.iterativeClustering(5, 10 * newDimensions, 1,
            refs, getPoint, Summarizer::L2distance, clusterInitializer, 0, false, false, 0.1, null));
    assertThrows(IllegalArgumentException.class, () -> Summarizer.summarize(weighted, 5, 10 * newDimensions, false,
            Summarizer::L2distance, random.nextInt(), false));
    assertThrows(IllegalArgumentException.class, () -> Summarizer.summarize(weighted, 5, 10 * newDimensions, 1,
            false, 0.1, Summarizer::L2distance, clusterInitializer, 0, false, null));
    // cycle one entry through every invalid weight value; each must be rejected
    Weighted<float[]> a = weighted.get(0);
    a.weight = -1;
    assertThrows(IllegalArgumentException.class, () -> Summarizer.summarize(weighted, 5, 10 * newDimensions, 1,
            false, 0.1, Summarizer::L2distance, clusterInitializer, 0, false, null));
    assertThrows(IllegalArgumentException.class, () -> Summarizer.summarize(weighted, 5, 10 * newDimensions, false,
            Summarizer::L2distance, random.nextInt(), false));
    a.weight = Float.NaN;
    assertThrows(IllegalArgumentException.class, () -> Summarizer.summarize(weighted, 5, 10 * newDimensions, 1,
            false, 0.1, Summarizer::L2distance, clusterInitializer, 0, false, null));
    assertThrows(IllegalArgumentException.class, () -> Summarizer.summarize(weighted, 5, 10 * newDimensions, false,
            Summarizer::L2distance, random.nextInt(), false));
    a.weight = Float.POSITIVE_INFINITY;
    assertThrows(IllegalArgumentException.class, () -> Summarizer.summarize(weighted, 5, 10 * newDimensions, 1,
            false, 0.1, Summarizer::L2distance, clusterInitializer, 0, false, null));
    assertThrows(IllegalArgumentException.class, () -> Summarizer.summarize(weighted, 5, 10 * newDimensions, false,
            Summarizer::L2distance, random.nextInt(), false));
    // a single positive weight makes the input valid
    a.weight = 1;
    assertDoesNotThrow(() -> Summarizer.summarize(weighted, 5, 10 * newDimensions, false, Summarizer::L2distance,
            random.nextInt(), false));
    assertDoesNotThrow(() -> Summarizer.summarize(weighted, 5, 10 * newDimensions, 1, false, 0.1,
            Summarizer::L2distance, clusterInitializer, 0, false, null));
    // the same invalid-weight cycle for iterativeClustering's reference weights
    refs.get(0).weight = -1;
    assertThrows(IllegalArgumentException.class, () -> Summarizer.iterativeClustering(5, 10 * newDimensions, 1,
            refs, getPoint, Summarizer::L2distance, clusterInitializer, 0, false, false, 0.1, null));
    refs.get(0).weight = Float.POSITIVE_INFINITY;
    assertThrows(IllegalArgumentException.class, () -> Summarizer.iterativeClustering(5, 10 * newDimensions, 1,
            refs, getPoint, Summarizer::L2distance, clusterInitializer, 0, false, false, 0.1, null));
    refs.get(0).weight = Float.NaN;
    assertThrows(IllegalArgumentException.class, () -> Summarizer.iterativeClustering(5, 10 * newDimensions, 1,
            refs, getPoint, Summarizer::L2distance, clusterInitializer, 0, false, false, 0.1, null));
    refs.get(0).weight = 0;
    assertThrows(IllegalArgumentException.class, () -> Summarizer.iterativeClustering(5, 10 * newDimensions, 1,
            refs, getPoint, Summarizer::L2distance, clusterInitializer, 0, false, false, 0.1, null));
    refs.get(0).weight = 1;
    assertDoesNotThrow(() -> Summarizer.iterativeClustering(5, 10 * newDimensions, 1, refs, getPoint,
            Summarizer::L2distance, clusterInitializer, 0, false, false, 0.1, null));
    // assignAndRecompute: empty clusters or empty refs are rejected
    assertThrows(IllegalArgumentException.class, () -> Summarizer.assignAndRecompute(refs, getPoint,
            Collections.emptyList(), Summarizer::L2distance, false));
    List<ICluster<float[]>> list = new ArrayList<>();
    list.add(clusterInitializer.apply(new float[newDimensions], 1f));
    assertThrows(IllegalArgumentException.class, () -> Summarizer.assignAndRecompute(Collections.emptyList(),
            getPoint, list, Summarizer::L2distance, false));
    assertDoesNotThrow(() -> Summarizer.assignAndRecompute(refs, getPoint, list, Summarizer::L2distance, false));
    // the single cluster sits at the origin after recompute
    assertArrayEquals(list.get(0).primaryRepresentative(Summarizer::L2distance), new float[newDimensions], 1e-6f);
    float[] newPoint = new float[newDimensions];
    Arrays.fill(newPoint, 1.01f);
    list.get(0).absorb(clusterInitializer.apply(newPoint, 1f), Summarizer::L2distance);
    // a distance function returning a negative value must be rejected
    BiFunction<float[], float[], Double> badDistance = mock();
    when(badDistance.apply(any(), any())).thenReturn(-1.0);
    assertThrows(IllegalArgumentException.class,
            () -> Summarizer.assignAndRecompute(refs, getPoint, list, badDistance, false));
}
/**
 * Exercises MultiCenter's defensive checks against an ill-behaved distance
 * function: a mocked distance that returns negative values (at various call
 * positions, staged via ordered thenReturn chains) must make absorb(),
 * distance() and recompute() throw IllegalArgumentException, while
 * non-negative sequences are accepted. Also covers recompute() on clusters
 * with zero weight and with assigned points.
 */
@Test
public void TestMultiCenter() {
    BiFunction<float[], Float, ICluster<float[]>> clusterInitializer = (a, b) -> MultiCenter.initialize(a, b, 0.8,
            3);
    // every reference resolves to the origin in one dimension
    Function<Integer, float[]> getPoint = (i) -> {
        return new float[1];
    };
    ICluster<float[]> newCluster = clusterInitializer.apply(new float[1], 1f);
    float[] newPoint = new float[] { 1 };
    // mocked distance whose return values are staged per test case below
    BiFunction<float[], float[], Double> badDistance = mock();
    when(badDistance.apply(any(), any())).thenReturn(-1.0);
    ICluster<float[]> cluster = clusterInitializer.apply(new float[1], 1.0f);
    ICluster<float[]> another = clusterInitializer.apply(new float[1], 1.0f);
    // negative distance on the first call: rejected
    assertThrows(IllegalArgumentException.class, () -> cluster.absorb(another, badDistance));
    when(badDistance.apply(any(), any())).thenReturn(-1.0).thenReturn(-1.0);
    assertThrows(IllegalArgumentException.class, () -> cluster.distance(new float[1], badDistance));
    assertThrows(IllegalArgumentException.class, () -> cluster.absorb(another, badDistance));
    newCluster.absorb(clusterInitializer.apply(newPoint, 1f), Summarizer::L2distance);
    // negative value appearing only on a later call: still rejected
    when(badDistance.apply(any(), any())).thenReturn(1.0).thenReturn(1.0).thenReturn(1.0).thenReturn(-1.0);
    assertThrows(IllegalArgumentException.class, () -> newCluster.absorb(another, badDistance));
    ICluster<float[]> newCluster2 = clusterInitializer.apply(new float[1], 1f);
    newCluster2.absorb(clusterInitializer.apply(newPoint, 1f), Summarizer::L2distance);
    // an all-non-negative sequence is accepted
    when(badDistance.apply(any(), any())).thenReturn(1.0).thenReturn(1.0).thenReturn(1.0).thenReturn(1.0)
            .thenReturn(1.0);
    newCluster2.absorb(clusterInitializer.apply(newPoint, 1f), badDistance);
    when(badDistance.apply(any(), any())).thenReturn(1.0).thenReturn(-1.0);
    assertThrows(IllegalArgumentException.class, () -> newCluster2.distance(new float[1], badDistance));
    another.absorb(clusterInitializer.apply(newPoint, 1f), Summarizer::L2distance);
    when(badDistance.apply(any(), any())).thenReturn(-1.0).thenReturn(1.0).thenReturn(-1.0);
    assertThrows(IllegalArgumentException.class, () -> newCluster2.distance(another, badDistance));
    // error at a different location
    assertThrows(IllegalArgumentException.class, () -> newCluster2.distance(another, badDistance));
    when(badDistance.apply(any(), any())).thenReturn(1.0).thenReturn(1.0).thenReturn(1.0).thenReturn(1.0)
            .thenReturn(1.0).thenReturn(1.0).thenReturn(1.0).thenReturn(1.0).thenReturn(1.0).thenReturn(1.0)
            .thenReturn(1.0).thenReturn(-1.0);
    assertThrows(IllegalArgumentException.class, () -> newCluster2.absorb(another, badDistance));
    // recompute on an empty / zero-weight cluster returns 0
    ICluster<float[]> newCluster3 = MultiCenter.initialize(new float[1], 0f, 0, 1);
    assertEquals(newCluster3.recompute(getPoint, false, Summarizer::L2distance), 0);
    assertEquals(newCluster3.recompute(getPoint, true, Summarizer::L2distance), 0);
    newCluster3.getAssignedPoints().add(new Weighted<>(1, 1.0f));
    assertEquals(newCluster3.recompute(getPoint, true, Summarizer::L2distance), 0);
    // recompute/absorb with a negative distance: rejected
    ICluster<float[]> newCluster4 = MultiCenter.initialize(new float[1], 1f, 0, 1);
    when(badDistance.apply(any(), any())).thenReturn(-1.0).thenReturn(-1.0);
    newCluster4.getAssignedPoints().add(new Weighted<>(1, 1.0f));
    assertThrows(IllegalArgumentException.class, () -> newCluster4.recompute(getPoint, true, badDistance));
    assertThrows(IllegalArgumentException.class, () -> newCluster4.absorb(newCluster3, badDistance));
}
/**
 * Analogous defensive-check coverage for the single-representative Center
 * cluster: negative mocked distances must be rejected by distance(),
 * absorb() and recompute(); recompute on assigned points at the origin
 * yields 0; and absorbing a weight-10 MultiCenter into a weight-10 Center
 * places the merged representative at the weighted midpoint (0.5).
 */
@Test
public void testCenter() {
    int newDimensions = 1;
    // every reference resolves to the origin in one dimension
    Function<Integer, float[]> getPoint = (i) -> {
        return new float[1];
    };
    BiFunction<float[], float[], Double> badDistance = mock();
    ICluster<float[]> newCluster5 = Center.initialize(new float[newDimensions], 0f);
    // for a Center the extent measure is its average radius
    assertEquals(newCluster5.extentMeasure(), newCluster5.averageRadius());
    assertEquals(newCluster5.recompute(getPoint, true, Summarizer::L2distance), 0);
    newCluster5.getAssignedPoints().add(new Weighted<>(1, 1.0f));
    assertEquals(newCluster5.recompute(getPoint, true, Summarizer::L2distance), 0);
    when(badDistance.apply(any(), any())).thenReturn(-1.0).thenReturn(-1.0);
    assertThrows(IllegalArgumentException.class, () -> newCluster5.distance(new float[1], badDistance));
    ICluster<float[]> newCluster6 = Center.initialize(new float[newDimensions], 10f);
    newCluster6.getAssignedPoints().add(new Weighted<>(1, 1.0f));
    newCluster6.getAssignedPoints().add(new Weighted<>(1, 1.0f));
    when(badDistance.apply(any(), any())).thenReturn(-1.0).thenReturn(-1.0);
    assertThrows(IllegalArgumentException.class, () -> newCluster6.absorb(newCluster5, badDistance));
    assertThrows(IllegalArgumentException.class, () -> newCluster6.recompute(getPoint, true, badDistance));
    // merge two MultiCenters at 1 and 2 (combined weight 10) ...
    ICluster<float[]> multiCenter1 = MultiCenter.initialize(new float[] { 1 }, 5.0f, 0.8, 2);
    ICluster<float[]> multiCenter2 = MultiCenter.initialize(new float[] { 2 }, 5.0f, 0.8, 2);
    multiCenter1.absorb(multiCenter2, Summarizer::L2distance); // weight 10
    // ... then absorb into a weight-10 Center at the origin: midpoint 0.5
    newCluster6.absorb(multiCenter1, Summarizer::L2distance);
    assertEquals(newCluster6.primaryRepresentative(Summarizer::L2distance)[0], 0.5, 1e-6f);
    // negative initial weights still hit the distance validation on recompute
    ICluster<float[]> newCluster7 = Center.initialize(new float[newDimensions], -10f);
    newCluster7.getAssignedPoints().add(new Weighted<>(1, 1.0f));
    newCluster7.getAssignedPoints().add(new Weighted<>(1, 1.0f));
    when(badDistance.apply(any(), any())).thenReturn(-1.0);
    assertThrows(IllegalArgumentException.class, () -> newCluster7.recompute(getPoint, true, badDistance));
    ICluster<float[]> newCluster8 = Center.initialize(new float[newDimensions], 1.9f);
    newCluster8.getAssignedPoints().add(new Weighted<>(1, 1.0f));
    newCluster8.getAssignedPoints().add(new Weighted<>(1, 1.0f));
    when(badDistance.apply(any(), any())).thenReturn(-1.0);
    assertThrows(IllegalArgumentException.class, () -> newCluster8.recompute(getPoint, true, badDistance));
}
/**
 * Runs iterativeClustering with far more seed clusters (200) than distinct
 * data values and a zero max-allowed bound, verifying the clustering
 * completes without throwing when many clusters end up redundant.
 */
@Test
public void zeroTest() {
    Random random = new Random(0);
    dataSize = 2000;
    // one-dimensional points: integer grid in [0, 100) plus small jitter
    float[][] points = new float[dataSize][];
    for (int y = 0; y < dataSize; y++) {
        points[y] = new float[] { (float) (random.nextInt(100) + 0.5 * random.nextDouble()) };
    }
    ArrayList<Weighted<float[]>> weighted = new ArrayList<>();
    ArrayList<Weighted<Integer>> refs = new ArrayList<>();
    Function<Integer, float[]> getPoint = (x) -> weighted.get(x).index;
    int count = 0;
    for (float[] point : points) {
        // testing 0 weight
        weighted.add(new Weighted<>(point, 1.0f));
        refs.add(new Weighted<Integer>(count, 1.0f));
        ++count;
    }
    BiFunction<float[], Float, ICluster<float[]>> clusterInitializer = (a, b) -> Center.initialize(a, b);
    // seed 200 clusters all at -1, outside the data's range
    List<ICluster<float[]>> list = new ArrayList<>();
    for (int y = 0; y < 200; y++) {
        list.add(clusterInitializer.apply(new float[] { -1.0f }, 1.0f));
    }
    assertDoesNotThrow(() -> Summarizer.iterativeClustering(100, 0, 1, refs, getPoint, Summarizer::L2distance,
            clusterInitializer, 0, false, true, 0.1, list));
}
/**
 * Statistical check of Summarizer.summarize across 20 random trials: the
 * generated data (from getData) is expected to cluster into about
 * 2 * dimensions groups, so at most one trial may fall under and at most one
 * may fall over that count. Parameterized over the distance functions
 * supplied by generateArguments.
 *
 * NOTE(review): uses the bare {@code assert} keyword, which is only active
 * when the JVM runs with -ea.
 */
@ParameterizedTest
@MethodSource("generateArguments")
public void SummaryTest(BiFunction<float[], float[], Double> distance) {
    int over = 0;
    int under = 0;
    for (int numTrials = 0; numTrials < 20; numTrials++) {
        // seed is logged so a failing trial can be reproduced
        long seed = new Random().nextLong();
        Random random = new Random(seed);
        int newDimensions = random.nextInt(10) + 3;
        dataSize = 200000;
        float[][] points = getData(dataSize, newDimensions, random.nextInt(), distance);
        SampleSummary summary = Summarizer.summarize(points, 5 * newDimensions, 10 * newDimensions, false, distance,
                random.nextInt(), false);
        System.out.println("trial " + numTrials + " : " + summary.summaryPoints.length + " clusters for "
                + newDimensions + " dimensions, seed : " + seed);
        if (summary.summaryPoints.length < 2 * newDimensions) {
            ++under;
        } else if (summary.summaryPoints.length > 2 * newDimensions) {
            ++over;
        }
    }
    // tolerate at most one outlier trial in each direction
    assert (under <= 1);
    assert (over <= 1);
}
/**
 * Checks that sequential and parallel summarization with the same seed agree
 * on sample weight and cluster count, and that singleCentroidSummarize
 * produces the same number of clusters with total weight matching the
 * sampled weight (to 1e-3). Exact cluster contents are not compared because
 * parallel execution can reorder merges.
 */
@ParameterizedTest
@MethodSource("generateArguments")
public void ParallelTest(BiFunction<float[], float[], Double> distance) {
    long seed = new Random().nextLong();
    Random random = new Random(seed);
    int newDimensions = random.nextInt(10) + 3;
    dataSize = 200000;
    float[][] points = getData(dataSize, newDimensions, random.nextInt(), distance);
    System.out.println("checking seed : " + seed);
    // same inner seed for the sequential and parallel runs
    int nextSeed = random.nextInt();
    SampleSummary summary1 = Summarizer.summarize(points, 5 * newDimensions, 10 * newDimensions, false, distance,
            nextSeed, false);
    SampleSummary summary2 = Summarizer.summarize(points, 5 * newDimensions, 10 * newDimensions, false, distance,
            nextSeed, true);
    ArrayList<Weighted<float[]>> pointList = new ArrayList<>();
    for (float[] point : points) {
        pointList.add(new Weighted<>(point, 1.0f));
    }
    List<ICluster<float[]>> clusters = Summarizer.singleCentroidSummarize(pointList, 5 * newDimensions,
            10 * newDimensions, 1, true, distance, nextSeed, false, null);
    assertEquals(summary2.weightOfSamples, summary1.weightOfSamples, " sampling inconsistent");
    assertEquals(summary2.summaryPoints.length, summary1.summaryPoints.length,
            " incorrect length of typical points");
    assertEquals(clusters.size(), summary1.summaryPoints.length);
    double total = clusters.stream().map(ICluster::getWeight).reduce(0.0, Double::sum);
    assertEquals(total, summary1.weightOfSamples, 1e-3);
    // parallelization can produce reordering of merges
}
// Verifies that the L2-specific convenience entry points (l2summarize) agree
// exactly — point for point and weight for weight — with the generic summarize
// invoked with Summarizer::L2distance and the same seed.
@Test
public void SampleSummaryTestL2() {
    // fresh random seed; printed below so a failing run can be reproduced
    long seed = new Random().nextLong();
    Random random = new Random(seed);
    int newDimensions = random.nextInt(10) + 3;
    dataSize = 200000;
    float[][] points = getData(dataSize, newDimensions, random.nextInt(), Summarizer::L2distance);
    System.out.println("checking L2 seed : " + seed);
    int nextSeed = random.nextInt();
    // weighted copy of the input for the list-based overload
    ArrayList<Weighted<float[]>> pointList = new ArrayList<>();
    for (float[] point : points) {
        pointList.add(new Weighted<>(point, 1.0f));
    }
    // generic path vs the two L2 convenience overloads, all driven by nextSeed
    SampleSummary summary1 = Summarizer.summarize(points, 5 * newDimensions, 20 * newDimensions, false,
            Summarizer::L2distance, nextSeed, false);
    SampleSummary summary2 = Summarizer.l2summarize(points, 5 * newDimensions, nextSeed);
    SampleSummary summary3 = Summarizer.l2summarize(pointList, 5 * newDimensions, 20 * newDimensions, false,
            nextSeed);
    assertEquals(summary2.weightOfSamples, summary1.weightOfSamples, " sampling inconsistent");
    assertEquals(summary3.weightOfSamples, summary1.weightOfSamples, " sampling inconsistent");
    assertEquals(summary2.summaryPoints.length, summary1.summaryPoints.length,
            " incorrect length of typical points");
    assertEquals(summary3.summaryPoints.length, summary1.summaryPoints.length,
            " incorrect length of typical points");
    // the summaries must match elementwise within a small float tolerance
    for (int i = 0; i < summary2.summaryPoints.length; i++) {
        assertArrayEquals(summary1.summaryPoints[i], summary2.summaryPoints[i], 1e-6f);
        assertArrayEquals(summary1.summaryPoints[i], summary3.summaryPoints[i], 1e-6f);
        assertEquals(summary1.relativeWeight[i], summary2.relativeWeight[i], 1e-6f);
        assertEquals(summary1.relativeWeight[i], summary3.relativeWeight[i], 1e-6f);
    }
}
// Checks idempotence of singleCentroidSummarize: feeding a previous clustering
// back in as the initial set of clusters must reproduce the same clustering.
@Test
public void IdempotenceTestL2() {
    // fresh random seed; printed below so a failing run can be reproduced
    long seed = new Random().nextLong();
    Random random = new Random(seed);
    int newDimensions = random.nextInt(10) + 3;
    dataSize = 200000;
    float[][] points = getData(dataSize, newDimensions, random.nextInt(), Summarizer::L2distance);
    System.out.println("checking idempotence L2 seed : " + seed);
    int nextSeed = random.nextInt();
    ArrayList<Weighted<float[]>> pointList = new ArrayList<>();
    for (float[] point : points) {
        pointList.add(new Weighted<>(point, 1.0f));
    }
    // first pass starts from scratch (null initial clusters) ...
    List<ICluster<float[]>> clusters = Summarizer.singleCentroidSummarize(pointList, 5 * newDimensions,
            20 * newDimensions, 1, true, Summarizer::L2distance, nextSeed, false, null);
    // ... second pass is seeded with the result of the first
    List<ICluster<float[]>> clusters2 = Summarizer.singleCentroidSummarize(pointList, 5 * newDimensions,
            20 * newDimensions, 1, true, Summarizer::L2distance, nextSeed, false, clusters);
    assertEquals(clusters.size(), clusters2.size(), " incorrect sizes");
    for (int i = 0; i < clusters.size(); i++) {
        // note clusters can have same weight and get permuted
        assertEquals(clusters.get(i).getWeight(), clusters2.get(i).getWeight());
    }
    // compare again after sorting both lists by extent, to be permutation-robust
    clusters.sort(Comparator.comparingDouble(ICluster::extentMeasure));
    clusters2.sort(Comparator.comparingDouble(ICluster::extentMeasure));
    assertEquals(clusters.size(), clusters2.size(), " incorrect sizes");
    for (int i = 0; i < clusters.size(); i++) {
        // note clusters can have same weight and get permuted
        assertEquals(clusters.get(i).extentMeasure(), clusters2.get(i).extentMeasure());
        assertEquals(clusters.get(i).averageRadius(), clusters2.get(i).averageRadius());
        // for single-centroid clusters the extent measure equals the average radius
        assertEquals(clusters.get(i).averageRadius(), clusters.get(i).extentMeasure());
    }
}
/**
 * Builds a synthetic data set: a standard normal cloud, shrunk by a factor of
 * three, with one randomly chosen coordinate of each point shifted by twice the
 * distance (under the supplied metric) between the origin and the all-ones
 * sigma vector. The shifts use a fixed-seed generator, so only {@code seed}
 * (which drives the mixture generator) varies between calls.
 */
public float[][] getData(int dataSize, int newDimensions, int seed, BiFunction<float[], float[], Double> distance) {
    // fixed mixture parameters: a single standard normal regime, no anomalies
    baseMu = 0.0;
    baseSigma = 1.0;
    anomalyMu = 0.0;
    anomalySigma = 1.0;
    transitionToAnomalyProbability = 0.0;
    // ignoring anomaly cluster for now
    transitionToBaseProbability = 1.0;
    // deterministic generator for the coordinate shifts (independent of 'seed')
    Random shiftRng = new Random(0);
    NormalMixtureTestData generator = new NormalMixtureTestData(baseMu, baseSigma, anomalyMu, anomalySigma,
            transitionToAnomalyProbability, transitionToBaseProbability);
    double[][] raw = generator.generateTestData(dataSize, newDimensions, seed);
    float[] origin = new float[newDimensions];
    float[] unitSigma = new float[newDimensions];
    Arrays.fill(unitSigma, 1f);
    // metric-dependent scale used for the per-point shift
    double unitDistance = distance.apply(origin, unitSigma);
    float[][] result = new float[dataSize][];
    for (int i = 0; i < dataSize; i++) {
        // pick the coordinate to shift, shrink every coordinate, then shift
        int shifted = shiftRng.nextInt(newDimensions);
        for (int j = 0; j < newDimensions; j++) {
            // standard deviation adds up across dimension; taking square root
            // and using a 3 sigma ball
            raw[i][j] *= 1.0 / 3.0;
        }
        double offset = (shiftRng.nextDouble() < 0.5) ? 2.0 * unitDistance : -2.0 * unitDistance;
        raw[i][shifted] += offset;
        result[i] = toFloatArray(raw[i]);
    }
    return result;
}
// Supplies each distance function exercised by the parameterized tests above.
private static Stream<Arguments> generateArguments() {
    List<BiFunction<float[], float[], Double>> distances = Arrays.asList(Summarizer::L1distance,
            Summarizer::L2distance, Summarizer::LInfinitydistance);
    return distances.stream().map(Arguments::of);
}
}
| 392 |
0 | Create_ds/random-cut-forest-by-aws/Java/core/src/test/java/com/amazon | Create_ds/random-cut-forest-by-aws/Java/core/src/test/java/com/amazon/randomcutforest/ForecastTest.java | /*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazon.randomcutforest;
import static com.amazon.randomcutforest.testutils.ShingledMultiDimDataWithKeys.generateShingledData;
import static org.junit.jupiter.api.Assertions.assertEquals;
import java.util.Random;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;
import com.amazon.randomcutforest.config.Precision;
import com.amazon.randomcutforest.returntypes.RangeVector;
import com.amazon.randomcutforest.testutils.MultiDimDataWithKey;
import com.amazon.randomcutforest.testutils.ShingledMultiDimDataWithKeys;
// Functional test: trains a shingled RandomCutForest on a noisy periodic signal
// and checks that every extrapolated forecast value lies within its reported
// [lower, upper] range, while accumulating RMSE diagnostics per horizon step.
@Tag("functional")
public class ForecastTest {
    @Test
    public void basic() {
        int sampleSize = 256;
        int baseDimensions = 1;
        int shingleSize = 8;
        int dimensions = baseDimensions * shingleSize;
        // fresh random seed; printed so a failing run can be reproduced
        long seed = new Random().nextLong();
        System.out.println(seed);
        int length = 4 * sampleSize;
        int outputAfter = 128;
        RandomCutForest forest = new RandomCutForest.Builder<>().compact(true).dimensions(dimensions)
                .precision(Precision.FLOAT_32).randomSeed(seed).internalShinglingEnabled(true).shingleSize(shingleSize)
                .outputAfter(outputAfter).build();
        // as the ratio of amplitude (signal) to noise is changed, the estimation range
        // in forecast
        // (or any other inference) should increase
        MultiDimDataWithKey dataWithKeys = ShingledMultiDimDataWithKeys.getMultiDimData(length, 50, 100, 10, seed,
                baseDimensions);
        System.out.println(dataWithKeys.changes.length + " anomalies injected ");
        double[][] shingledData = generateShingledData(dataWithKeys.data, shingleSize, baseDimensions, false);
        assertEquals(shingledData.length, dataWithKeys.data.length - shingleSize + 1);
        int horizon = 20;
        // accumulated squared errors for the central/lower/upper forecasts, per step
        double[] error = new double[horizon];
        double[] lowerError = new double[horizon];
        double[] upperError = new double[horizon];
        for (int j = 0; j < dataWithKeys.data.length; j++) {
            // forecast first; change centrality to achieve a control over the sampling
            // setting centrality = 0 would correspond to random sampling from the leaves
            // reached by
            // impute visitor
            RangeVector forecast = forest.extrapolateFromShingle(forest.lastShingledPoint(), horizon, 1, 1.0);
            assert (forecast.values.length == horizon);
            for (int i = 0; i < horizon; i++) {
                // check ranges
                assert (forecast.values[i] >= forecast.lower[i]);
                assert (forecast.values[i] <= forecast.upper[i]);
                // compute errors
                if (j > outputAfter + shingleSize - 1 && j + i < dataWithKeys.data.length) {
                    double t = dataWithKeys.data[j + i][0] - forecast.values[i];
                    error[i] += t * t;
                    t = dataWithKeys.data[j + i][0] - forecast.lower[i];
                    lowerError[i] += t * t;
                    t = dataWithKeys.data[j + i][0] - forecast.upper[i];
                    upperError[i] += t * t;
                }
            }
            forest.update(dataWithKeys.data[j]);
        }
        // NOTE(review): the divisor below looks one larger than the number of terms
        // accumulated by the condition above — confirm; it only skews the printed
        // diagnostic slightly and is not asserted on.
        System.out.println("RMSE ");
        for (int i = 0; i < horizon; i++) {
            double t = error[i] / (dataWithKeys.data.length - shingleSize + 1 - outputAfter - i);
            System.out.print(Math.sqrt(t) + " ");
        }
        System.out.println();
        System.out.println("RMSE Lower ");
        for (int i = 0; i < horizon; i++) {
            double t = lowerError[i] / (dataWithKeys.data.length - shingleSize + 1 - outputAfter - i);
            System.out.print(Math.sqrt(t) + " ");
        }
        System.out.println();
        System.out.println("RMSE Upper ");
        for (int i = 0; i < horizon; i++) {
            double t = upperError[i] / (dataWithKeys.data.length - shingleSize + 1 - outputAfter - i);
            System.out.print(Math.sqrt(t) + " ");
        }
        System.out.println();
    }
}
| 393 |
0 | Create_ds/random-cut-forest-by-aws/Java/core/src/test/java/com/amazon | Create_ds/random-cut-forest-by-aws/Java/core/src/test/java/com/amazon/randomcutforest/DynamicPointSetFunctionalTest.java | /*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazon.randomcutforest;
import static com.amazon.randomcutforest.testutils.ExampleDataSets.generateFan;
import static java.lang.Math.PI;
import static java.lang.Math.cos;
import static java.lang.Math.sin;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.util.List;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;
import com.amazon.randomcutforest.returntypes.DensityOutput;
import com.amazon.randomcutforest.returntypes.Neighbor;
// Functional tests that feed a slowly rotating "fan" point set into a
// time-decaying forest and check that density and near-neighbor queries at a
// fixed probe point track the moving blades as old points decay out.
@Tag("functional")
public class DynamicPointSetFunctionalTest {
    // NOTE(review): apart from randomSeed, the static fields below are not
    // referenced by the tests in this class — they look copied from a sibling
    // test fixture; confirm before removing.
    private static int numberOfTrees;
    private static int sampleSize;
    private static int dimensions;
    private static int randomSeed;
    private static RandomCutForest parallelExecutionForest;
    private static RandomCutForest singleThreadedForest;
    private static RandomCutForest forestSpy;
    private static double baseMu;
    private static double baseSigma;
    private static double anomalyMu;
    private static double anomalySigma;
    private static double transitionToAnomalyProbability;
    private static double transitionToBaseProbability;
    private static int dataSize;
    // Rotates a 2D point clockwise about the origin by theta radians.
    static double[] rotateClockWise(double[] point, double theta) {
        double[] result = new double[2];
        result[0] = cos(theta) * point[0] + sin(theta) * point[1];
        result[1] = -sin(theta) * point[0] + cos(theta) * point[1];
        return result;
    }
    // Density at a fixed probe should rise when a fan blade rotates past it and
    // fall when the blades are elsewhere; directional components must sum to the
    // total and point toward the nearest blade.
    @Test
    public void movingDensity() {
        int newDimensions = 2;
        randomSeed = 123;
        // timeDecay 1/800 lets points from earlier rotations age out quickly
        RandomCutForest newForest = RandomCutForest.builder().dimensions(newDimensions).randomSeed(randomSeed)
                .timeDecay(1.0 / 800).centerOfMassEnabled(true).storeSequenceIndexesEnabled(true).build();
        double[][] data = generateFan(1000, 3);
        double[] queryPoint = new double[] { 0.7, 0 };
        for (int degree = 0; degree < 360; degree += 2) {
            // feed the whole fan rotated by the current angle
            for (int j = 0; j < data.length; j++) {
                newForest.update(rotateClockWise(data[j], 2 * PI * degree / 360));
            }
            DensityOutput density = newForest.getSimpleDensity(queryPoint);
            double value = density.getDensity(0.001, 2);
            if ((degree <= 60) || ((degree >= 120) && (degree <= 180)) || ((degree >= 240) && (degree <= 300)))
                assertTrue(value < 0.8); // the fan is above at 90,210,330
            if (((degree >= 75) && (degree <= 105)) || ((degree >= 195) && (degree <= 225))
                    || ((degree >= 315) && (degree <= 345)))
                assertTrue(value > 0.5);
            // fan is close by
            // intentionally 0.5 is below 0.8 for a robust test
            // Testing for directionality
            // There can be unclear directionality when the
            // blades are right above
            double bladeAboveInY = density.getDirectionalDensity(0.001, 2).low[1];
            double bladeBelowInY = density.getDirectionalDensity(0.001, 2).high[1];
            double bladesToTheLeft = density.getDirectionalDensity(0.001, 2).high[0];
            double bladesToTheRight = density.getDirectionalDensity(0.001, 2).low[0];
            // directional components must account for the whole density
            assertEquals(value, bladeAboveInY + bladeBelowInY + bladesToTheLeft + bladesToTheRight, 1E-6);
            // the tests below have a freedom of 10% of the total value
            if (((degree >= 75) && (degree <= 85)) || ((degree >= 195) && (degree <= 205))
                    || ((degree >= 315) && (degree <= 325))) {
                assertTrue(bladeAboveInY + 0.1 * value > bladeBelowInY);
                assertTrue(bladeAboveInY + 0.1 * value > bladesToTheRight);
            }
            if (((degree >= 95) && (degree <= 105)) || ((degree >= 215) && (degree <= 225))
                    || ((degree >= 335) && (degree <= 345))) {
                assertTrue(bladeBelowInY + 0.1 * value > bladeAboveInY);
                assertTrue(bladeBelowInY + 0.1 * value > bladesToTheRight);
            }
            if (((degree >= 60) && (degree <= 75)) || ((degree >= 180) && (degree <= 195))
                    || ((degree >= 300) && (degree <= 315))) {
                assertTrue(bladeAboveInY + 0.1 * value > bladesToTheLeft);
                assertTrue(bladeAboveInY + 0.1 * value > bladesToTheRight);
            }
            if (((degree >= 105) && (degree <= 120)) || ((degree >= 225) && (degree <= 240)) || (degree >= 345)) {
                assertTrue(bladeBelowInY + 0.1 * value > bladesToTheLeft);
                assertTrue(bladeBelowInY + 0.1 * value > bladesToTheRight);
            }
            // fans are farthest to the left at 30,150 and 270
            if (((degree >= 15) && (degree <= 45)) || ((degree >= 135) && (degree <= 165))
                    || ((degree >= 255) && (degree <= 285))) {
                assertTrue(bladesToTheLeft + 0.1 * value > bladeAboveInY + bladeBelowInY + bladesToTheRight);
                assertTrue(bladeAboveInY + bladeBelowInY + 0.1 * value > bladesToTheRight);
            }
        }
    }
    // Near-neighbor queries at the same probe: a blade overhead yields a close
    // neighbor (within 0.1); blades far away yield none and a distant best match.
    @Test
    public void movingNeighbors() {
        int newDimensions = 2;
        randomSeed = 123;
        RandomCutForest newForest = RandomCutForest.builder().dimensions(newDimensions).randomSeed(randomSeed)
                .timeDecay(1.0 / 800).centerOfMassEnabled(true).storeSequenceIndexesEnabled(true).build();
        double[][] data = generateFan(1000, 3);
        double[] queryPoint = new double[] { 0.7, 0 };
        for (int degree = 0; degree < 360; degree += 2) {
            for (int j = 0; j < data.length; j++) {
                newForest.update(rotateClockWise(data[j], 2 * PI * degree / 360));
            }
            List<Neighbor> ans = newForest.getNearNeighborsInSample(queryPoint, 1);
            List<Neighbor> closeNeighBors = newForest.getNearNeighborsInSample(queryPoint, 0.1);
            Neighbor best = null;
            if (ans != null) {
                // results must be sorted by increasing distance
                best = ans.get(0);
                for (int j = 1; j < ans.size(); j++) {
                    assert (ans.get(j).distance >= best.distance);
                }
            }
            // fan is away at 30, 150 and 270
            if (((degree > 15) && (degree < 45)) || ((degree >= 135) && (degree <= 165))
                    || ((degree >= 255) && (degree <= 285))) {
                assertTrue(closeNeighBors.size() == 0); // no close neighbor
                assertTrue(best.distance > 0.3);
            }
            // fan is overhead at 90, 210 and 330
            if (((degree > 75) && (degree < 105)) || ((degree >= 195) && (degree <= 225))
                    || ((degree >= 315) && (degree <= 345))) {
                assertTrue(closeNeighBors.size() > 0);
                assertEquals(closeNeighBors.get(0).distance, best.distance, 1E-10);
            }
        }
    }
}
| 394 |
0 | Create_ds/random-cut-forest-by-aws/Java/core/src/test/java/com/amazon | Create_ds/random-cut-forest-by-aws/Java/core/src/test/java/com/amazon/randomcutforest/MultiCenterTest.java | /*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazon.randomcutforest;
import static com.amazon.randomcutforest.CommonUtils.toFloatArray;
import static java.lang.Math.min;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Random;
import java.util.function.BiFunction;
import java.util.stream.Stream;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
import com.amazon.randomcutforest.summarization.GenericMultiCenter;
import com.amazon.randomcutforest.summarization.ICluster;
import com.amazon.randomcutforest.summarization.MultiCenter;
import com.amazon.randomcutforest.summarization.Summarizer;
import com.amazon.randomcutforest.testutils.NormalMixtureTestData;
import com.amazon.randomcutforest.util.Weighted;
/**
 * Functional tests for multi-representative clustering via {@link MultiCenter}
 * and {@link GenericMultiCenter}, including a string-clustering exercise driven
 * by an edit-distance metric.
 */
@Tag("functional")
public class MultiCenterTest {
    // mixture parameters shared with getData(); set there before generation
    private static double baseMu;
    private static double baseSigma;
    private static double anomalyMu;
    private static double anomalySigma;
    private static double transitionToAnomalyProbability;
    private static double transitionToBaseProbability;
    private static int dataSize;
    // Parameter validation: shrinkage must lie in [0,1] and the number of
    // representatives must be positive and bounded.
    @Test
    public void constructorTest() {
        assertThrows(IllegalArgumentException.class, () -> MultiCenter.initialize(new float[4], 0, -1.0, 1));
        assertThrows(IllegalArgumentException.class, () -> MultiCenter.initialize(new float[4], 0, 2.0, 1));
        assertThrows(IllegalArgumentException.class, () -> MultiCenter.initialize(new float[4], 0, 1.0, 0));
        assertThrows(IllegalArgumentException.class, () -> MultiCenter.initialize(new float[4], 0, 1.0, 1000));
        assertThrows(IllegalArgumentException.class, () -> GenericMultiCenter.initialize(new float[4], 0, -1.0, 1));
        assertThrows(IllegalArgumentException.class, () -> GenericMultiCenter.initialize(new float[4], 0, 2.0, 1));
        assertThrows(IllegalArgumentException.class, () -> GenericMultiCenter.initialize(new float[4], 0, 1.0, 0));
        assertThrows(IllegalArgumentException.class, () -> GenericMultiCenter.initialize(new float[4], 0, 1.0, 1000));
    }
    // A freshly initialized center has no assigned points and zero extent.
    @Test
    public void initializationTest() {
        GenericMultiCenter genericMultiCenter = GenericMultiCenter.initialize(new float[4], 0, 0.5, 1);
        MultiCenter multiCenter = MultiCenter.initialize(new float[4], 0, 0.5, 1);
        List<Weighted<Integer>> a = new ArrayList<>();
        assertEquals(multiCenter.getAssignedPoints().getClass(), a.getClass());
        assertEquals(genericMultiCenter.getAssignedPoints(), Collections.emptyList());
        assertEquals(genericMultiCenter.averageRadius(), 0);
        assertEquals(genericMultiCenter.extentMeasure(), 0);
    }
    // Over 10 random trials, the recovered cluster count should rarely fall
    // below 2 * dimensions (the generator plants 2 shifted modes per dimension).
    @ParameterizedTest
    @MethodSource("generateArguments")
    public void SummaryTest(BiFunction<float[], float[], Double> distance) {
        // NOTE(review): 'over' is tallied but never asserted in this method —
        // confirm whether an upper-bound check (over <= 1) was intended.
        int over = 0;
        int under = 0;
        for (int numTrials = 0; numTrials < 10; numTrials++) {
            long seed = new Random().nextLong();
            Random random = new Random(seed);
            int newDimensions = random.nextInt(10) + 3;
            dataSize = 200000;
            float[][] points = getData(dataSize, newDimensions, random.nextInt(), distance);
            List<ICluster<float[]>> summary = Summarizer.multiSummarize(points, 5 * newDimensions, 10 * newDimensions,
                    1, false, 0.8, distance, random.nextInt(), false, random.nextDouble(), 1);
            System.out.println("trial " + numTrials + " : " + summary.size() + " clusters for " + newDimensions
                    + " dimensions, seed : " + seed);
            if (summary.size() < 2 * newDimensions) {
                ++under;
            } else if (summary.size() > 2 * newDimensions) {
                ++over;
            }
        }
        assert (under <= 1);
    }
    // Same as SummaryTest but with 5 representatives per cluster.
    @ParameterizedTest
    @MethodSource("generateArguments")
    public void MultiSummaryTestGeneric(BiFunction<float[], float[], Double> distance) {
        // NOTE(review): 'over' is tallied but never asserted here either.
        int over = 0;
        int under = 0;
        for (int numTrials = 0; numTrials < 10; numTrials++) {
            long seed = new Random().nextLong();
            Random random = new Random(seed);
            int newDimensions = random.nextInt(10) + 3;
            dataSize = 200000;
            float[][] points = getData(dataSize, newDimensions, random.nextInt(), distance);
            List<ICluster<float[]>> summary = Summarizer.multiSummarize(points, 5 * newDimensions, 10 * newDimensions,
                    1, false, 0.8, distance, random.nextInt(), false, random.nextDouble(), 5);
            System.out.println("trial " + numTrials + " : " + summary.size() + " clusters for " + newDimensions
                    + " dimensions, seed : " + seed);
            if (summary.size() < 2 * newDimensions) {
                ++under;
            } else if (summary.size() > 2 * newDimensions) {
                ++over;
            }
        }
        assert (under <= 1);
    }
    // Exercises the L2 convenience overload of multiSummarize.
    @Test
    public void MultiSummaryTest() {
        int over = 0;
        int under = 0;
        for (int numTrials = 0; numTrials < 10; numTrials++) {
            long seed = new Random().nextLong();
            Random random = new Random(seed);
            int newDimensions = random.nextInt(10) + 3;
            dataSize = 200000;
            float[][] points = getData(dataSize, newDimensions, random.nextInt(), Summarizer::L2distance);
            List<ICluster<float[]>> summary = Summarizer.multiSummarize(points, 5 * newDimensions, 0.9, 1, seed);
            System.out.println("trial " + numTrials + " : " + summary.size() + " clusters for " + newDimensions
                    + " dimensions, seed : " + seed);
            if (summary.size() < 2 * newDimensions) {
                ++under;
            } else if (summary.size() > 2 * newDimensions) {
                ++over;
            }
        }
        assert (under <= 1);
    }
    // Parallel and sequential multiSummarize must agree exactly when shrinkage
    // is 0 (nonzero shrinkage can diverge through float accumulation order).
    @ParameterizedTest
    @MethodSource("generateArguments")
    public void ParallelTest(BiFunction<float[], float[], Double> distance) {
        long seed = new Random().nextLong();
        Random random = new Random(seed);
        int newDimensions = random.nextInt(10) + 3;
        dataSize = 200000;
        float[][] points = getData(dataSize, newDimensions, random.nextInt(), distance);
        System.out.println("checking parallelEnabled seed : " + seed);
        int nextSeed = random.nextInt();
        // these can differ for shrinkage != 0 due to floating point issues
        List<ICluster<float[]>> summary1 = Summarizer.multiSummarize(points, 5 * newDimensions, 10 * newDimensions, 1,
                false, 0.8, distance, nextSeed, false, 0, 5);
        ArrayList<float[]> list = new ArrayList<>();
        for (float[] point : points) {
            list.add(point);
        }
        List<ICluster<float[]>> summary2 = Summarizer.multiSummarize(list, 5 * newDimensions, 10 * newDimensions, 1,
                false, 0.8, distance, nextSeed, true, 0, 5);
        assertEquals(summary2.size(), summary1.size(), " incorrect number of clusters");
        for (int i = 0; i < summary2.size(); i++) {
            assertEquals(summary1.get(i).getWeight(), summary2.get(i).getWeight(), 1e-6);
            assertEquals(summary1.get(i).extentMeasure(), summary2.get(i).extentMeasure(), 1e-6);
            List<Weighted<float[]>> reps1 = summary1.get(i).getRepresentatives();
            List<Weighted<float[]>> reps2 = summary2.get(i).getRepresentatives();
            assertEquals(reps1.size(), reps2.size());
            for (int j = 0; j < reps1.size(); j++) {
                assertEquals(reps1.get(j).weight, reps2.get(j).weight, 1e-6);
                assertArrayEquals(reps1.get(j).index, reps2.get(j).index, 1e-6f);
            }
        }
    }
    // Clusters strings drawn from two different character distributions; the
    // edit-distance metric should separate them into exactly two clusters.
    @Test
    public void StringTest() {
        long seed = new Random().nextLong();
        System.out.println("checking String summarization seed : " + seed);
        Random random = new Random(seed);
        int size = 100;
        int numberOfStrings = 20000;
        String[] points = new String[numberOfStrings];
        for (int i = 0; i < numberOfStrings; i++) {
            // half the strings are mostly '-', half mostly '_'
            if (random.nextDouble() < 0.5) {
                points[i] = getABString(size, 0.8, random);
            } else {
                points[i] = getABString(size, 0.2, random);
            }
        }
        int nextSeed = random.nextInt();
        List<ICluster<String>> summary = Summarizer.multiSummarize(points, 5, 10, 1, false, 0.8,
                MultiCenterTest::toyDistance, nextSeed, false, 0.1, 5);
        System.out.println();
        assertEquals(summary.size(), 2);
    }
    /**
     * Levenshtein edit distance computed with two rolling rows of the standard
     * dynamic program; symmetric, so the shorter string is kept as {@code a}.
     */
    public static double toyDistance(String a, String b) {
        if (a.length() > b.length()) {
            return toyDistance(b, a);
        }
        // bug fix: when 'a' is empty the DP loop below never runs and dist[1]
        // stays all zeros, so the method returned 0; the distance is |b|.
        if (a.length() == 0) {
            return b.length();
        }
        double[][] dist = new double[2][b.length() + 1];
        // base row: distance from the empty prefix of 'a' is the prefix length
        for (int j = 0; j < b.length() + 1; j++) {
            dist[0][j] = j;
        }
        for (int i = 1; i < a.length() + 1; i++) {
            dist[1][0] = i;
            for (int j = 1; j < b.length() + 1; j++) {
                // substitution (or match) vs deletion vs insertion
                double t = dist[0][j - 1] + ((a.charAt(i - 1) == b.charAt(j - 1)) ? 0 : 1);
                dist[1][j] = min(min(t, dist[0][j] + 1), dist[1][j - 1] + 1);
            }
            // roll the rows forward
            for (int j = 0; j < b.length() + 1; j++) {
                dist[0][j] = dist[1][j];
            }
        }
        return dist[1][b.length()];
    }
    /**
     * Builds a synthetic data set: a standard normal cloud shrunk by a factor
     * of three, with one randomly chosen coordinate of each point shifted by
     * twice the metric distance between the origin and the all-ones vector.
     */
    public float[][] getData(int dataSize, int newDimensions, int seed, BiFunction<float[], float[], Double> distance) {
        baseMu = 0.0;
        baseSigma = 1.0;
        anomalyMu = 0.0;
        anomalySigma = 1.0;
        transitionToAnomalyProbability = 0.0;
        // ignoring anomaly cluster for now
        transitionToBaseProbability = 1.0;
        Random prg = new Random(0);
        NormalMixtureTestData generator = new NormalMixtureTestData(baseMu, baseSigma, anomalyMu, anomalySigma,
                transitionToAnomalyProbability, transitionToBaseProbability);
        double[][] data = generator.generateTestData(dataSize, newDimensions, seed);
        float[][] floatData = new float[dataSize][];
        float[] allZero = new float[newDimensions];
        float[] sigma = new float[newDimensions];
        Arrays.fill(sigma, 1f);
        double scale = distance.apply(allZero, sigma);
        for (int i = 0; i < dataSize; i++) {
            // shrink, shift at random
            int nextD = prg.nextInt(newDimensions);
            for (int j = 0; j < newDimensions; j++) {
                data[i][j] *= 1.0 / (3.0);
                // standard deviation adds up across dimension; taking square root
                // and using s 3 sigma ball
                if (j == nextD) {
                    if (prg.nextDouble() < 0.5)
                        data[i][j] += 2.0 * scale;
                    else
                        data[i][j] -= 2.0 * scale;
                }
            }
            floatData[i] = toFloatArray(data[i]);
        }
        return floatData;
    }
    /**
     * Generates a random string over {'-', '_'} of length in [size, 1.2*size),
     * where each character is '-' with the given probability.
     */
    public String getABString(int size, double probabilityOfA, Random random) {
        StringBuilder stringBuilder = new StringBuilder();
        int newSize = size + random.nextInt(size / 5);
        for (int i = 0; i < newSize; i++) {
            if (random.nextDouble() < probabilityOfA) {
                stringBuilder.append("-");
            } else {
                stringBuilder.append("_");
            }
        }
        return stringBuilder.toString();
    }
    // Distance functions exercised by the parameterized tests in this class.
    private static Stream<Arguments> generateArguments() {
        return Stream.of(Arguments.of((BiFunction<float[], float[], Double>) Summarizer::L1distance),
                Arguments.of((BiFunction<float[], float[], Double>) Summarizer::L2distance));
    }
}
| 395 |
0 | Create_ds/random-cut-forest-by-aws/Java/core/src/test/java/com/amazon | Create_ds/random-cut-forest-by-aws/Java/core/src/test/java/com/amazon/randomcutforest/RandomCutForestBuilderTest.java | /*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazon.randomcutforest;
import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
public class RandomCutForestBuilderTest {
private int numberOfTrees;
private int sampleSize;
private int outputAfter;
private int dimensions;
private double lambda;
private long randomSeed;
private int threadPoolSize;
private RandomCutForest forest;
public static final int DEFAULT_OUTPUT_AFTER_FRACTION = 4;
@BeforeEach
public void setUp() {
numberOfTrees = 99;
sampleSize = 201;
outputAfter = 201 / 5;
dimensions = 2;
lambda = 0.12;
randomSeed = 12345;
threadPoolSize = 9;
forest = RandomCutForest.builder().numberOfTrees(numberOfTrees).sampleSize(sampleSize).outputAfter(outputAfter)
.dimensions(dimensions).timeDecay(lambda).randomSeed(randomSeed).storeSequenceIndexesEnabled(true)
.centerOfMassEnabled(true).parallelExecutionEnabled(true).threadPoolSize(threadPoolSize).build();
}
@Test
public void testForestBuilderWithCustomArguments() {
assertEquals(numberOfTrees, forest.getNumberOfTrees());
assertEquals(sampleSize, forest.getSampleSize());
assertEquals(outputAfter, forest.getOutputAfter());
assertEquals(dimensions, forest.getDimensions());
assertEquals(lambda, forest.getTimeDecay());
assertTrue(forest.isStoreSequenceIndexesEnabled());
assertTrue(forest.isCenterOfMassEnabled());
assertTrue(forest.isParallelExecutionEnabled());
assertEquals(threadPoolSize, forest.getThreadPoolSize());
}
@Test
public void testDefaultForestWithDimensionArgument() {
RandomCutForest f = RandomCutForest.defaultForest(10);
assertEquals(10, f.getDimensions());
assertEquals(256, f.getSampleSize());
assertEquals(256 / DEFAULT_OUTPUT_AFTER_FRACTION, f.getOutputAfter());
assertFalse(f.isStoreSequenceIndexesEnabled());
assertFalse(f.isCenterOfMassEnabled());
assertFalse(f.isParallelExecutionEnabled());
assertEquals(0, f.getThreadPoolSize());
}
@Test
public void testDefaultForestWithDimensionAndRandomSeedArguments() {
RandomCutForest f = RandomCutForest.defaultForest(11, 123);
assertEquals(11, f.getDimensions());
assertEquals(256, f.getSampleSize());
assertEquals(256 / DEFAULT_OUTPUT_AFTER_FRACTION, f.getOutputAfter());
assertFalse(f.isStoreSequenceIndexesEnabled());
assertFalse(f.isCenterOfMassEnabled());
assertFalse(f.isParallelExecutionEnabled());
assertEquals(0, f.getThreadPoolSize());
}
@Test
public void testDefaultForestWithCustomOutputAfterArgument() {
RandomCutForest f = RandomCutForest.defaultForest(10);
assertEquals(10, f.getDimensions());
assertEquals(256, f.getSampleSize());
assertEquals(256 / DEFAULT_OUTPUT_AFTER_FRACTION, f.getOutputAfter());
assertFalse(f.isStoreSequenceIndexesEnabled());
assertFalse(f.isCenterOfMassEnabled());
assertFalse(f.isParallelExecutionEnabled());
assertEquals(0, f.getThreadPoolSize());
}
@Test
public void testForestBuilderWithDefaultParallelExecutionThreadPoolSize() {
RandomCutForest forest = RandomCutForest.builder().numberOfTrees(numberOfTrees).sampleSize(sampleSize)
.outputAfter(outputAfter).dimensions(dimensions).timeDecay(lambda).randomSeed(randomSeed)
.storeSequenceIndexesEnabled(true).centerOfMassEnabled(true).parallelExecutionEnabled(true).build();
assertEquals(numberOfTrees, forest.getNumberOfTrees());
assertEquals(sampleSize, forest.getSampleSize());
assertEquals(outputAfter, forest.getOutputAfter());
assertEquals(dimensions, forest.getDimensions());
assertEquals(lambda, forest.getTimeDecay());
assertTrue(forest.isStoreSequenceIndexesEnabled());
assertTrue(forest.isCenterOfMassEnabled());
assertTrue(forest.isParallelExecutionEnabled());
assertEquals(Runtime.getRuntime().availableProcessors() - 1, forest.getThreadPoolSize());
}
@Test
public void testForestBuilderWithDefaultLambdaValue() {
RandomCutForest forest = RandomCutForest.builder().dimensions(4).sampleSize(sampleSize).build();
assertEquals(1.0 / (RandomCutForest.DEFAULT_SAMPLE_SIZE_COEFFICIENT_IN_TIME_DECAY * sampleSize),
forest.getTimeDecay());
}
@Test
public void testIllegalExceptionIsThrownWhenNumberOfTreesIsZero() {
assertThrows(IllegalArgumentException.class, () -> RandomCutForest.builder().numberOfTrees(0)
.sampleSize(sampleSize).dimensions(dimensions).timeDecay(lambda).build());
}
@Test
public void testIllegalExceptionIsThrownWhenSampleSizeIsZero() {
assertThrows(IllegalArgumentException.class, () -> RandomCutForest.builder().numberOfTrees(numberOfTrees)
.sampleSize(0).dimensions(dimensions).timeDecay(lambda).build());
}
@Test
public void testIllegalExceptionIsThrownWhenOutputAfterIsNegative() {
assertThrows(IllegalArgumentException.class, () -> RandomCutForest.builder().numberOfTrees(numberOfTrees)
.sampleSize(sampleSize).outputAfter(-10).dimensions(dimensions).timeDecay(lambda).build());
}
@Test
public void testIllegalExceptionIsNotThrownWhenOutputAfterIsGreaterThanSample() {
assertDoesNotThrow(() -> RandomCutForest.builder().numberOfTrees(numberOfTrees).sampleSize(sampleSize)
.outputAfter(sampleSize + 1).dimensions(dimensions).timeDecay(lambda).build());
}
/** Omitting the (mandatory) dimensions setting must be rejected at build time. */
@Test
public void testIllegalExceptionIsThrownWhenDimensionIsNotProvided() {
    assertThrows(IllegalArgumentException.class,
            () -> RandomCutForest.builder().timeDecay(lambda).sampleSize(sampleSize)
                    .numberOfTrees(numberOfTrees).build());
}
/** A negative time-decay (lambda) value is invalid and must be rejected. */
@Test
public void testIllegalExceptionIsThrownWhenLambdaIsNegative() {
    assertThrows(IllegalArgumentException.class,
            () -> RandomCutForest.builder().dimensions(dimensions).sampleSize(sampleSize)
                    .numberOfTrees(numberOfTrees).timeDecay(-0.1).build());
}
/** With parallel execution enabled, a thread pool size of zero must be rejected. */
@Test
public void testIllegalExceptionIsThrownWhenPoolSizeIsZero() {
    assertThrows(IllegalArgumentException.class,
            () -> RandomCutForest.builder().dimensions(dimensions).sampleSize(sampleSize)
                    .numberOfTrees(numberOfTrees).parallelExecutionEnabled(true).threadPoolSize(0).build());
}
/** Disabling parallel execution leaves the forest with an empty (size-0) thread pool. */
@Test
public void testPoolSizeIsZeroWhenParallelExecutionIsDisabled() {
    RandomCutForest forest = RandomCutForest.builder().dimensions(dimensions).sampleSize(sampleSize)
            .numberOfTrees(numberOfTrees).parallelExecutionEnabled(false).build();
    assertFalse(forest.isParallelExecutionEnabled());
    assertEquals(0, forest.getThreadPoolSize());
}
}
| 396 |
0 | Create_ds/random-cut-forest-by-aws/Java/core/src/test/java/com/amazon | Create_ds/random-cut-forest-by-aws/Java/core/src/test/java/com/amazon/randomcutforest/AttributionExamplesFunctionalTest.java | /*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazon.randomcutforest;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.util.Random;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;
import com.amazon.randomcutforest.returntypes.DiVector;
import com.amazon.randomcutforest.testutils.NormalMixtureTestData;
/**
 * Functional tests demonstrating how anomaly-score attribution shifts as
 * repeated anomalous points are absorbed into the forest (score "masking"),
 * and how the ignore-duplicates option in the dynamic score/attribution API
 * can un-mask the original attribution.
 *
 * These tests are seed-sensitive: the assertions below depend on the fixed
 * random seeds and on the exact order of updates.
 */
@Tag("functional")
public class AttributionExamplesFunctionalTest {

    // NOTE(review): several of these static fields (parallelExecutionForest,
    // singleThreadedForest, forestSpy, dimensions, numberOfTrees) are not used
    // by the tests in this class — presumably copied from a sibling test class.
    private static int numberOfTrees;
    private static int sampleSize;
    private static int dimensions;
    private static int randomSeed;
    private static RandomCutForest parallelExecutionForest;
    private static RandomCutForest singleThreadedForest;
    private static RandomCutForest forestSpy;

    private static double baseMu;
    private static double baseSigma;
    private static double anomalyMu;
    private static double anomalySigma;
    private static double transitionToAnomalyProbability;
    private static double transitionToBaseProbability;
    private static int dataSize;

    /**
     * Shows attribution masking with the plain getAnomalyScore /
     * getAnomalyAttribution API: after queryOne is inserted repeatedly, the
     * attribution of queryTwo shifts from dimension 0 (the two clusters) to
     * dimension 1 (the +1 offset in the query), even though queryTwo remains
     * an anomaly throughout.
     */
    @Test
    public void RRCFattributionTest() {

        // starts with the same setup as rrcfTest; data corresponds to two small
        // clusters at x=+/-5.0
        // queries q_1=(0,0,0, ..., 0)
        // inserts updates (0,1,0, ..., 0) a few times
        // queries q_2=(0,1,0, ..., 0)
        // attribution of q_2 is now affected by q_1 (which is still an anomaly)

        int newDimensions = 30;
        randomSeed = 101;
        sampleSize = 256;

        RandomCutForest newForest = RandomCutForest.builder().numberOfTrees(100).sampleSize(sampleSize)
                .dimensions(newDimensions).randomSeed(randomSeed).compact(true).boundingBoxCacheFraction(0.0).build();

        dataSize = 2000 + 5;

        baseMu = 0.0;
        baseSigma = 1.0;
        anomalyMu = 0.0;
        anomalySigma = 1.0;
        transitionToAnomalyProbability = 0.0;
        // ignoring anomaly cluster for now
        transitionToBaseProbability = 1.0;

        Random prg = new Random(0);
        NormalMixtureTestData generator = new NormalMixtureTestData(baseMu, baseSigma, anomalyMu, anomalySigma,
                transitionToAnomalyProbability, transitionToBaseProbability);
        double[][] data = generator.generateTestData(dataSize, newDimensions, 100);

        for (int i = 0; i < 2000; i++) {
            // shrink, shift at random; this produces two tight clusters at
            // x[0] = +/-5 with tiny noise in the remaining dimensions
            for (int j = 0; j < newDimensions; j++)
                data[i][j] *= 0.01;

            if (prg.nextDouble() < 0.5)
                data[i][0] += 5.0;
            else
                data[i][0] -= 5.0;
            newForest.update(data[i]);
        }

        // queryOne is the all-zeros point; queryTwo differs only in dimension 1
        double[] queryOne = new double[newDimensions];
        double[] queryTwo = new double[newDimensions];
        queryTwo[1] = 1;
        double originalScoreTwo = newForest.getAnomalyScore(queryTwo);
        DiVector originalAttrTwo = newForest.getAnomalyAttribution(queryTwo);

        assertTrue(originalScoreTwo > 3.0);
        // attribution components must sum back to the score
        assertEquals(originalScoreTwo, originalAttrTwo.getHighLowSum(), 1E-5);

        assertTrue(originalAttrTwo.high[0] > 1.0); // due to -5 cluster
        assertTrue(originalAttrTwo.low[0] > 1.0); // due to +5 cluster
        assertTrue(originalAttrTwo.high[1] > 1); // due to +1 in query
        assertTrue(originalAttrTwo.getHighLowSum(0) > 1.1 * originalAttrTwo.getHighLowSum(1));

        // we insert queryOne a few times to make sure it is sampled
        for (int i = 2000; i < 2000 + 5; i++) {
            double score = newForest.getAnomalyScore(queryOne);
            double score2 = newForest.getAnomalyScore(queryTwo);
            DiVector attr2 = newForest.getAnomalyAttribution(queryTwo);

            // verify
            assertTrue(score > 2.0);
            assertTrue(score2 > 2.0);
            assertEquals(attr2.getHighLowSum(), score2, 1E-5);

            for (int j = 0; j < newDimensions; j++)
                data[i][j] *= 0.01;

            newForest.update(data[i]);
            // 5 different anomalous points
        }

        double midScoreTwo = newForest.getAnomalyScore(queryTwo);
        DiVector midAttrTwo = newForest.getAnomalyAttribution(queryTwo);

        assertTrue(midScoreTwo > 2.4);
        assertEquals(midScoreTwo, midAttrTwo.getHighLowSum(), 1E-5);

        assertTrue(midAttrTwo.high[0] < 1); // due to -5 cluster !!!
        assertTrue(midAttrTwo.low[0] < 1); // due to +5 cluster !!!
        assertTrue(midAttrTwo.high[1] > 1); // due to +1 in query
        assertTrue(midAttrTwo.getHighLowSum(0) < 1.1 * midAttrTwo.high[1]);
        // reversal of the dominant dimension
        // still an anomaly; but the attribution is masked by points

        // a few more updates, which are identical
        for (int i = 2005; i < 2010; i++) {
            newForest.update(queryOne);
        }

        double finalScoreTwo = newForest.getAnomalyScore(queryTwo);
        DiVector finalAttrTwo = newForest.getAnomalyAttribution(queryTwo);

        assertTrue(finalScoreTwo > 2.4);
        assertEquals(finalScoreTwo, finalAttrTwo.getHighLowSum(), 1E-5);

        assertTrue(finalAttrTwo.high[0] < 0.5); // due to -5 cluster !!!
        assertTrue(finalAttrTwo.low[0] < 0.5); // due to +5 cluster !!!
        assertTrue(finalAttrTwo.high[1] > 1); // due to +1 in query
        assertTrue(2.5 * finalAttrTwo.getHighLowSum(0) < finalAttrTwo.high[1]);
        // the drop in high[0] and low[0] is steep and the attribution has shifted
    }

    /**
     * Same setup as above, but exercises the dynamic score/attribution API.
     * With ignoreLeafMassThreshold > 0 the repeated copies of queryOne are
     * discounted, so the attribution of queryTwo keeps its original shape
     * (dimension 0 dominant) instead of being masked.
     */
    @Test
    public void attributionUnMaskingTest() {

        // starts with the same setup as rrcfTest; data corresponds to two small
        // clusters at x=+/-5.0
        // queries q_1=(0,0,0, ..., 0)
        // inserts updates (0,1,0, ..., 0) a few times
        // queries q_2=(0,1,0, ..., 0)
        // attribution of q_2 is now affected by q_1 (which is still an anomaly)

        int newDimensions = 30;
        randomSeed = 179;
        sampleSize = 256;

        RandomCutForest newForest = RandomCutForest.builder().numberOfTrees(100).sampleSize(sampleSize)
                .dimensions(newDimensions).randomSeed(randomSeed).compact(true)
                .boundingBoxCacheFraction(new Random().nextDouble()).timeDecay(1e-5).build();

        dataSize = 2000 + 5;

        baseMu = 0.0;
        baseSigma = 1.0;
        anomalyMu = 0.0;
        anomalySigma = 1.5;
        transitionToAnomalyProbability = 0.0;
        // ignoring anomaly cluster for now
        transitionToBaseProbability = 1.0;

        Random prg = new Random(0);
        NormalMixtureTestData generator = new NormalMixtureTestData(baseMu, baseSigma, anomalyMu, anomalySigma,
                transitionToAnomalyProbability, transitionToBaseProbability);
        double[][] data = generator.generateTestData(dataSize, newDimensions, 100);

        for (int i = 0; i < 2000; i++) {
            // shrink, shift at random
            for (int j = 0; j < newDimensions; j++)
                data[i][j] *= 0.01;

            if (prg.nextDouble() < 0.5)
                data[i][0] += 5.0;
            else
                data[i][0] -= 5.0;
            newForest.update(data[i]);
        }

        // NOTE(review): literal 30 here duplicates newDimensions — confirm they
        // are meant to stay in sync if newDimensions ever changes
        float[] queryOne = new float[30];
        float[] queryTwo = new float[30];
        queryTwo[1] = 1;
        double originalScoreTwo = newForest.getAnomalyScore(queryTwo);

        // testing approximation with precision 0 (no approximation)
        DiVector originalAttrTwo = newForest.getApproximateDynamicAttribution(queryTwo, 0, true, 0,
                CommonUtils::defaultScoreSeenFunction, CommonUtils::defaultScoreUnseenFunction,
                CommonUtils::defaultDampFunction);
        originalAttrTwo.componentwiseTransform(x -> CommonUtils.defaultScalarNormalizerFunction(x, sampleSize));

        assertTrue(originalScoreTwo > 3.0);
        assertEquals(originalScoreTwo, originalAttrTwo.getHighLowSum(), 1E-5);

        assertTrue(originalAttrTwo.high[0] > 0.75); // due to -5 cluster
        assertTrue(originalAttrTwo.low[0] > 0.75); // due to +5 cluster
        assertTrue(originalAttrTwo.high[1] > 1); // due to +1 in query
        assertTrue(originalAttrTwo.getHighLowSum(0) > originalAttrTwo.getHighLowSum(1));

        // approximate (precision 0.1) score must agree with the exact one
        double apx = newForest.getApproximateDynamicScore(queryTwo, 0.1, true, 0, CommonUtils::defaultScoreSeenFunction,
                CommonUtils::defaultScoreUnseenFunction, CommonUtils::defaultDampFunction);
        assertEquals(originalScoreTwo, CommonUtils.defaultScalarNormalizerFunction(apx, sampleSize), 0.2);
        assertEquals(apx,
                newForest
                        .getApproximateDynamicAttribution(queryTwo, 0.1, true, 0, CommonUtils::defaultScoreSeenFunction,
                                CommonUtils::defaultScoreUnseenFunction, CommonUtils::defaultDampFunction)
                        .getHighLowSum(),
                1e-5);

        // we insert queryOne a few times to make sure it is sampled
        for (int i = 2000; i < 2000 + 5; i++) {
            double score = newForest.getAnomalyScore(queryOne);
            double score2 = newForest.getAnomalyScore(queryTwo);
            // threshold 0: masked view; threshold 1: duplicates of the query
            // leaf are discounted (un-masked view)
            DiVector attr2 = newForest.getDynamicAttribution(queryTwo, 0, CommonUtils::defaultScoreSeenFunction,
                    CommonUtils::defaultScoreUnseenFunction, CommonUtils::defaultDampFunction);
            attr2.componentwiseTransform(x -> CommonUtils.defaultScalarNormalizerFunction(x, sampleSize));
            double score3 = newForest.getDynamicScore(queryTwo, 1, CommonUtils::defaultScoreSeenFunction,
                    CommonUtils::defaultScoreUnseenFunction, CommonUtils::defaultDampFunction);
            score3 = CommonUtils.defaultScalarNormalizerFunction(score3, sampleSize);
            DiVector attr3 = newForest.getDynamicAttribution(queryTwo, 1, CommonUtils::defaultScoreSeenFunction,
                    CommonUtils::defaultScoreUnseenFunction, CommonUtils::defaultDampFunction);
            attr3.componentwiseTransform(x -> CommonUtils.defaultScalarNormalizerFunction(x, sampleSize));

            // verify
            assertTrue(score > 2.0);
            assertTrue(score2 > 2.0);
            assertTrue(score3 > 2.0);
            assertEquals(attr2.getHighLowSum(), score2, 1E-5);
            assertEquals(attr3.getHighLowSum(), score3, 1E-5);

            for (int j = 0; j < newDimensions; j++)
                data[i][j] *= 0.01;

            newForest.update(data[i]);
            // 5 different anomalous points
        }

        double midScoreTwo = newForest.getAnomalyScore(queryTwo);
        DiVector midAttrTwo = newForest.getDynamicAttribution(queryTwo, 0, CommonUtils::defaultScoreSeenFunction,
                CommonUtils::defaultScoreUnseenFunction, CommonUtils::defaultDampFunction);
        midAttrTwo.componentwiseTransform(x -> CommonUtils.defaultScalarNormalizerFunction(x, sampleSize));

        assertTrue(midScoreTwo > 2.5);
        assertEquals(midScoreTwo, midAttrTwo.getHighLowSum(), 1E-5);

        assertTrue(midAttrTwo.high[1] > 1); // due to +1 in query
        assertTrue(midAttrTwo.getHighLowSum(0) < 1.2 * midAttrTwo.high[1]);
        // reversal of the dominant dimension
        // still an anomaly; but the attribution is masked by points

        double midUnmaskedScore = newForest.getDynamicScore(queryTwo, 1, CommonUtils::defaultScoreSeenFunction,
                CommonUtils::defaultScoreUnseenFunction, CommonUtils::defaultDampFunction);
        midUnmaskedScore = CommonUtils.defaultScalarNormalizerFunction(midUnmaskedScore, sampleSize);
        DiVector midUnmaskedAttr = newForest.getDynamicAttribution(queryTwo, 1, CommonUtils::defaultScoreSeenFunction,
                CommonUtils::defaultScoreUnseenFunction, CommonUtils::defaultDampFunction);
        midUnmaskedAttr.componentwiseTransform(x -> CommonUtils.defaultScalarNormalizerFunction(x, sampleSize));

        assertTrue(midUnmaskedScore > 3.0);
        assertEquals(midUnmaskedScore, midUnmaskedAttr.getHighLowSum(), 1E-5);

        assertTrue(midUnmaskedAttr.high[1] > 1); // due to +1 in query
        assertTrue(midUnmaskedAttr.getHighLowSum(0) > midUnmaskedAttr.getHighLowSum(1));
        // contribution from dimension 0 is still dominant

        // the attributions in dimension 0 are reduced, but do not
        // or become as small as quickly as in the other case

        // a few more updates, which are identical
        for (int i = 2005; i < 2010; i++) {
            newForest.update(queryOne);
        }

        double finalScoreTwo = newForest.getAnomalyScore(queryTwo);
        DiVector finalAttrTwo = newForest.getDynamicAttribution(queryTwo, 0, CommonUtils::defaultScoreSeenFunction,
                CommonUtils::defaultScoreUnseenFunction, CommonUtils::defaultDampFunction);
        finalAttrTwo.componentwiseTransform(x -> CommonUtils.defaultScalarNormalizerFunction(x, sampleSize));

        assertTrue(finalScoreTwo > 2.5);
        assertEquals(finalScoreTwo, finalAttrTwo.getHighLowSum(), 1E-5);

        assertTrue(finalAttrTwo.high[1] > 1); // due to +1 in query
        assertTrue(2 * finalAttrTwo.getHighLowSum(0) < finalAttrTwo.high[1]);
        // the drop in high[0] and low[0] is steep and the attribution has shifted

        // different thresholds
        double finalUnmaskedScore = newForest.getDynamicScore(queryTwo, 5, CommonUtils::defaultScoreSeenFunction,
                CommonUtils::defaultScoreUnseenFunction, CommonUtils::defaultDampFunction);
        finalUnmaskedScore = CommonUtils.defaultScalarNormalizerFunction(finalUnmaskedScore, sampleSize);
        DiVector finalUnmaskedAttr = newForest.getDynamicAttribution(queryTwo, 5, CommonUtils::defaultScoreSeenFunction,
                CommonUtils::defaultScoreUnseenFunction, CommonUtils::defaultDampFunction);
        finalUnmaskedAttr.componentwiseTransform(x -> CommonUtils.defaultScalarNormalizerFunction(x, sampleSize));

        assertTrue(finalUnmaskedScore > 3.0);
        assertEquals(finalUnmaskedScore, finalUnmaskedAttr.getHighLowSum(), 1E-5);

        assertTrue(finalUnmaskedAttr.high[1] > 1); // due to +1 in query
        assertTrue(finalUnmaskedAttr.getHighLowSum(0) > 0.8 * finalUnmaskedAttr.getHighLowSum(1));
        // the attributions in dimension 0 continue to be reduced, but do not vanish
        // or become small as in the other case; the gap is not a factor of 4
    }
}
| 397 |
0 | Create_ds/random-cut-forest-by-aws/Java/core/src/test/java/com/amazon | Create_ds/random-cut-forest-by-aws/Java/core/src/test/java/com/amazon/randomcutforest/RandomCutForestTest.java | /*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazon.randomcutforest;
import static com.amazon.randomcutforest.CommonUtils.toDoubleArray;
import static com.amazon.randomcutforest.TestUtils.EPSILON;
import static java.lang.Math.PI;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.core.Is.is;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.AdditionalMatchers.aryEq;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyDouble;
import static org.mockito.ArgumentMatchers.anyInt;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.reset;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Optional;
import java.util.Random;
import java.util.function.BinaryOperator;
import java.util.function.Function;
import java.util.stream.Collector;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.powermock.reflect.Whitebox;
import com.amazon.randomcutforest.config.Config;
import com.amazon.randomcutforest.config.Precision;
import com.amazon.randomcutforest.executor.AbstractForestTraversalExecutor;
import com.amazon.randomcutforest.executor.AbstractForestUpdateExecutor;
import com.amazon.randomcutforest.executor.IStateCoordinator;
import com.amazon.randomcutforest.executor.PointStoreCoordinator;
import com.amazon.randomcutforest.executor.SamplerPlusTree;
import com.amazon.randomcutforest.executor.SequentialForestTraversalExecutor;
import com.amazon.randomcutforest.executor.SequentialForestUpdateExecutor;
import com.amazon.randomcutforest.returntypes.ConvergingAccumulator;
import com.amazon.randomcutforest.returntypes.DensityOutput;
import com.amazon.randomcutforest.returntypes.DiVector;
import com.amazon.randomcutforest.returntypes.InterpolationMeasure;
import com.amazon.randomcutforest.returntypes.Neighbor;
import com.amazon.randomcutforest.returntypes.OneSidedConvergingDiVectorAccumulator;
import com.amazon.randomcutforest.returntypes.OneSidedConvergingDoubleAccumulator;
import com.amazon.randomcutforest.returntypes.RangeVector;
import com.amazon.randomcutforest.returntypes.SampleSummary;
import com.amazon.randomcutforest.sampler.CompactSampler;
import com.amazon.randomcutforest.state.RandomCutForestMapper;
import com.amazon.randomcutforest.state.RandomCutForestState;
import com.amazon.randomcutforest.store.PointStore;
import com.amazon.randomcutforest.tree.ITree;
import com.amazon.randomcutforest.tree.RandomCutTree;
import com.amazon.randomcutforest.util.ShingleBuilder;
public class RandomCutForestTest {
private int dimensions;
private int sampleSize;
private int numberOfTrees;
private ComponentList<Integer, float[]> components;
private AbstractForestTraversalExecutor traversalExecutor;
private IStateCoordinator<Integer, float[]> updateCoordinator;
private AbstractForestUpdateExecutor<Integer, float[]> updateExecutor;
private RandomCutForest forest;
/**
 * Builds a forest of mocked sampler+tree components wired to spied
 * sequential executors, so each test can stub tree traversals and verify
 * delegation per component.
 */
@BeforeEach
public void setUp() {
    dimensions = 2;
    sampleSize = 256;
    numberOfTrees = 10;

    components = new ComponentList<>();
    for (int i = 0; i < numberOfTrees; i++) {
        // each component pairs a mocked sampler (fixed capacity) with a
        // mocked tree; the SamplerPlusTree wrapper itself is a spy
        CompactSampler sampler = mock(CompactSampler.class);
        when(sampler.getCapacity()).thenReturn(sampleSize);
        RandomCutTree tree = mock(RandomCutTree.class);
        components.add(spy(new SamplerPlusTree<>(sampler, tree)));
    }
    updateCoordinator = spy(
            new PointStoreCoordinator<>(new PointStore.Builder().dimensions(2).capacity(1).build()));
    traversalExecutor = spy(new SequentialForestTraversalExecutor(components));
    updateExecutor = spy(new SequentialForestUpdateExecutor<>(updateCoordinator, components));

    RandomCutForest.Builder<?> builder = RandomCutForest.builder().dimensions(dimensions)
            .numberOfTrees(numberOfTrees).sampleSize(sampleSize);
    forest = spy(new RandomCutForest(builder, updateCoordinator, components, builder.getRandom()));

    // swap the forest's internal executors for the spies so tests can
    // verify(...) calls made through the forest
    Whitebox.setInternalState(forest, "traversalExecutor", traversalExecutor);
    Whitebox.setInternalState(forest, "updateExecutor", updateExecutor);
}
/** update(point) must delegate exactly once to the update executor. */
@Test
public void testUpdate() {
    float[] samplePoint = { 2.2f, -1.1f };
    forest.update(samplePoint);
    verify(updateExecutor, times(1)).update(samplePoint);
}
/** Null points and points with the wrong number of dimensions are rejected. */
@Test
public void testUpdateInvalid() {
    assertThrows(NullPointerException.class, () -> forest.update((double[]) null));
    assertThrows(NullPointerException.class, () -> forest.update((float[]) null));
    double[] threeDimensionalPoint = { 1.2, 3.4, -5.6 };
    assertThrows(IllegalArgumentException.class, () -> forest.update(threeDimensionalPoint));
}
/**
 * traverseForest with a binary accumulator must delegate once to the
 * traversal executor with the same arguments.
 */
@Test
public void testTraverseForestBinaryAccumulator() {
    float[] point = { 2.2f, -1.1f };
    BinaryOperator<Double> accumulator = Double::sum;
    Function<Double, Double> finisher = x -> x / numberOfTrees;

    // stub every component so the traversal completes without real trees
    components.forEach(c -> {
        doReturn(0.0).when(c).traverse(aryEq(point), any(VisitorFactory.class));
    });

    forest.traverseForest(point, TestUtils.DUMMY_GENERIC_VISITOR_FACTORY, accumulator, finisher);
    verify(traversalExecutor, times(1)).traverseForest(point, TestUtils.DUMMY_GENERIC_VISITOR_FACTORY, accumulator,
            finisher);
}
/**
 * Argument validation for traverseForest with a binary accumulator: null
 * point, wrong dimensions, null visitor factory, null accumulator, and null
 * finisher must all be rejected. (Method name typo "Tranverse" is kept for
 * stable test reporting.)
 */
@Test
public void testTranverseForestBinaryAccumulatorInvalid() {
    float[] point = { 2.2f, -1.1f };
    BinaryOperator<Double> accumulator = Double::sum;
    Function<Double, Double> finisher = x -> x / numberOfTrees;

    components.forEach(c -> {
        when(c.traverse(aryEq(point), any())).thenReturn(0.0);
    });

    assertThrows(NullPointerException.class,
            () -> forest.traverseForest(null, TestUtils.DUMMY_GENERIC_VISITOR_FACTORY, accumulator, finisher));
    assertThrows(IllegalArgumentException.class, () -> forest.traverseForest(new float[] { 2.2f, -1.1f, 3.3f },
            TestUtils.DUMMY_GENERIC_VISITOR_FACTORY, accumulator, finisher));
    assertThrows(NullPointerException.class, () -> forest.traverseForest(point, null, accumulator, finisher));
    assertThrows(NullPointerException.class, () -> forest.traverseForest(point,
            TestUtils.DUMMY_GENERIC_VISITOR_FACTORY, (BinaryOperator<Double>) null, finisher));
    assertThrows(NullPointerException.class,
            () -> forest.traverseForest(point, TestUtils.DUMMY_GENERIC_VISITOR_FACTORY, accumulator, null));
}
/**
 * traverseForest with a Collector must delegate once to the traversal
 * executor with the same arguments.
 */
@Test
public void testTraverseForestCollector() {
    float[] point = { 2.2f, -1.1f };

    components.forEach(c -> {
        doReturn(0.0).when(c).traverse(aryEq(point), any(VisitorFactory.class));
    });

    forest.traverseForest(point, TestUtils.DUMMY_GENERIC_VISITOR_FACTORY, TestUtils.SORTED_LIST_COLLECTOR);
    verify(traversalExecutor, times(1)).traverseForest(point, TestUtils.DUMMY_GENERIC_VISITOR_FACTORY,
            TestUtils.SORTED_LIST_COLLECTOR);
}
/**
 * Argument validation for traverseForest with a Collector: null point,
 * wrong dimensions, null visitor factory, and null collector must all be
 * rejected.
 */
@Test
public void testTranverseForestCollectorInvalid() {
    float[] point = { 2.2f, -1.1f };

    components.forEach(c -> {
        when(c.traverse(aryEq(point), any())).thenReturn(0.0);
    });

    assertThrows(NullPointerException.class, () -> forest.traverseForest(null,
            TestUtils.DUMMY_GENERIC_VISITOR_FACTORY, TestUtils.SORTED_LIST_COLLECTOR));
    assertThrows(IllegalArgumentException.class, () -> forest.traverseForest(new float[] { 2.2f, -1.1f, 3.3f },
            TestUtils.DUMMY_GENERIC_VISITOR_FACTORY, TestUtils.SORTED_LIST_COLLECTOR));
    assertThrows(NullPointerException.class,
            () -> forest.traverseForest(point, null, TestUtils.SORTED_LIST_COLLECTOR));
    assertThrows(NullPointerException.class,
            () -> forest.traverseForest(point, TestUtils.DUMMY_GENERIC_VISITOR_FACTORY, null));
}
/**
 * traverseForest with a ConvergingAccumulator must delegate once to the
 * traversal executor with the same arguments.
 */
@Test
public void testTraverseForestConverging() {
    float[] point = new float[] { 1.2f, -3.4f };
    // accumulator declared converged after half the trees have reported
    int convergenceThreshold = numberOfTrees / 2;
    ConvergingAccumulator<Double> accumulator = TestUtils.convergeAfter(convergenceThreshold);
    Function<Double, Double> finisher = x -> x / accumulator.getValuesAccepted();

    components.forEach(c -> {
        doReturn(0.0).when(c).traverse(aryEq(point), any(VisitorFactory.class));
    });

    forest.traverseForest(point, TestUtils.DUMMY_GENERIC_VISITOR_FACTORY, accumulator, finisher);
    verify(traversalExecutor, times(1)).traverseForest(point, TestUtils.DUMMY_GENERIC_VISITOR_FACTORY, accumulator,
            finisher);
}
/**
 * Argument validation for traverseForest with a ConvergingAccumulator:
 * null point, wrong dimensions, null visitor factory, null accumulator,
 * and null finisher must all be rejected.
 */
@Test
public void testTraverseForestConvergingInvalid() {
    float[] point = new float[] { 1.2f, -3.4f };
    int convergenceThreshold = numberOfTrees / 2;
    ConvergingAccumulator<Double> accumulator = TestUtils.convergeAfter(convergenceThreshold);
    Function<Double, Double> finisher = x -> x / accumulator.getValuesAccepted();

    components.forEach(c -> {
        when(c.traverse(aryEq(point), any())).thenReturn(0.0);
    });

    assertThrows(NullPointerException.class,
            () -> forest.traverseForest(null, TestUtils.DUMMY_GENERIC_VISITOR_FACTORY, accumulator, finisher));
    assertThrows(IllegalArgumentException.class, () -> forest.traverseForest(new float[] { 1.2f, -3.4f, 5.6f },
            TestUtils.DUMMY_GENERIC_VISITOR_FACTORY, accumulator, finisher));
    assertThrows(NullPointerException.class, () -> forest.traverseForest(point, null, accumulator, finisher));
    assertThrows(NullPointerException.class, () -> forest.traverseForest(point,
            TestUtils.DUMMY_GENERIC_VISITOR_FACTORY, (ConvergingAccumulator<Double>) null, finisher));
    assertThrows(NullPointerException.class,
            () -> forest.traverseForest(point, TestUtils.DUMMY_GENERIC_VISITOR_FACTORY, accumulator, null));
}
/**
 * traverseForestMulti with a binary accumulator must delegate once to the
 * traversal executor with the same arguments.
 */
@Test
public void traverseForestMultiBinaryAccumulator() {
    float[] point = { 2.2f, -1.1f };
    BinaryOperator<Double> accumulator = Double::sum;
    Function<Double, Double> finisher = x -> x / numberOfTrees;

    // multi-visitor path: stub traverseMulti rather than traverse
    components.forEach(c -> {
        doReturn(0.0).when(c).traverseMulti(aryEq(point), any(MultiVisitorFactory.class));
    });

    forest.traverseForestMulti(point, TestUtils.DUMMY_GENERIC_MULTI_VISITOR_FACTORY, accumulator, finisher);
    verify(traversalExecutor, times(1)).traverseForestMulti(point, TestUtils.DUMMY_GENERIC_MULTI_VISITOR_FACTORY,
            accumulator, finisher);
}
/**
 * Argument validation for traverseForestMulti with a binary accumulator:
 * null point, wrong dimensions, null visitor factory, null accumulator,
 * and null finisher must all be rejected.
 */
@Test
public void testTranverseForestMultiBinaryAccumulatorInvalid() {
    float[] point = { 2.2f, -1.1f };
    BinaryOperator<Double> accumulator = Double::sum;
    Function<Double, Double> finisher = x -> x / numberOfTrees;

    components.forEach(c -> {
        when(c.traverseMulti(aryEq(point), any())).thenReturn(0.0);
    });

    assertThrows(NullPointerException.class, () -> forest.traverseForestMulti(null,
            TestUtils.DUMMY_GENERIC_MULTI_VISITOR_FACTORY, accumulator, finisher));
    assertThrows(IllegalArgumentException.class, () -> forest.traverseForestMulti(new float[] { 2.2f, -1.1f, 3.3f },
            TestUtils.DUMMY_GENERIC_MULTI_VISITOR_FACTORY, accumulator, finisher));
    assertThrows(NullPointerException.class, () -> forest.traverseForestMulti(point, null, accumulator, finisher));
    assertThrows(NullPointerException.class, () -> forest.traverseForestMulti(point,
            TestUtils.DUMMY_GENERIC_MULTI_VISITOR_FACTORY, (BinaryOperator<Double>) null, finisher));
    assertThrows(NullPointerException.class, () -> forest.traverseForestMulti(point,
            TestUtils.DUMMY_GENERIC_MULTI_VISITOR_FACTORY, accumulator, null));
}
/**
 * traverseForestMulti with a Collector must delegate once to the traversal
 * executor with the same arguments.
 */
@Test
public void testTraverseForestMultiCollector() {
    float[] point = { 2.2f, -1.1f };

    components.forEach(c -> {
        doReturn(0.0).when(c).traverseMulti(aryEq(point), any(MultiVisitorFactory.class));
    });

    forest.traverseForestMulti(point, TestUtils.DUMMY_GENERIC_MULTI_VISITOR_FACTORY,
            TestUtils.SORTED_LIST_COLLECTOR);
    verify(traversalExecutor, times(1)).traverseForestMulti(point, TestUtils.DUMMY_GENERIC_MULTI_VISITOR_FACTORY,
            TestUtils.SORTED_LIST_COLLECTOR);
}
/**
 * Argument validation for traverseForestMulti with a Collector: null point,
 * wrong dimensions, null visitor factory, and null collector must all be
 * rejected.
 */
@Test
public void testTranverseForestCollectorMultiInvalid() {
    float[] point = { 2.2f, -1.1f };

    components.forEach(c -> {
        // Stub traverseMulti — the code path under test — matching the other
        // multi tests; the previous stubbing of traverse was a copy-paste slip
        // (harmless here since every call throws before traversal).
        when(c.traverseMulti(aryEq(point), any())).thenReturn(0.0);
    });

    assertThrows(NullPointerException.class, () -> forest.traverseForestMulti(null,
            TestUtils.DUMMY_GENERIC_MULTI_VISITOR_FACTORY, TestUtils.SORTED_LIST_COLLECTOR));
    assertThrows(IllegalArgumentException.class, () -> forest.traverseForestMulti(new float[] { 2.2f, -1.1f, 3.3f },
            TestUtils.DUMMY_GENERIC_MULTI_VISITOR_FACTORY, TestUtils.SORTED_LIST_COLLECTOR));
    assertThrows(NullPointerException.class,
            () -> forest.traverseForestMulti(point, null, TestUtils.SORTED_LIST_COLLECTOR));
    assertThrows(NullPointerException.class,
            () -> forest.traverseForestMulti(point, TestUtils.DUMMY_GENERIC_MULTI_VISITOR_FACTORY, null));
}
/**
 * getAnomalyScore returns 0 before the forest is output-ready, and the
 * average of the per-tree scores afterwards.
 */
@Test
public void testGetAnomalyScore() {
    float[] point = { 1.2f, -3.4f };

    assertFalse(forest.isOutputReady());
    assertEquals(0.0, forest.getAnomalyScore(point));

    // force the output-ready state without filling the samplers
    doReturn(true).when(forest).isOutputReady();
    double expectedResult = 0.0;

    for (int i = 0; i < numberOfTrees; i++) {
        SamplerPlusTree<Integer, float[]> component = (SamplerPlusTree<Integer, float[]>) components.get(i);
        ITree<Integer, float[]> tree = component.getTree();
        double treeResult = Math.random();
        when(tree.traverse(aryEq(point), any(IVisitorFactory.class))).thenReturn(treeResult);
        when(tree.getMass()).thenReturn(256);
        expectedResult += treeResult;
    }

    // forest score is the mean of the tree scores
    expectedResult /= numberOfTrees;
    assertEquals(expectedResult, forest.getAnomalyScore(point), EPSILON);
}
/**
 * getApproximateAnomalyScore returns 0 before the forest is output-ready,
 * and afterwards the converged average — reproduced here by feeding the
 * same per-tree scores through an equivalent one-sided accumulator.
 */
@Test
public void testGetApproximateAnomalyScore() {
    float[] point = { 1.2f, -3.4f };

    assertFalse(forest.isOutputReady());
    assertEquals(0.0, forest.getApproximateAnomalyScore(point));

    doReturn(true).when(forest).isOutputReady();

    // mirror accumulator configured exactly like the forest's internal one
    ConvergingAccumulator<Double> accumulator = new OneSidedConvergingDoubleAccumulator(
            RandomCutForest.DEFAULT_APPROXIMATE_ANOMALY_SCORE_HIGH_IS_CRITICAL,
            RandomCutForest.DEFAULT_APPROXIMATE_DYNAMIC_SCORE_PRECISION,
            RandomCutForest.DEFAULT_APPROXIMATE_DYNAMIC_SCORE_MIN_VALUES_ACCEPTED, numberOfTrees);

    for (int i = 0; i < numberOfTrees; i++) {
        SamplerPlusTree<Integer, float[]> component = (SamplerPlusTree<Integer, float[]>) components.get(i);
        ITree<Integer, float[]> tree = component.getTree();
        double treeResult = Math.random();
        when(tree.traverse(aryEq(point), any(IVisitorFactory.class))).thenReturn(treeResult);
        when(tree.getMass()).thenReturn(256);
        // stop feeding once converged, as the forest's traversal would
        if (!accumulator.isConverged()) {
            accumulator.accept(treeResult);
        }
    }

    double expectedResult = accumulator.getAccumulatedValue() / accumulator.getValuesAccepted();
    assertEquals(expectedResult, forest.getApproximateAnomalyScore(point), EPSILON);
}
/**
 * getAnomalyAttribution returns a zero DiVector before the forest is
 * output-ready, and the average of the per-tree DiVectors afterwards.
 */
@Test
public void testGetAnomalyAttribution() {
    float[] point = { 1.2f, -3.4f };

    assertFalse(forest.isOutputReady());
    DiVector zero = new DiVector(dimensions);
    DiVector result = forest.getAnomalyAttribution(point);
    assertArrayEquals(zero.high, result.high);
    assertArrayEquals(zero.low, result.low);

    doReturn(true).when(forest).isOutputReady();
    DiVector expectedResult = new DiVector(dimensions);

    for (int i = 0; i < numberOfTrees; i++) {
        // random high/low attribution per tree
        DiVector treeResult = new DiVector(dimensions);
        for (int j = 0; j < dimensions; j++) {
            treeResult.high[j] = Math.random();
            treeResult.low[j] = Math.random();
        }

        SamplerPlusTree<Integer, float[]> component = (SamplerPlusTree<Integer, float[]>) components.get(i);
        ITree<Integer, float[]> tree = component.getTree();
        when(tree.traverse(aryEq(point), any(VisitorFactory.class))).thenReturn(treeResult);
        when(tree.getMass()).thenReturn(256);

        DiVector.addToLeft(expectedResult, treeResult);
    }

    // forest attribution is the mean of the tree attributions
    expectedResult = expectedResult.scale(1.0 / numberOfTrees);
    result = forest.getAnomalyAttribution(point);
    assertArrayEquals(expectedResult.high, result.high, EPSILON);
    assertArrayEquals(expectedResult.low, result.low, EPSILON);
}
/**
 * getApproximateAnomalyAttribution returns a zero DiVector before the
 * forest is output-ready, and afterwards the converged average — mirrored
 * here with an equivalent one-sided DiVector accumulator.
 */
@Test
public void testGetApproximateAnomalyAttribution() {
    float[] point = { 1.2f, -3.4f };

    DiVector zero = new DiVector(dimensions);
    DiVector result = forest.getApproximateAnomalyAttribution(point);

    assertFalse(forest.isOutputReady());
    assertArrayEquals(zero.high, result.high, EPSILON);
    assertArrayEquals(zero.low, result.low, EPSILON);

    doReturn(true).when(forest).isOutputReady();

    // mirror accumulator configured exactly like the forest's internal one
    ConvergingAccumulator<DiVector> accumulator = new OneSidedConvergingDiVectorAccumulator(dimensions,
            RandomCutForest.DEFAULT_APPROXIMATE_ANOMALY_SCORE_HIGH_IS_CRITICAL,
            RandomCutForest.DEFAULT_APPROXIMATE_DYNAMIC_SCORE_PRECISION,
            RandomCutForest.DEFAULT_APPROXIMATE_DYNAMIC_SCORE_MIN_VALUES_ACCEPTED, numberOfTrees);

    for (int i = 0; i < numberOfTrees; i++) {
        SamplerPlusTree<Integer, float[]> component = (SamplerPlusTree<Integer, float[]>) components.get(i);
        ITree<Integer, float[]> tree = component.getTree();
        DiVector treeResult = new DiVector(dimensions);
        for (int j = 0; j < dimensions; j++) {
            treeResult.high[j] = Math.random();
            treeResult.low[j] = Math.random();
        }

        when(tree.traverse(aryEq(point), any(VisitorFactory.class))).thenReturn(treeResult);
        when(tree.getMass()).thenReturn(256);

        if (!accumulator.isConverged()) {
            accumulator.accept(treeResult);
        }
    }

    DiVector expectedResult = accumulator.getAccumulatedValue().scale(1.0 / accumulator.getValuesAccepted());
    result = forest.getApproximateAnomalyAttribution(point);
    assertArrayEquals(expectedResult.high, result.high, EPSILON);
    assertArrayEquals(expectedResult.low, result.low, EPSILON);
}
/**
 * getSimpleDensity returns the zero density before the samplers are full,
 * and afterwards the density of the collected per-tree interpolation
 * measures.
 */
@Test
public void testGetSimpleDensity() {
    float[] point = { 12.3f, -45.6f };
    DensityOutput zero = new DensityOutput(dimensions, sampleSize);
    assertFalse(forest.samplersFull());
    DensityOutput result = forest.getSimpleDensity(point);
    assertEquals(zero.getDensity(), result.getDensity(), EPSILON);

    // force the samplers-full state without inserting points
    doReturn(true).when(forest).samplersFull();
    List<InterpolationMeasure> intermediateResults = new ArrayList<>();

    for (int i = 0; i < numberOfTrees; i++) {
        // random interpolation measure per tree
        InterpolationMeasure treeResult = new InterpolationMeasure(dimensions, sampleSize);
        for (int j = 0; j < dimensions; j++) {
            treeResult.measure.high[j] = Math.random();
            treeResult.measure.low[j] = Math.random();
            treeResult.distances.high[j] = Math.random();
            treeResult.distances.low[j] = Math.random();
            treeResult.probMass.high[j] = Math.random();
            treeResult.probMass.low[j] = Math.random();
        }

        SamplerPlusTree<Integer, float[]> component = (SamplerPlusTree<Integer, float[]>) components.get(i);
        ITree<Integer, float[]> tree = component.getTree();
        when(tree.traverse(aryEq(point), any(VisitorFactory.class))).thenReturn(treeResult);
        intermediateResults.add(treeResult);
    }

    // expected density combines the tree measures with the same collector
    // the forest uses
    Collector<InterpolationMeasure, ?, InterpolationMeasure> collector = InterpolationMeasure.collector(dimensions,
            sampleSize, numberOfTrees);
    DensityOutput expectedResult = new DensityOutput(intermediateResults.stream().collect(collector));
    result = forest.getSimpleDensity(point);
    assertEquals(expectedResult.getDensity(), result.getDensity(), EPSILON);
}
// imputeMissingValues must validate its arguments before doing any work.
@Test
public void testImputeMissingValuesInvalid() {
    float[] point = { 12.3f, -45.6f };
    int[] missingIndexes = { 0, 1 };
    int numberOfMissingValues = 1;
    // a negative count of missing values is rejected
    assertThrows(IllegalArgumentException.class, () -> forest.imputeMissingValues(point, -1, missingIndexes));
    // a null index array is rejected
    assertThrows(NullPointerException.class, () -> forest.imputeMissingValues(point, numberOfMissingValues, null));
    // a null query point is rejected
    assertThrows(IllegalArgumentException.class,
            () -> forest.imputeMissingValues((float[]) null, numberOfMissingValues, missingIndexes));
    // requesting more missing values than the forest has dimensions is rejected
    assertThrows(IllegalArgumentException.class, () -> forest.imputeMissingValues(point, 99, missingIndexes));
}
// With zero missing values requested the imputation result is all zeros.
@Test
public void testImputeMissingValuesWithNoMissingValues() {
    float[] point = { 12.3f, -45.6f };
    // the second index is irrelevant: zero missing values are requested
    int[] missingIndexes = { 1, 1000 };
    double[] result = forest.imputeMissingValues(toDoubleArray(point), 0, missingIndexes);
    assertArrayEquals(new double[] { 0.0, 0.0 }, result);
}
// Imputation before the forest is output-ready returns the zero vector.
@Test
public void testImputMissingValuesWithOutputNotReady() {
    double[] point = { 12.3, -45.6 };
    int numberOfMissingValues = 1;
    // the second index is irrelevant: only one missing value is requested
    int[] missingIndexes = { 1, 1000 };
    assertFalse(forest.isOutputReady());
    double[] zero = new double[dimensions];
    assertArrayEquals(zero, forest.imputeMissingValues(point, numberOfMissingValues, missingIndexes));
}
@Test
public void testExtrapolateBasic() {
doNothing().when(forest).extrapolateBasicCyclic(any(RangeVector.class), anyInt(), anyInt(), anyInt(),
any(float[].class), any(int[].class), anyDouble());
doNothing().when(forest).extrapolateBasicSliding(any(RangeVector.class), anyInt(), anyInt(), any(float[].class),
any(int[].class), anyDouble());
double[] point = new double[] { 2.0, -3.0 };
int horizon = 2;
int blockSize = 1;
boolean cyclic = true;
int shingleIndex = 1;
forest.extrapolateBasic(point, horizon, blockSize, cyclic, shingleIndex);
verify(forest).extrapolateBasicCyclic(any(RangeVector.class), eq(horizon), eq(blockSize), eq(shingleIndex),
any(float[].class), any(int[].class), anyDouble());
forest.extrapolateBasic(point, horizon, blockSize, cyclic);
verify(forest).extrapolateBasicCyclic(any(RangeVector.class), eq(horizon), eq(blockSize), eq(0),
any(float[].class), any(int[].class), anyDouble());
cyclic = false;
forest.extrapolateBasic(point, horizon, blockSize, cyclic, shingleIndex);
forest.extrapolateBasic(point, horizon, blockSize, cyclic);
verify(forest, times(2)).extrapolateBasicSliding(any(RangeVector.class), eq(horizon), eq(blockSize),
any(float[].class), any(int[].class), anyDouble());
}
// Argument validation for extrapolateBasic: block size bounds, null point,
// divisibility of dimensions by block size, and shingle index range.
@Test
public void testExtrapolateBasicInvalid() {
    double[] point = new double[] { 2.0, -3.0 };
    int horizon = 2;
    int blockSize = 1;
    boolean cyclic = true;
    int shingleIndex = 1;
    // block size must lie strictly between 0 and the forest's dimensions
    assertThrows(IllegalArgumentException.class,
            () -> forest.extrapolateBasic(point, horizon, -10, cyclic, shingleIndex));
    assertThrows(IllegalArgumentException.class,
            () -> forest.extrapolateBasic(point, horizon, 0, cyclic, shingleIndex));
    assertThrows(IllegalArgumentException.class,
            () -> forest.extrapolateBasic(point, horizon, dimensions, cyclic, shingleIndex));
    assertThrows(IllegalArgumentException.class,
            () -> forest.extrapolateBasic(point, horizon, dimensions * 2, cyclic, shingleIndex));
    // a null query point is rejected
    assertThrows(NullPointerException.class,
            () -> forest.extrapolateBasic((double[]) null, horizon, blockSize, cyclic, shingleIndex));
    RandomCutForest bigForest = RandomCutForest.defaultForest(20);
    double[] bigPoint = new double[20];
    // dimensions (20) not divisible by blockSize (7)
    assertThrows(IllegalArgumentException.class,
            () -> bigForest.extrapolateBasic(bigPoint, horizon, 7, cyclic, shingleIndex));
    // shingle index values outside the valid range
    assertThrows(IllegalArgumentException.class, () -> bigForest.extrapolateBasic(point, horizon, 5, cyclic, -1));
    assertThrows(IllegalArgumentException.class, () -> bigForest.extrapolateBasic(point, horizon, 5, cyclic, 4));
    assertThrows(IllegalArgumentException.class, () -> bigForest.extrapolateBasic(point, horizon, 4, cyclic, 44));
}
// Verifies the ShingleBuilder overload of extrapolateBasic: a cyclic builder
// routes to the cyclic helper (with shingle index 0), a non-cyclic builder to
// the sliding helper.
@Test
public void testExrapolateBasicWithShingleBuilder() {
    // stub both helpers; only the dispatch is under test
    doNothing().when(forest).extrapolateBasicCyclic(any(RangeVector.class), anyInt(), anyInt(), anyInt(),
            any(float[].class), any(int[].class), anyDouble());
    doNothing().when(forest).extrapolateBasicSliding(any(RangeVector.class), anyInt(), anyInt(), any(float[].class),
            any(int[].class), anyDouble());
    ShingleBuilder shingleBuilder = new ShingleBuilder(1, 2, true);
    int horizon = 3;
    forest.extrapolateBasic(shingleBuilder, horizon);
    verify(forest, times(1)).extrapolateBasicCyclic(any(RangeVector.class), eq(horizon), eq(1), eq(0),
            any(float[].class), any(int[].class), anyDouble());
    // a non-cyclic builder must use the sliding variant
    shingleBuilder = new ShingleBuilder(1, 2, false);
    forest.extrapolateBasic(shingleBuilder, horizon);
    verify(forest, times(1)).extrapolateBasicSliding(any(RangeVector.class), eq(horizon), eq(1), any(float[].class),
            any(int[].class), anyDouble());
}
// Verifies extrapolateBasicSliding: consecutive conditional field summaries are
// appended block by block, and the resulting RangeVector keeps the invariant
// upper >= value >= lower, also after shift/scale operations.
@Test
public void testExtrapolateBasicSliding() {
    int horizon = 3;
    int blockSize = 2;
    RangeVector result = new RangeVector(dimensions * horizon);
    float[] queryPoint = new float[] { 1.0f, -2.0f };
    int[] missingIndexes = new int[blockSize];
    // one stubbed summary per horizon step, returned in order
    doReturn(new SampleSummary(new float[] { 2.0f, -3.0f }))
            .doReturn(new SampleSummary(new float[] { 4.0f, -5.0f }))
            .doReturn(new SampleSummary(new float[] { 6.0f, -7.0f })).when(forest)
            .getConditionalFieldSummary(aryEq(queryPoint), eq(blockSize), any(int[].class), anyDouble());
    forest.extrapolateBasicSliding(result, horizon, blockSize, queryPoint, missingIndexes, 1.0);
    // sliding mode appends each block in arrival order
    float[] expectedResult = new float[] { 2.0f, -3.0f, 4.0f, -5.0f, 6.0f, -7.0f };
    assertArrayEquals(expectedResult, result.values);
    // test properties of RangeVector as well
    for (int i = 0; i < 6; i++) {
        assert (result.upper[i] >= result.values[i]);
        assert (result.lower[i] <= result.values[i]);
    }
    // validate subsequent operations (typically used in parkservices)
    expectedResult[0] = 0f; // shifting entry 0 by -2 takes 2.0f to 0f
    RangeVector newVector = new RangeVector(expectedResult);
    RangeVector another = new RangeVector(result);
    another.shift(0, -2.0f);
    another.scale(2, 0.25f);
    newVector.scale(2, 0.25f);
    assertArrayEquals(newVector.values, another.values, 1e-6f);
    for (int i = 0; i < 6; i++) {
        assert (another.upper[i] >= another.values[i]);
        assert (another.lower[i] <= another.values[i]);
    }
}
// Verifies extrapolateBasicCyclic: with a nonzero shingle index the values
// inside each emitted block are rotated, and the RangeVector invariant
// upper >= value >= lower holds for every entry.
@Test
public void testExtrapolateBasicCyclic() {
    int horizon = 3;
    int blockSize = 2;
    RangeVector result = new RangeVector(dimensions * horizon);
    int shingleIndex = 1;
    float[] queryPoint = new float[] { 1.0f, -2.0f };
    int[] missingIndexes = new int[blockSize];
    // one stubbed summary per horizon step, returned in order
    doReturn(new SampleSummary(new float[] { 2.0f, -3.0f }))
            .doReturn(new SampleSummary(new float[] { 4.0f, -5.0f }))
            .doReturn(new SampleSummary(new float[] { 6.0f, -7.0f })).when(forest)
            .getConditionalFieldSummary(aryEq(queryPoint), eq(blockSize), any(int[].class), anyDouble());
    forest.extrapolateBasicCyclic(result, horizon, blockSize, shingleIndex, queryPoint, missingIndexes, 1.0);
    // shingleIndex 1 rotates each block of size 2 by one position
    float[] expectedResult = new float[] { -3.0f, 2.0f, -5.0f, 4.0f, -7.0f, 6.0f };
    assertArrayEquals(expectedResult, result.values);
    // test properties of RangeVector as well
    for (int i = 0; i < 6; i++) {
        assert (result.upper[i] >= result.values[i]);
        assert (result.lower[i] <= result.values[i]);
    }
}
// Verifies getNearNeighborsInSample: neighbors at the same point found in
// multiple trees are merged (their sequence indexes unioned), trees that find
// nothing contribute nothing, and results come back sorted by ascending
// distance.
@Test
public void testGetNearNeighborInSample() {
    // two trees report the point {1,2} at distance 5 with disjoint index sets
    List<Long> indexes1 = new ArrayList<>();
    indexes1.add(1L);
    indexes1.add(3L);
    List<Long> indexes2 = new ArrayList<>();
    indexes2.add(2L);
    indexes2.add(4L);
    // two trees report the point {2,3} at distance 4 with disjoint index sets
    List<Long> indexes4 = new ArrayList<>();
    indexes4.add(1L);
    indexes4.add(3L);
    List<Long> indexes5 = new ArrayList<>();
    indexes5.add(2L);
    indexes5.add(4L);
    Neighbor neighbor1 = new Neighbor(new float[] { 1, 2 }, 5, indexes1);
    when(((SamplerPlusTree<?, ?>) components.get(0)).getTree().traverse(any(float[].class),
            any(IVisitorFactory.class))).thenReturn(Optional.of(neighbor1));
    Neighbor neighbor2 = new Neighbor(new float[] { 1, 2 }, 5, indexes2);
    when(((SamplerPlusTree<?, ?>) components.get(1)).getTree().traverse(any(float[].class),
            any(IVisitorFactory.class))).thenReturn(Optional.of(neighbor2));
    // tree 2 finds no neighbor at all
    when(((SamplerPlusTree<?, ?>) components.get(2)).getTree().traverse(any(float[].class),
            any(IVisitorFactory.class))).thenReturn(Optional.empty());
    Neighbor neighbor4 = new Neighbor(new float[] { 2, 3 }, 4, indexes4);
    when(((SamplerPlusTree<?, ?>) components.get(3)).getTree().traverse(any(float[].class),
            any(IVisitorFactory.class))).thenReturn(Optional.of(neighbor4));
    Neighbor neighbor5 = new Neighbor(new float[] { 2, 3 }, 4, indexes5);
    when(((SamplerPlusTree<?, ?>) components.get(4)).getTree().traverse(any(float[].class),
            any(IVisitorFactory.class))).thenReturn(Optional.of(neighbor5));
    // all remaining trees find nothing
    for (int i = 5; i < components.size(); i++) {
        when(((SamplerPlusTree<?, ?>) components.get(i)).getTree().traverse(any(float[].class),
                any(IVisitorFactory.class))).thenReturn(Optional.empty());
    }
    // sequence indexes are only collected when the feature is enabled
    Whitebox.setInternalState(forest, "storeSequenceIndexesEnabled", true);
    doReturn(true).when(forest).isOutputReady();
    List<Neighbor> neighbors = forest.getNearNeighborsInSample(new double[] { 0, 0 }, 5);
    List<Long> expectedIndexes = Arrays.asList(1L, 2L, 3L, 4L);
    // two distinct points, ordered by distance: {2,3} at 4 then {1,2} at 5
    assertEquals(2, neighbors.size());
    assertTrue(neighbors.get(0).point[0] == 2 && neighbors.get(0).point[1] == 3);
    assertEquals(4, neighbors.get(0).distance);
    assertEquals(4, neighbors.get(0).sequenceIndexes.size());
    assertThat(neighbors.get(0).sequenceIndexes, is(expectedIndexes));
    assertTrue(neighbors.get(1).point[0] == 1 && neighbors.get(1).point[1] == 2);
    assertEquals(5, neighbors.get(1).distance);
    assertEquals(4, neighbors.get(1).sequenceIndexes.size());
    assertThat(neighbors.get(1).sequenceIndexes, is(expectedIndexes));
}
// Until the forest is output-ready the neighbor query yields an empty list.
@Test
public void testGetNearNeighborsInSampleBeforeOutputReady() {
    assertFalse(forest.isOutputReady());
    List<Neighbor> neighbors = forest.getNearNeighborsInSample(new double[] { 0.1, 0.2 }, 5.0);
    assertTrue(neighbors.isEmpty());
}
// The threshold-less overload must delegate with an infinite distance bound.
@Test
public void testGetNearNeighborsInSampleNoDistanceThreshold() {
    forest.getNearNeighborsInSample(new double[] { 0.1, 0.2 });
    verify(forest, times(1)).getNearNeighborsInSample(aryEq(new float[] { 0.1f, 0.2f }),
            eq(Double.POSITIVE_INFINITY));
}
// getNearNeighborsInSample rejects a null point and non-positive thresholds.
@Test
public void testGetNearNeighborsInSampleInvalid() {
    double[] query = new double[] { 1.1, 2.2 };
    assertThrows(NullPointerException.class, () -> forest.getNearNeighborsInSample((double[]) null, 101.1));
    assertThrows(IllegalArgumentException.class, () -> forest.getNearNeighborsInSample(query, -101.1));
    assertThrows(IllegalArgumentException.class, () -> forest.getNearNeighborsInSample(query, 0.0));
}
// Random cuts and tree updates must remain functional when the bounding box
// is tiny (two adjacent floating point values); passes if no exception occurs.
@Test
public void testUpdateOnSmallBoundingBox() {
    RandomCutForest tinyForest = RandomCutForest.builder().dimensions(1).numberOfTrees(1).sampleSize(3)
            .timeDecay(0.5).randomSeed(0).parallelExecutionEnabled(false).build();
    double[][] data = { { 48.08 }, { 48.08000000000001 } };
    for (int i = 0; i < 20000; i++) {
        tinyForest.update(data[i % data.length]);
    }
}
// samplersFull is false below sampleSize updates and true at or beyond it.
@Test
public void testSamplersFull() {
    // below capacity
    when(updateCoordinator.getTotalUpdates()).thenReturn((long) (sampleSize / 2));
    assertFalse(forest.samplersFull());
    // exactly at capacity
    when(updateCoordinator.getTotalUpdates()).thenReturn((long) sampleSize);
    assertTrue(forest.samplersFull());
    // well past capacity
    when(updateCoordinator.getTotalUpdates()).thenReturn((long) (sampleSize * 10));
    assertTrue(forest.samplersFull());
}
// The forest reports the update coordinator's total count verbatim.
@Test
public void testGetTotalUpdates() {
    long expected = 987654321L;
    when(updateCoordinator.getTotalUpdates()).thenReturn(expected);
    assertEquals(expected, forest.getTotalUpdates());
}
// Verifies isOutputReady: requires every component to be ready AND the
// coordinator to have processed at least sampleSize updates; once true the
// result is cached and the components are not queried again.
@Test
public void testIsOutputReady() {
    assertFalse(forest.isOutputReady());
    // only half the components ready -> still not ready
    for (int i = 0; i < numberOfTrees / 2; i++) {
        doReturn(true).when(components.get(i)).isOutputReady();
    }
    assertFalse(forest.isOutputReady());
    // all components ready but not enough total updates -> still not ready
    for (int i = 0; i < numberOfTrees; i++) {
        doReturn(true).when(components.get(i)).isOutputReady();
    }
    assertFalse(forest.isOutputReady());
    when(updateCoordinator.getTotalUpdates()).thenReturn((long) sampleSize);
    assertTrue(forest.isOutputReady());
    // After forest.isOutputReady() returns true once, the result should be cached
    for (int i = 0; i < numberOfTrees; i++) {
        IComponentModel<?, ?> component = components.get(i);
        reset(component);
        doReturn(true).when(component).isOutputReady();
    }
    assertTrue(forest.isOutputReady());
    // the reset mocks must never have been consulted again
    for (int i = 0; i < numberOfTrees; i++) {
        IComponentModel<?, ?> component = components.get(i);
        verify(component, never()).isOutputReady();
    }
}
// Round-trips a forest through the state mapper and checks that the restored
// copy scores identically and stays in lockstep under further updates.
@Test
public void testUpdateAfterRoundTrip() {
    int dimensions = 10;
    for (int trials = 0; trials < 10; trials++) {
        RandomCutForest original = RandomCutForest.builder().compact(true).dimensions(dimensions).sampleSize(64)
                .build();
        Random rng = new Random();
        // a fixed-seed Random yields the same bound on every draw, so the
        // warm-up length is deterministic per trial
        int warmup = new Random(trials).nextInt(3000);
        for (int i = 0; i < warmup; i++) {
            original.update(rng.ints(dimensions, 0, 50).asDoubleStream().toArray());
        }
        // serialize + deserialize
        RandomCutForestMapper mapper = new RandomCutForestMapper();
        mapper.setSaveExecutorContextEnabled(true);
        mapper.setSaveTreeStateEnabled(true);
        RandomCutForest restored = mapper.toModel(mapper.toState(original));
        // both forests must score and evolve identically after the round trip
        for (int i = 0; i < 10000; i++) {
            double[] point = rng.ints(dimensions, 0, 50).asDoubleStream().toArray();
            assertEquals(original.getAnomalyScore(point), restored.getAnomalyScore(point), 1e-5);
            restored.update(point);
            original.update(point);
        }
    }
}
// Round-trips a single-tree forest large enough to require a medium node store
// (sampleSize 20000, FLOAT_32 precision) and checks that scores and subsequent
// updates of the restored forest track the original exactly.
@Test
public void testUpdateAfterRoundTripMediumNodeStore() {
    int dimensions = 5;
    for (int trials = 0; trials < 10; trials++) {
        RandomCutForest forest = RandomCutForest.builder().compact(true).dimensions(dimensions).numberOfTrees(1)
                .sampleSize(20000).precision(Precision.FLOAT_32).build();
        Random r = new Random();
        // warm up well past the sample size; the extra count is randomized
        for (int i = 0; i < 30000 + new Random().nextInt(300); i++) {
            forest.update(r.ints(dimensions, 0, 50).asDoubleStream().toArray());
        }
        // serialize + deserialize
        RandomCutForestMapper mapper = new RandomCutForestMapper();
        mapper.setSaveTreeStateEnabled(true);
        mapper.setSaveExecutorContextEnabled(true);
        RandomCutForestState state = mapper.toState(forest);
        RandomCutForest forest2 = mapper.toModel(state);
        // update re-instantiated forest; both must stay in lockstep
        for (int i = 0; i < 10000; i++) {
            double[] point = r.ints(dimensions, 0, 50).asDoubleStream().toArray();
            double score = forest.getAnomalyScore(point);
            assertEquals(score, forest2.getAnomalyScore(point), 1E-10);
            forest2.update(point);
            forest.update(point);
        }
    }
}
// Round-trips a single-tree forest large enough to require a large node store
// (sampleSize 200000) with center-of-mass tracking enabled, and checks the
// flag survives the round trip and scores/updates stay in lockstep.
@Test
public void testUpdateAfterRoundTripLargeNodeStore() {
    int dimensions = 5;
    for (int trials = 0; trials < 1; trials++) {
        RandomCutForest forest = RandomCutForest.builder().compact(true).dimensions(dimensions).numberOfTrees(1)
                .sampleSize(200000).centerOfMassEnabled(true).build();
        Random r = new Random();
        // warm up well past the sample size; the extra count is randomized
        for (int i = 0; i < 300000 + new Random().nextInt(300); i++) {
            forest.update(r.ints(dimensions, 0, 50).asDoubleStream().toArray());
        }
        // serialize + deserialize
        RandomCutForestMapper mapper = new RandomCutForestMapper();
        mapper.setSaveTreeStateEnabled(true);
        mapper.setSaveExecutorContextEnabled(true);
        RandomCutForestState state = mapper.toState(forest);
        RandomCutForest forest2 = mapper.toModel(state);
        // configuration flags must survive the round trip
        assert (forest2.isCenterOfMassEnabled());
        // update re-instantiated forest; both must stay in lockstep
        for (int i = 0; i < 10000; i++) {
            double[] point = r.ints(dimensions, 0, 50).asDoubleStream().toArray();
            double score = forest.getAnomalyScore(point);
            assertEquals(score, forest2.getAnomalyScore(point), 1E-10);
            forest2.update(point);
            forest.update(point);
        }
    }
}
// Verifies internal shingling with rotation enabled (2 input values per
// update, shingleSize 2 -> 4 shingled dimensions): input sizes are validated
// and the rotated shingle buffer / index transforms advance with each update.
// NOTE: the updates below are strictly order-dependent.
@Test
public void testInternalShinglingRotated() {
    RandomCutForest forest = new RandomCutForest.Builder<>().internalShinglingEnabled(true)
            .internalRotationEnabled(true).shingleSize(2).dimensions(4).numberOfTrees(1).build();
    // updates must supply exactly dimensions / shingleSize = 2 values
    assertThrows(IllegalArgumentException.class, () -> forest.update(new double[] { 0 }));
    forest.update(new double[] { 0.0, -0.0 });
    assertArrayEquals(forest.lastShingledPoint(), new float[] { 0, 0, 0, 0 });
    forest.update(new double[] { 1.0, -1.0 });
    assertArrayEquals(forest.transformIndices(new int[] { 0, 1 }, 2), new int[] { 0, 1 });
    forest.update(new double[] { 2.0, -2.0 });
    assertEquals(forest.nextSequenceIndex(), 3);
    // rotation places the newest pair at the front of the shingle
    assertArrayEquals(forest.lastShingledPoint(), new float[] { 2, -2, 1, -1 });
    assertArrayEquals(forest.transformToShingledPoint(new float[] { 7, 8 }), new float[] { 2, -2, 7, 8 });
    assertArrayEquals(forest.transformIndices(new int[] { 0, 1 }, 2), new int[] { 2, 3 });
    // a fully shingled point is likewise rejected as input
    assertThrows(IllegalArgumentException.class, () -> forest.update(new double[] { 0, 0, 0, 0 }));
}
// Each component model exposes the forest-level configuration defaults and
// rejects unknown configuration keys.
@Test
public void testComponents() {
    RandomCutForest smallForest = new RandomCutForest.Builder<>().dimensions(2).sampleSize(10).numberOfTrees(2)
            .build();
    for (IComponentModel model : smallForest.getComponents()) {
        assertEquals(model.getConfig(Config.BOUNDING_BOX_CACHE_FRACTION), 1.0);
        model.getConfig(Config.TIME_DECAY);
        // default time decay is 1 / (10 * sampleSize)
        assertEquals(model.getConfig(Config.TIME_DECAY), 1.0 / 100);
        // unknown keys are rejected for both get and set
        assertThrows(IllegalArgumentException.class, () -> model.getConfig("foo"));
        assertThrows(IllegalArgumentException.class, () -> model.setConfig("bar", 0));
    }
}
// With a very large time decay the sampler behaves almost like a sliding
// window; points inserted with out-of-order timestamps are retained or
// evicted by timestamp, not by insertion order.
@Test
public void testOutOfOrderUpdate() {
    RandomCutForest window = new RandomCutForest.Builder<>().dimensions(2).sampleSize(10).numberOfTrees(2).build();
    window.setTimeDecay(100);
    window.setBoundingBoxCacheFraction(0.2);
    // insert the newest timestamp first, then an older one
    window.update(new double[] { 20.0, -20.0 }, 20);
    window.update(new double[] { 0.0, -0.0 }, 0);
    assertEquals(window.getNearNeighborsInSample(new double[] { 0.0, -0.0 }, 1).size(), 1);
    for (int i = 1; i < 19; i++) {
        window.update(new double[] { i, -i }, i);
    }
    // the oldest point {0, 0} should have been flushed out ...
    assertEquals(window.getNearNeighborsInSample(new double[] { 0.0, -0.0 }, 1).size(), 0);
    // ... while the newest point {20, -20} is still present
    assertEquals(window.getNearNeighborsInSample(new double[] { 20.0, -20.0 }, 1).size(), 1);
}
// Streams a large amount of shingled data through a forest using FLOAT_32
// precision with a fully cached bounding box, exercising floating point
// random-cut computations; the test passes if no exception is thrown.
// Fix: removed the unused local variable `num`.
@Test
public void testFloatingPointRandomCut() {
    int dimensions = 16;
    int numberOfTrees = 41;
    int sampleSize = 64;
    long seed = new Random().nextLong();
    // log the seed so a failure can be reproduced
    System.out.println(" seed " + seed);
    int dataSize = 4000 * sampleSize;
    double[][] big = generateShingledData(dataSize, dimensions, 2);
    RandomCutForest forest = RandomCutForest.builder().compact(true).dimensions(dimensions)
            .numberOfTrees(numberOfTrees).sampleSize(sampleSize).precision(Precision.FLOAT_32).randomSeed(seed)
            .boundingBoxCacheFraction(1.0).build();
    for (double[] point : big) {
        forest.update(point);
    }
}
/**
 * Streams a noisy cosine series (from {@link #getDataD}) through a circular
 * buffer of length {@code dimensions} and emits one shingle per step once the
 * buffer has been filled at least once.
 *
 * @param size       number of shingles to produce
 * @param dimensions shingle length
 * @param seed       seed for the underlying data generator
 * @return {@code size} shingles in oldest-to-newest order
 */
public static double[][] generateShingledData(int size, int dimensions, long seed) {
    double[][] shingles = new double[size][];
    double[] window = new double[dimensions];
    double[] raw = getDataD(size + dimensions - 1, 100, 5, seed);
    int oldest = 0; // index of the oldest entry in the circular window
    int produced = 0; // number of shingles emitted so far
    boolean windowFull = false;
    for (int j = 0; j < size + dimensions - 1; ++j) { // we stream here ....
        window[oldest] = raw[j];
        oldest = (oldest + 1) % dimensions;
        if (oldest == 0) {
            windowFull = true;
        }
        if (windowFull) {
            shingles[produced++] = getShinglePoint(window, oldest, dimensions);
        }
    }
    return shingles;
}
/**
 * Unrolls a circular buffer of recent values into a flat shingle starting at
 * the oldest entry.
 *
 * Fix: the original kept a secondary index {@code i} that always equaled the
 * loop variable {@code j}; the copy now writes by position directly. Also made
 * package-private to match the sibling helper {@code getDataD}.
 *
 * @param recentPointsSeen   circular buffer of the most recent values
 * @param indexOfOldestPoint index of the oldest value in the buffer
 * @param shingleLength      length of the buffer and of the resulting shingle
 * @return a new array with the values in oldest-to-newest order
 */
static double[] getShinglePoint(double[] recentPointsSeen, int indexOfOldestPoint, int shingleLength) {
    double[] shingledPoint = new double[shingleLength];
    for (int j = 0; j < shingleLength; ++j) {
        shingledPoint[j] = recentPointsSeen[(j + indexOfOldestPoint) % shingleLength];
    }
    return shingledPoint;
}
// Generates a noisy cosine wave: amplitude * cos(2*pi*(i+50)/1000) plus
// uniform noise in [0, noise); deterministic for a fixed seed.
// NOTE(review): `PI` is resolved via an import outside this view --
// presumably a static import of java.lang.Math.PI; confirm at the file top.
static double[] getDataD(int num, double amplitude, double noise, long seed) {
    double[] data = new double[num];
    Random noiseprg = new Random(seed);
    for (int i = 0; i < num; i++) {
        data[i] = amplitude * Math.cos(2 * PI * (i + 50) / 1000) + noise * noiseprg.nextDouble();
    }
    return data;
}
}
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazon.randomcutforest.tree;
import static com.amazon.randomcutforest.CommonUtils.checkArgument;
import static com.amazon.randomcutforest.CommonUtils.toFloatArray;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import java.util.function.BiFunction;
import java.util.function.BinaryOperator;
import java.util.function.Function;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import com.amazon.randomcutforest.CommonUtils;
import com.amazon.randomcutforest.RandomCutForest;
import com.amazon.randomcutforest.VisitorFactory;
import com.amazon.randomcutforest.anomalydetection.TransductiveScalarScoreVisitor;
import com.amazon.randomcutforest.store.PointStore;
import com.amazon.randomcutforest.testutils.NormalMixtureTestData;
public class HyperTreeTest {
// forest shape parameters, assigned in setup()
private static int numberOfTrees;
private static int sampleSize;
private static int dimensions;
// NOTE(review): never assigned in setup(), so all runs effectively use seed 0
private static int randomSeed;
// parameters of the two-component normal mixture data generator
private static double baseMu;
private static double baseSigma;
private static double anomalyMu;
private static double anomalySigma;
private static double transitionToAnomalyProbability;
private static double transitionToBaseProbability;
private static int dataSize;
private static NormalMixtureTestData generator;
// trials per scoring experiment and held-out test points per trial
private static int numTrials = 5;
private static int numTest = 5;
/**
 * Separation weights proportional to range^alpha per dimension; alpha == 0
 * degenerates to a weight of 1 for every dimension with a positive range.
 * Dimensions with zero range get weight 0.
 */
public static Function<IBoundingBoxView, double[]> LAlphaSeparation(final double alpha) {
    return (IBoundingBoxView boundingBox) -> {
        int dim = boundingBox.getDimensions();
        double[] weights = new double[dim];
        for (int i = 0; i < dim; ++i) {
            double range = boundingBox.getMaxValue(i) - boundingBox.getMinValue(i);
            if (range > 0) {
                weights[i] = (alpha == 0) ? 1.0 : Math.pow(range, alpha);
            }
        }
        return weights;
    };
}
/**
 * Separation weights of log(1 + range / gauge) per dimension; dimensions with
 * zero range get weight 0.
 */
public static Function<IBoundingBoxView, double[]> GTFSeparation(final double gauge) {
    return (IBoundingBoxView boundingBox) -> {
        int dim = boundingBox.getDimensions();
        double[] weights = new double[dim];
        for (int i = 0; i < dim; ++i) {
            double range = boundingBox.getMaxValue(i) - boundingBox.getMinValue(i);
            if (range > 0) {
                weights[i] = Math.log(1 + range / gauge);
            }
        }
        return weights;
    };
}
/**
 * A miniature forest of {@link HyperTree}s sharing one {@link PointStore}.
 * Unlike {@link RandomCutForest}, the trees are built in one shot from a
 * sample of a fixed point list ({@link #makeForest}) rather than streamed.
 * Scoring traverses all trees in parallel and averages the results.
 */
class HyperForest {
    int dimensions;
    int seed;
    Random random;
    int sampleSize;
    int numberOfTrees;
    PointStore pointStore;
    ArrayList<HyperTree> trees;

    public HyperForest(int dimensions, int numberOfTrees, int sampleSize, int seed,
            Function<IBoundingBoxView, double[]> vecSeparation) {
        this.numberOfTrees = numberOfTrees;
        this.seed = seed;
        this.sampleSize = sampleSize;
        this.dimensions = dimensions;
        // capacity covers the worst case of fully disjoint per-tree samples
        pointStore = PointStore.builder().capacity(numberOfTrees * sampleSize).dimensions(dimensions).shingleSize(1)
                .build();
        trees = new ArrayList<>();
        random = new Random(seed);
        for (int i = 0; i < numberOfTrees; i++) {
            trees.add(new HyperTree.Builder().pointStoreView(pointStore).dimension(dimensions)
                    .buildGVec(vecSeparation).randomSeed(random.nextInt()).build());
        }
    }

    // displacement scoring (multiplied by the normalizer log_2(treesize)) on the
    // fly !!
    // as introduced in Robust Random Cut Forest Based Anomaly Detection in Streams
    // @ICML 2016. This does not address co-displacement (duplicity).
    // seen function is (x,y) -> 1 which basically ignores everything
    // unseen function is (x,y) -> y which corresponds to mass of sibling
    // damp function is (x,y) -> 1 which is no dampening
    public double getDisplacementScore(float[] point) {
        return getDynamicScore(point, (x, y) -> 1.0, (x, y) -> y, (x, y) -> 1.0);
    }

    // Expected height (multiplied by the normalizer log_2(treesize) ) scoring on
    // the fly !!
    // seen function is (x,y) -> x+log(Y)/log(2) which depth + duplicity converted
    // to depth
    // unseen function is (x,y) -> x which is depth
    // damp function is (x,y) -> 1 which is no dampening
    // note that this is *NOT* anything like the expected height in
    // Isolation Forest/Random Forest algorithms, because here
    // the Expected height takes into account the contrafactual
    // that "what would have happened had the point been available during
    // the construction of the forest"
    public double getHeightScore(float[] point) {
        return getDynamicScore(point, (x, y) -> 1.0 * (x + Math.log(y)), (x, y) -> 1.0 * x, (x, y) -> 1.0);
    }

    /** Standard RCF anomaly score using the default seen/unseen/damp functions. */
    public double getAnomalyScore(float[] point) {
        return getDynamicScore(point, CommonUtils::defaultScoreSeenFunction,
                CommonUtils::defaultScoreUnseenFunction, CommonUtils::defaultDampFunction);
    }

    /**
     * Averages the transductive dynamic score over all trees; the trees are
     * traversed in parallel.
     */
    public double getDynamicScore(float[] point, BiFunction<Double, Double, Double> seen,
            BiFunction<Double, Double, Double> unseen, BiFunction<Double, Double, Double> newDamp) {
        checkArgument(dimensions == point.length, "incorrect dimensions");
        VisitorFactory<Double> visitorFactory = new VisitorFactory<>(
                (tree, y) -> new TransductiveScalarScoreVisitor(tree.projectToTree(y), tree.getMass(), seen, unseen,
                        newDamp, ((HyperTree) tree).getgVec()));
        BinaryOperator<Double> accumulator = Double::sum;
        Function<Double, Double> finisher = sum -> sum / numberOfTrees;
        return trees.parallelStream().map(tree -> tree.traverse(point, visitorFactory)).reduce(accumulator)
                .map(finisher).orElseThrow(() -> new IllegalStateException("accumulator returned an empty result"));
    }

    /**
     * Builds every tree from a random sample (without replacement) of the
     * first {@code prefix} entries of {@code pointList}. Sampled points are
     * added to the shared store once and referenced by every tree that drew
     * them.
     */
    void makeForest(double[][] pointList, int prefix) {
        for (int i = 0; i < pointList.length; i++) {
            if (pointList[i].length != dimensions) {
                throw new IllegalArgumentException("Points have incorrect dimensions");
            }
        }
        // status[0][z]: z was sampled by at least one tree (union row);
        // status[i + 1][z]: z was sampled by tree i
        boolean[][] status = new boolean[numberOfTrees + 1][pointList.length];
        for (int i = 0; i < numberOfTrees; i++) {
            int y = 0;
            while (y < sampleSize) {
                int z = random.nextInt(prefix);
                // FIX: the duplicate check previously read status[i][z] (the
                // previous tree's row, or the union row for i == 0), which let
                // trees after the first draw the same point more than once.
                // A sample must be duplicate-free within its own row.
                if (!status[i + 1][z]) {
                    status[i + 1][z] = true;
                    status[0][z] = true; // will compute union across trees
                    ++y;
                }
            }
        }
        int[] reference = new int[pointList.length];
        List<Integer>[] lists = new List[numberOfTrees];
        for (int i = 0; i < numberOfTrees; i++) {
            lists[i] = new ArrayList<>();
        }
        for (int i = 0; i < pointList.length; i++) {
            if (status[0][i]) {
                // store each sampled point once, then hand its reference to
                // every tree whose sample contains it
                reference[i] = pointStore.add(toFloatArray(pointList[i]), 0L);
                for (int j = 0; j < numberOfTrees; j++) {
                    if (status[j + 1][i]) {
                        lists[j].add(reference[i]);
                    }
                }
            }
        }
        for (int i = 0; i < numberOfTrees; i++) {
            trees.get(i).makeTree(lists[i], random.nextInt());
        }
    }
}
// ===========================================================
// Simulated anomaly score on a standard RandomCutForest using the default
// seen/unseen/damp functions but a custom separation-vector function gVec.
public static double getSimulatedAnomalyScore(RandomCutForest forest, float[] point,
        Function<IBoundingBoxView, double[]> gVec) {
    return forest.getDynamicSimulatedScore(point, CommonUtils::defaultScoreSeenFunction,
            CommonUtils::defaultScoreUnseenFunction, CommonUtils::defaultDampFunction, gVec);
}
// Simulated expected-height score: seen (x,y) -> x + log(y) (depth plus
// duplicity), unseen (x,y) -> x (depth), no dampening; custom separation gvec.
public static double getSimulatedHeightScore(RandomCutForest forest, float[] point,
        Function<IBoundingBoxView, double[]> gvec) {
    return forest.getDynamicSimulatedScore(point, (x, y) -> 1.0 * (x + Math.log(y)), (x, y) -> 1.0 * x,
            (x, y) -> 1.0, gvec);
}
// Simulated displacement score: seen (x,y) -> 1, unseen (x,y) -> y (sibling
// mass), no dampening; custom separation-vector function gvec.
public static double getSimulatedDisplacementScore(RandomCutForest forest, float[] point,
        Function<IBoundingBoxView, double[]> gvec) {
    return forest.getDynamicSimulatedScore(point, (x, y) -> 1.0, (x, y) -> y, (x, y) -> 1.0, gvec);
}
// ===========================================================
// Fixture parameters shared by all tests. With transitionToAnomalyProbability
// at 0 the mixture generator effectively produces a single base cluster.
// NOTE(review): randomSeed is intentionally(?) left at its default of 0.
@BeforeAll
public static void setup() {
    dataSize = 2000;
    numberOfTrees = 1; // this is a tree test
    sampleSize = 256;
    dimensions = 30;
    baseMu = 0.0;
    baseSigma = 1.0;
    anomalyMu = 0.0;
    anomalySigma = 1.5;
    transitionToAnomalyProbability = 0.0;
    // ignoring anomaly cluster for now
    transitionToBaseProbability = 1.0;
    generator = new NormalMixtureTestData(baseMu, baseSigma, anomalyMu, anomalySigma,
            transitionToAnomalyProbability, transitionToBaseProbability);
}
// Accumulators for the three score variants (anomaly score, displacement,
// expected height) measured at the center between the two shifted clusters
// and inside the left/right clusters.
private class TestScores {
    double sumCenterScore = 0;
    double sumCenterDisp = 0;
    double sumCenterHeight = 0;
    double sumLeftScore = 0;
    double sumRightScore = 0;
    double sumLeftDisp = 0;
    double sumRightDisp = 0;
    double sumLeftHeight = 0;
    double sumRightHeight = 0;
}
// Builds forests on data shrunk by 0.01 and split into two clusters at
// x0 = +/-5, then checks that held-out points placed at the center (between
// the clusters) register as more anomalous -- higher anomaly score and
// displacement, lower expected height -- than points inside either cluster.
// NOTE: the final checks use the `assert` keyword and require -ea.
public static void runRCF(TestScores testScore, Function<IBoundingBoxView, double[]> gVec) {
    Random prg = new Random(randomSeed);
    for (int trials = 0; trials < numTrials; trials++) {
        double[][] data = generator.generateTestData(dataSize + numTest, dimensions, 100 + trials);
        RandomCutForest newForest = RandomCutForest.builder().dimensions(dimensions).numberOfTrees(numberOfTrees)
                .sampleSize(sampleSize).randomSeed(prg.nextInt()).build();
        for (int i = 0; i < dataSize; i++) {
            // shrink, shift at random
            for (int j = 0; j < dimensions; j++)
                data[i][j] *= 0.01;
            if (prg.nextDouble() < 0.5)
                data[i][0] += 5.0;
            else
                data[i][0] -= 5.0;
            newForest.update(data[i]);
            // the points are streamed
        }
        // score the held-out test points at the center and in each cluster
        for (int i = dataSize; i < dataSize + numTest; i++) {
            for (int j = 0; j < dimensions; j++)
                data[i][j] *= 0.01;
            testScore.sumCenterScore += getSimulatedAnomalyScore(newForest, toFloatArray(data[i]), gVec);
            testScore.sumCenterHeight += getSimulatedHeightScore(newForest, toFloatArray(data[i]), gVec);
            testScore.sumCenterDisp += getSimulatedDisplacementScore(newForest, toFloatArray(data[i]), gVec);
            data[i][0] += 5; // move to right cluster
            testScore.sumRightScore += getSimulatedAnomalyScore(newForest, toFloatArray(data[i]), gVec);
            testScore.sumRightHeight += getSimulatedHeightScore(newForest, toFloatArray(data[i]), gVec);
            testScore.sumRightDisp += getSimulatedDisplacementScore(newForest, toFloatArray(data[i]), gVec);
            data[i][0] -= 10; // move to left cluster
            testScore.sumLeftScore += getSimulatedAnomalyScore(newForest, toFloatArray(data[i]), gVec);
            testScore.sumLeftHeight += getSimulatedHeightScore(newForest, toFloatArray(data[i]), gVec);
            testScore.sumLeftDisp += getSimulatedDisplacementScore(newForest, toFloatArray(data[i]), gVec);
        }
    }
    // center points must look anomalous (higher score/displacement, smaller
    // expected height) relative to in-cluster points
    assert (testScore.sumCenterScore > 2 * testScore.sumLeftScore);
    assert (testScore.sumCenterScore > 2 * testScore.sumRightScore);
    assert (testScore.sumCenterDisp > 10 * testScore.sumLeftDisp);
    assert (testScore.sumCenterDisp > 10 * testScore.sumRightDisp);
    assert (2 * testScore.sumCenterHeight < testScore.sumLeftHeight);
    assert (2 * testScore.sumCenterHeight < testScore.sumRightHeight);
}
/**
 * Builds a {@code HyperForest} over two shifted clusters and accumulates
 * anomaly/height/displacement scores at the empty midpoint and at each cluster,
 * asserting that the midpoint scores as clearly more anomalous.
 *
 * @param testScore    accumulator for the center/right/left score sums
 * @param flag         {@code true} selects {@code GTFSeparation}, {@code false}
 *                     selects {@code LAlphaSeparation}
 * @param gaugeOrAlpha gauge parameter (GTF) or alpha parameter (L-alpha) for
 *                     the chosen separation function
 */
public void runGTFLAlpha(TestScores testScore, boolean flag, double gaugeOrAlpha) {
    Random prg = new Random(randomSeed);
    for (int trials = 0; trials < numTrials; trials++) {
        double[][] data = generator.generateTestData(dataSize + numTest, dimensions, 100 + trials);
        // Single constructor call; only the separation function depends on flag.
        // Java evaluates arguments left-to-right, so prg.nextInt() is still drawn
        // before the separation factory runs, matching the original RNG order.
        HyperForest newForest = new HyperForest(dimensions, numberOfTrees, sampleSize, prg.nextInt(),
                flag ? GTFSeparation(gaugeOrAlpha) : LAlphaSeparation(gaugeOrAlpha));
        for (int i = 0; i < dataSize; i++) {
            // shrink all coordinates, then shift to the +/-5 cluster at random
            for (int j = 0; j < dimensions; j++)
                data[i][j] *= 0.01;
            if (prg.nextDouble() < 0.5)
                data[i][0] += 5.0;
            else
                data[i][0] -= 5.0;
        }
        // batch construction (unlike the streamed RandomCutForest variant)
        newForest.makeForest(data, dataSize);
        // Score each held-out point at the empty center, then at the right and
        // left cluster locations.
        for (int i = dataSize; i < dataSize + numTest; i++) {
            for (int j = 0; j < dimensions; j++)
                data[i][j] *= 0.01;
            testScore.sumCenterScore += newForest.getAnomalyScore(toFloatArray(data[i]));
            testScore.sumCenterHeight += newForest.getHeightScore(toFloatArray(data[i]));
            testScore.sumCenterDisp += newForest.getDisplacementScore(toFloatArray(data[i]));
            data[i][0] += 5; // move to right cluster
            testScore.sumRightScore += newForest.getAnomalyScore(toFloatArray(data[i]));
            testScore.sumRightHeight += newForest.getHeightScore(toFloatArray(data[i]));
            testScore.sumRightDisp += newForest.getDisplacementScore(toFloatArray(data[i]));
            data[i][0] -= 10; // move to left cluster
            testScore.sumLeftScore += newForest.getAnomalyScore(toFloatArray(data[i]));
            testScore.sumLeftHeight += newForest.getHeightScore(toFloatArray(data[i]));
            testScore.sumLeftDisp += newForest.getDisplacementScore(toFloatArray(data[i]));
        }
    }
    // The empty center should be more anomalous than either populated cluster:
    // higher anomaly score and displacement, lower height. (Requires -ea.)
    assert (testScore.sumCenterScore > 1.5 * testScore.sumLeftScore);
    assert (testScore.sumCenterScore > 1.5 * testScore.sumRightScore);
    assert (testScore.sumCenterDisp > 10 * testScore.sumLeftDisp);
    assert (testScore.sumCenterDisp > 10 * testScore.sumRightDisp);
    assert (1.5 * testScore.sumCenterHeight < testScore.sumLeftHeight);
    assert (1.5 * testScore.sumCenterHeight < testScore.sumRightHeight);
}
/**
 * Runs the RandomCutForest-based simulation of {@link #runGTFLAlpha} using the
 * separation function selected by {@code flag}.
 *
 * @param testScore    accumulator for the center/right/left score sums
 * @param flag         {@code true} selects {@code GTFSeparation}, {@code false}
 *                     selects {@code LAlphaSeparation}
 * @param gaugeOrAlpha gauge (GTF) or alpha (L-alpha) parameter
 */
public void simulateGTFLAlpha(TestScores testScore, boolean flag, double gaugeOrAlpha) {
    // Default to the L-alpha separation; override with GTF when requested.
    Function<IBoundingBoxView, double[]> separation = LAlphaSeparation(gaugeOrAlpha);
    if (flag) {
        separation = GTFSeparation(gaugeOrAlpha);
    }
    runRCF(testScore, separation);
}
@Test
public void GaugeTransductiveForestTest() {
    // Exercise the gauge-transductive separation (flag = true, gauge = 1)
    // against the real HyperForest implementation...
    TestScores exactScores = new TestScores();
    runGTFLAlpha(exactScores, true, 1);
    // ...and against the RandomCutForest-based simulation.
    TestScores simulatedScores = new TestScores();
    simulateGTFLAlpha(simulatedScores, true, 1);
}
@Test
public void LAlphaForestTest() {
    // Exercise the L-alpha separation (flag = false, alpha = 0.5)
    // against the real HyperForest implementation...
    TestScores exactScores = new TestScores();
    runGTFLAlpha(exactScores, false, 0.5);
    // ...and against the RandomCutForest-based simulation.
    TestScores simulatedScores = new TestScores();
    simulateGTFLAlpha(simulatedScores, false, 0.5);
}
}