text
stringlengths 7
1.01M
|
|---|
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.eoi.jax.flink.job.process;
import com.eoi.jax.api.annotation.Parameter;
import java.io.Serializable;
public class RenameFieldsDef implements Serializable {

    /** Name of the column as it exists before the rename is applied. */
    @Parameter(
            label = "原始列",
            description = "重命名前列的名称"
    )
    private String oldField;

    /** Name the column will carry after the rename is applied. */
    @Parameter(
            label = "新列",
            description = "重命名后列的名称。如果该列名已存在,那么原来的列会被覆盖"
    )
    private String newField;

    /** Whether an already-existing field of the target name may be overwritten. */
    @Parameter(
            label = "是否覆盖",
            description = "如果添加的字段已存在,是否覆盖原先的值",
            defaultValue = "false"
    )
    private boolean isReplace;

    /** @return the pre-rename column name */
    public String getOldField() {
        return oldField;
    }

    /** @param oldField the pre-rename column name */
    public void setOldField(String oldField) {
        this.oldField = oldField;
    }

    /** @return the post-rename column name */
    public String getNewField() {
        return newField;
    }

    /** @param newField the post-rename column name */
    public void setNewField(String newField) {
        this.newField = newField;
    }

    /** @return whether an existing field may be overwritten */
    public boolean isReplace() {
        return isReplace;
    }

    /** @param replace whether an existing field may be overwritten */
    public void setReplace(boolean replace) {
        this.isReplace = replace;
    }
}
|
/*
* openTCS copyright information:
* Copyright (c) 2005-2011 ifak e.V.
* Copyright (c) 2012 Fraunhofer IML
*
* This program is free software and subject to the MIT license. (For details,
* see the licensing information (LICENSE.txt) you should have received with
* this copy of the software.)
*/
package org.opentcs.guing.components.drawing.course;
import java.awt.Point;
import java.awt.geom.Point2D;
/**
 * A strategy for converting pixel coordinates into real-world coordinates and
 * back. Represents a normal coordinate system in which the first quadrant lies
 * in the upper right corner. The current scale (zoom factor) and the position
 * of the reference point are essential for the conversion.
 *
 * @author Sebastian Naumann (ifak e.V. Magdeburg)
 */
public class NormalCoordinateSystem
implements CoordinateSystem {
/**
 * Creates a new instance of StandardPositionStrategy.
 */
public NormalCoordinateSystem() {
}
@Override
public Point2D toPixel(Point refPointLocation, Point2D realValue, double scaleX, double scaleY) {
// Scale the real-world value down into pixel units.
double xPixel = realValue.getX() / scaleX;
double yPixel = realValue.getY() / scaleY;
// Negative sign for y: the y axis points upwards in the real coordinate
// system, while screen y grows downwards.
// NOTE(review): the negation here covers refPointLocation.y as well, whereas
// toReal() below negates only the difference from the reference point —
// confirm this asymmetry is intended before touching either method.
return new Point2D.Double(refPointLocation.x + xPixel, -(refPointLocation.y + yPixel));
}
@Override
public Point2D toReal(Point refPointLocation, Point pixelValue, double scaleX, double scaleY) {
// Offset from the reference point, in pixels.
int xDiff = pixelValue.x - refPointLocation.x;
int yDiff = pixelValue.y - refPointLocation.y;
// Negative sign for y (y axis points upwards); scale back to real units.
return new Point2D.Double(scaleX * xDiff, -scaleY * yDiff);
}
}
|
/**
* Licensed to The Apereo Foundation under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
*
*
* The Apereo Foundation licenses this file to you under the Educational
* Community License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License
* at:
*
* http://opensource.org/licenses/ecl2.txt
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*
*/
package org.opencastproject.caption.impl;
import static org.opencastproject.util.MimeType.mimeType;
import org.opencastproject.caption.api.Caption;
import org.opencastproject.caption.api.CaptionConverter;
import org.opencastproject.caption.api.CaptionConverterException;
import org.opencastproject.caption.api.CaptionService;
import org.opencastproject.caption.api.UnsupportedCaptionFormatException;
import org.opencastproject.job.api.AbstractJobProducer;
import org.opencastproject.job.api.Job;
import org.opencastproject.mediapackage.MediaPackageElement;
import org.opencastproject.mediapackage.MediaPackageElementBuilder;
import org.opencastproject.mediapackage.MediaPackageElementBuilderFactory;
import org.opencastproject.mediapackage.MediaPackageElementFlavor;
import org.opencastproject.mediapackage.MediaPackageElementParser;
import org.opencastproject.mediapackage.MediaPackageException;
import org.opencastproject.security.api.OrganizationDirectoryService;
import org.opencastproject.security.api.SecurityService;
import org.opencastproject.security.api.UserDirectoryService;
import org.opencastproject.serviceregistry.api.ServiceRegistry;
import org.opencastproject.serviceregistry.api.ServiceRegistryException;
import org.opencastproject.util.IoSupport;
import org.opencastproject.util.LoadUtil;
import org.opencastproject.util.NotFoundException;
import org.opencastproject.workspace.api.Workspace;
import org.apache.commons.io.FilenameUtils;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.StringUtils;
import org.osgi.framework.InvalidSyntaxException;
import org.osgi.framework.ServiceReference;
import org.osgi.service.cm.ConfigurationException;
import org.osgi.service.cm.ManagedService;
import org.osgi.service.component.ComponentContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
import java.util.Arrays;
import java.util.Dictionary;
import java.util.HashMap;
import java.util.List;
import javax.activation.FileTypeMap;
/**
 * Implementation of {@link CaptionService}. Uses {@link ComponentContext} to get all registered
 * {@link CaptionConverter}s. Converters are searched based on <code>caption.format</code> property. If there is no
 * match for specified input or output format {@link UnsupportedCaptionFormatException} is thrown.
 */
public class CaptionServiceImpl extends AbstractJobProducer implements CaptionService, ManagedService {

  /** Logging utility */
  private static final Logger logger = LoggerFactory.getLogger(CaptionServiceImpl.class);

  /** List of available operations on jobs */
  private enum Operation {
    Convert, ConvertWithLanguage
  }

  /** The collection name */
  public static final String COLLECTION = "captions";

  /** The load introduced on the system by creating a caption job */
  public static final float DEFAULT_CAPTION_JOB_LOAD = 0.1f;

  /** The key to look for in the service configuration file to override the {@link #DEFAULT_CAPTION_JOB_LOAD} */
  public static final String CAPTION_JOB_LOAD_KEY = "job.load.caption";

  /** The load introduced on the system by creating a caption job */
  private float captionJobLoad = DEFAULT_CAPTION_JOB_LOAD;

  /** Reference to workspace */
  protected Workspace workspace;

  /** Reference to remote service manager */
  protected ServiceRegistry serviceRegistry;

  /** The security service */
  protected SecurityService securityService = null;

  /** The user directory service */
  protected UserDirectoryService userDirectoryService = null;

  /** The organization directory service */
  protected OrganizationDirectoryService organizationDirectoryService = null;

  /** Component context needed for retrieving Converter Engines */
  protected ComponentContext componentContext = null;

  /**
   * Creates a new caption service.
   */
  public CaptionServiceImpl() {
    super(JOB_TYPE);
  }

  /**
   * Activate this service implementation via the OSGI service component runtime.
   *
   * @param componentContext
   *          the component context
   */
  @Override
  public void activate(ComponentContext componentContext) {
    super.activate(componentContext);
    this.componentContext = componentContext;
  }

  /**
   * {@inheritDoc}
   *
   * @see org.opencastproject.caption.api.CaptionService#convert(org.opencastproject.mediapackage.MediaPackageElement,
   *      java.lang.String, java.lang.String)
   */
  @Override
  public Job convert(MediaPackageElement input, String inputFormat, String outputFormat)
          throws UnsupportedCaptionFormatException, CaptionConverterException, MediaPackageException {
    if (input == null)
      throw new IllegalArgumentException("Input catalog can't be null");
    if (StringUtils.isBlank(inputFormat))
      throw new IllegalArgumentException("Input format is null");
    if (StringUtils.isBlank(outputFormat))
      throw new IllegalArgumentException("Output format is null");
    try {
      return serviceRegistry.createJob(JOB_TYPE, Operation.Convert.toString(),
              Arrays.asList(MediaPackageElementParser.getAsXml(input), inputFormat, outputFormat), captionJobLoad);
    } catch (ServiceRegistryException e) {
      throw new CaptionConverterException("Unable to create a job", e);
    }
  }

  /**
   * {@inheritDoc}
   *
   * @see org.opencastproject.caption.api.CaptionService#convert(org.opencastproject.mediapackage.MediaPackageElement,
   *      java.lang.String, java.lang.String, java.lang.String)
   */
  @Override
  public Job convert(MediaPackageElement input, String inputFormat, String outputFormat, String language)
          throws UnsupportedCaptionFormatException, CaptionConverterException, MediaPackageException {
    if (input == null)
      throw new IllegalArgumentException("Input catalog can't be null");
    if (StringUtils.isBlank(inputFormat))
      throw new IllegalArgumentException("Input format is null");
    if (StringUtils.isBlank(outputFormat))
      throw new IllegalArgumentException("Output format is null");
    if (StringUtils.isBlank(language))
      throw new IllegalArgumentException("Language format is null");
    try {
      return serviceRegistry.createJob(JOB_TYPE, Operation.ConvertWithLanguage.toString(),
              Arrays.asList(MediaPackageElementParser.getAsXml(input), inputFormat, outputFormat, language),
              captionJobLoad);
    } catch (ServiceRegistryException e) {
      throw new CaptionConverterException("Unable to create a job", e);
    }
  }

  /**
   * Converts the captions and returns them in a new catalog.
   *
   * @return the converted catalog
   */
  protected MediaPackageElement convert(Job job, MediaPackageElement input, String inputFormat, String outputFormat,
          String language) throws UnsupportedCaptionFormatException, CaptionConverterException, MediaPackageException {
    try {
      // check parameters
      if (input == null)
        throw new IllegalArgumentException("Input element can't be null");
      if (StringUtils.isBlank(inputFormat))
        throw new IllegalArgumentException("Input format is null");
      if (StringUtils.isBlank(outputFormat))
        throw new IllegalArgumentException("Output format is null");
      // get input file
      File captionsFile;
      try {
        captionsFile = workspace.get(input.getURI());
      } catch (NotFoundException e) {
        throw new CaptionConverterException("Requested media package element " + input + " could not be found.");
      } catch (IOException e) {
        throw new CaptionConverterException("Requested media package element " + input + " could not be accessed.");
      }
      logger.debug("Attempting to convert from {} to {}...", inputFormat, outputFormat);
      List<Caption> collection = null;
      try {
        collection = importCaptions(captionsFile, inputFormat, language);
        logger.debug("Parsing to collection succeeded.");
      } catch (UnsupportedCaptionFormatException e) {
        throw new UnsupportedCaptionFormatException(inputFormat);
      } catch (CaptionConverterException e) {
        throw e;
      }
      URI exported;
      try {
        exported = exportCaptions(collection,
                job.getId() + "." + FilenameUtils.getExtension(captionsFile.getAbsolutePath()), outputFormat, language);
        logger.debug("Exporting captions succeeded.");
      } catch (UnsupportedCaptionFormatException e) {
        throw new UnsupportedCaptionFormatException(outputFormat);
      } catch (IOException e) {
        throw new CaptionConverterException("Could not export caption collection.", e);
      }
      // create catalog and set properties
      CaptionConverter converter = getCaptionConverter(outputFormat);
      MediaPackageElementBuilder elementBuilder = MediaPackageElementBuilderFactory.newInstance().newElementBuilder();
      MediaPackageElement mpe = elementBuilder.elementFromURI(exported, converter.getElementType(),
              new MediaPackageElementFlavor("captions", outputFormat + (language == null ? "" : "+" + language)));
      if (mpe.getMimeType() == null) {
        // Derive the mime type from the exported file's name when the builder did not set one
        String[] mimetype = FileTypeMap.getDefaultFileTypeMap().getContentType(exported.getPath()).split("/");
        mpe.setMimeType(mimeType(mimetype[0], mimetype[1]));
      }
      if (language != null)
        mpe.addTag("lang:" + language);
      return mpe;
    } catch (Exception e) {
      logger.warn("Error converting captions in " + input, e);
      if (e instanceof CaptionConverterException) {
        throw (CaptionConverterException) e;
      } else if (e instanceof UnsupportedCaptionFormatException) {
        throw (UnsupportedCaptionFormatException) e;
      } else {
        throw new CaptionConverterException(e);
      }
    }
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public String[] getLanguageList(MediaPackageElement input, String format) throws UnsupportedCaptionFormatException,
          CaptionConverterException {
    if (format == null) {
      throw new UnsupportedCaptionFormatException("<null>");
    }
    CaptionConverter converter = getCaptionConverter(format);
    if (converter == null) {
      throw new UnsupportedCaptionFormatException(format);
    }
    File captions;
    try {
      captions = workspace.get(input.getURI());
    } catch (NotFoundException e) {
      throw new CaptionConverterException("Requested media package element " + input + " could not be found.");
    } catch (IOException e) {
      throw new CaptionConverterException("Requested media package element " + input + " could not be accessed.");
    }
    FileInputStream stream = null;
    String[] languageList;
    try {
      stream = new FileInputStream(captions);
      languageList = converter.getLanguageList(stream);
    } catch (FileNotFoundException e) {
      throw new CaptionConverterException("Requested file " + captions + " could not be found.");
    } finally {
      IoSupport.closeQuietly(stream);
    }
    return languageList == null ? new String[0] : languageList;
  }

  /**
   * Returns all registered CaptionFormats.
   */
  protected HashMap<String, CaptionConverter> getAvailableCaptionConverters() {
    HashMap<String, CaptionConverter> captionConverters = new HashMap<String, CaptionConverter>();
    ServiceReference[] refs = null;
    try {
      refs = componentContext.getBundleContext().getServiceReferences(CaptionConverter.class.getName(), null);
    } catch (InvalidSyntaxException e) {
      // should not happen since it is called with null argument
    }
    if (refs != null) {
      for (ServiceReference ref : refs) {
        CaptionConverter converter = (CaptionConverter) componentContext.getBundleContext().getService(ref);
        String format = (String) ref.getProperty("caption.format");
        if (captionConverters.containsKey(format)) {
          logger.warn("Caption converter with format {} has already been registered. Ignoring second definition.",
                  format);
        } else {
          captionConverters.put(format, converter);
        }
      }
    }
    return captionConverters;
  }

  /**
   * Returns specific {@link CaptionConverter}. Registry is searched based on formatName, so in order for
   * {@link CaptionConverter} to be found, it has to have <code>caption.format</code> property set with
   * {@link CaptionConverter} format. If none is found, null is returned, if more than one is found then the first
   * reference is returned.
   *
   * @param formatName
   *          name of the caption format
   * @return {@link CaptionConverter} or null if none is found
   */
  protected CaptionConverter getCaptionConverter(String formatName) {
    ServiceReference[] ref = null;
    try {
      ref = componentContext.getBundleContext().getServiceReferences(CaptionConverter.class.getName(),
              "(caption.format=" + formatName + ")");
    } catch (InvalidSyntaxException e) {
      throw new RuntimeException(e);
    }
    if (ref == null) {
      logger.warn("No caption format available for {}.", formatName);
      return null;
    }
    if (ref.length > 1)
      logger.warn("Multiple references for caption format {}! Returning first service reference.", formatName);
    CaptionConverter converter = (CaptionConverter) componentContext.getBundleContext().getService(ref[0]);
    return converter;
  }

  /**
   * Imports captions using registered converter engine and specified language.
   *
   * @param input
   *          file containing captions
   * @param inputFormat
   *          format of imported captions
   * @param language
   *          (optional) captions' language
   * @return {@link List} of parsed captions
   * @throws UnsupportedCaptionFormatException
   *           if there is no registered engine for given format
   * @throws CaptionConverterException
   *           if parser encounters exception
   */
  private List<Caption> importCaptions(File input, String inputFormat, String language)
          throws UnsupportedCaptionFormatException, CaptionConverterException {
    // get input format
    CaptionConverter converter = getCaptionConverter(inputFormat);
    if (converter == null) {
      logger.error("No available caption format found for {}.", inputFormat);
      throw new UnsupportedCaptionFormatException(inputFormat);
    }
    FileInputStream fileStream = null;
    try {
      fileStream = new FileInputStream(input);
      List<Caption> collection = converter.importCaption(fileStream, language);
      return collection;
    } catch (FileNotFoundException e) {
      throw new CaptionConverterException("Could not locate file " + input);
    } finally {
      IOUtils.closeQuietly(fileStream);
    }
  }

  /**
   * Exports captions {@link List} to specified format. Extension is added to exported file name. Throws
   * {@link UnsupportedCaptionFormatException} if format is not supported.
   *
   * @param captions
   *          {@link List} to be exported
   * @param outputName
   *          name under which exported captions will be stored
   * @param outputFormat
   *          format of exported collection
   * @param language
   *          (optional) captions' language
   * @throws UnsupportedCaptionFormatException
   *           if there is no registered engine for given format
   * @return location of converted captions
   * @throws IOException
   *           if exception occurs while writing to output stream
   */
  private URI exportCaptions(List<Caption> captions, String outputName, String outputFormat, String language)
          throws UnsupportedCaptionFormatException, IOException {
    CaptionConverter converter = getCaptionConverter(outputFormat);
    if (converter == null) {
      logger.error("No available caption format found for {}.", outputFormat);
      throw new UnsupportedCaptionFormatException(outputFormat);
    }
    // TODO instead of first writing it all in memory, write it directly to disk
    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
    try {
      converter.exportCaption(outputStream, captions, language);
    } catch (IOException e) {
      // since we're writing to memory, this should not happen
    }
    ByteArrayInputStream in = new ByteArrayInputStream(outputStream.toByteArray());
    return workspace.putInCollection(COLLECTION, outputName + "." + converter.getExtension(), in);
  }

  /**
   * {@inheritDoc}
   *
   * @see org.opencastproject.job.api.AbstractJobProducer#process(Job)
   */
  @Override
  protected String process(Job job) throws Exception {
    Operation op = null;
    String operation = job.getOperation();
    List<String> arguments = job.getArguments();
    try {
      op = Operation.valueOf(operation);
      MediaPackageElement catalog = MediaPackageElementParser.getFromXml(arguments.get(0));
      String inputFormat = arguments.get(1);
      String outputFormat = arguments.get(2);
      MediaPackageElement resultingCatalog = null;
      switch (op) {
        case Convert:
          resultingCatalog = convert(job, catalog, inputFormat, outputFormat, null);
          return MediaPackageElementParser.getAsXml(resultingCatalog);
        case ConvertWithLanguage:
          String language = arguments.get(3);
          resultingCatalog = convert(job, catalog, inputFormat, outputFormat, language);
          return MediaPackageElementParser.getAsXml(resultingCatalog);
        default:
          throw new IllegalStateException("Don't know how to handle operation '" + operation + "'");
      }
    } catch (IllegalArgumentException e) {
      throw new ServiceRegistryException("This service can't handle operations of type '" + op + "'", e);
    } catch (IndexOutOfBoundsException e) {
      throw new ServiceRegistryException("This argument list for operation '" + op + "' does not meet expectations", e);
    } catch (Exception e) {
      throw new ServiceRegistryException("Error handling operation '" + op + "'", e);
    }
  }

  /**
   * Setter for workspace via declarative activation
   */
  protected void setWorkspace(Workspace workspace) {
    this.workspace = workspace;
  }

  /**
   * Setter for remote service manager via declarative activation
   */
  protected void setServiceRegistry(ServiceRegistry serviceRegistry) {
    this.serviceRegistry = serviceRegistry;
  }

  /**
   * Callback for setting the security service.
   *
   * @param securityService
   *          the securityService to set
   */
  public void setSecurityService(SecurityService securityService) {
    this.securityService = securityService;
  }

  /**
   * Callback for setting the user directory service.
   *
   * @param userDirectoryService
   *          the userDirectoryService to set
   */
  public void setUserDirectoryService(UserDirectoryService userDirectoryService) {
    this.userDirectoryService = userDirectoryService;
  }

  /**
   * Sets a reference to the organization directory service.
   *
   * @param organizationDirectory
   *          the organization directory
   */
  public void setOrganizationDirectoryService(OrganizationDirectoryService organizationDirectory) {
    this.organizationDirectoryService = organizationDirectory;
  }

  /**
   * {@inheritDoc}
   *
   * @see org.opencastproject.job.api.AbstractJobProducer#getSecurityService()
   */
  @Override
  protected SecurityService getSecurityService() {
    return securityService;
  }

  /**
   * {@inheritDoc}
   *
   * @see org.opencastproject.job.api.AbstractJobProducer#getOrganizationDirectoryService()
   */
  @Override
  protected OrganizationDirectoryService getOrganizationDirectoryService() {
    return organizationDirectoryService;
  }

  /**
   * {@inheritDoc}
   *
   * @see org.opencastproject.job.api.AbstractJobProducer#getUserDirectoryService()
   */
  @Override
  protected UserDirectoryService getUserDirectoryService() {
    return userDirectoryService;
  }

  /**
   * {@inheritDoc}
   *
   * @see org.opencastproject.job.api.AbstractJobProducer#getServiceRegistry()
   */
  @Override
  protected ServiceRegistry getServiceRegistry() {
    return serviceRegistry;
  }

  @Override
  public void updated(@SuppressWarnings("rawtypes") Dictionary properties) throws ConfigurationException {
    captionJobLoad = LoadUtil.getConfiguredLoadValue(properties, CAPTION_JOB_LOAD_KEY, DEFAULT_CAPTION_JOB_LOAD,
            serviceRegistry);
  }
}
|
package com.djekgrif.alternativeradio.common.events;
/**
 * Payload-free marker event type.
 * NOTE(review): presumably posted to notify subscribers that configuration
 * data has changed — confirm against the publishers/subscribers of this type.
 *
 * Created by djek-grif on 6/8/17.
 */
public class UpdateConfigurationDataEvent {
}
|
package org.jeffpiazza.derby;
import jssc.*;
import org.jeffpiazza.derby.devices.TimerDevice;
import org.jeffpiazza.derby.gui.TimerGui;
import javax.swing.*;
import org.jeffpiazza.derby.devices.AllDeviceTypes;
import org.jeffpiazza.derby.devices.NewBoldDevice;
import org.jeffpiazza.derby.devices.SimulatedDevice;
import org.jeffpiazza.derby.devices.SmartLineDevice;
import org.jeffpiazza.derby.devices.TheJudgeDevice;
import org.jeffpiazza.derby.devices.TimerDeviceCommon;
import org.jeffpiazza.derby.devices.TimerTask;
import org.jeffpiazza.derby.serialport.PlaybackSerialPortWrapper;
// Three threads for three "actors":
// timer polling loop runs on main thread,
// HttpTask runs on another thread,
// GUI event dispatch runs on a third thread.
public class TimerMain {

  /**
   * Prints command-line usage information.
   *
   * All output is written to System.err (the original implementation mixed
   * System.out and System.err, which interleaves badly when streams are
   * redirected separately).
   */
  public static void usage() {
    System.err.println("Usage: [options] <base-url>");
    System.err.println(" -h or -help or --help: This message");
    System.err.println(" -v: Show version");
    System.err.println(" -x: Run headless, without gui.");
    System.err.println(" -logdir <directory>: write log files in <directory>");
    System.err.println(" instead of the current directory.");
    System.err.println(" -t: Trace non-heartbeat messages sent");
    System.err.println(" -th: Trace heartbeat messages sent");
    System.err.println(" -r: Show responses to traced messages");
    System.err.println(
        " -u <user>: Specify username for authenticating to web server");
    System.err.println(
        " -p <password>: Specify password for authenticating to web server");
    System.err.println(
        " -n <port name>: Use specified port name instead of searching");
    System.err.println(
        " -min-gate-time <milliseconds>: Ignore gate transitions shorter than <milliseconds>");
    System.err.println(
        " -d <device name>: Use specified device instead of trying to identify");
    System.err.println(" Known devices:");
    AllDeviceTypes.listDeviceClassNames();
    System.err.println(
        " -delay-reset-after-race <nsec>: how long after race over");
    System.err.println(
        " before timer will be reset, default 10,");
    System.err.println(
        " for SmartLine, DerbyMagic, and NewBold");
    System.err.println(" -simulate-timer: Simulate timer device (for testing)");
    System.err.println(" -lanes <n>: Specify number of lanes to report");
    System.err.println(" -pace <nsec>: Staging pace (seconds between heats)");
    System.err.println(" -simulate-host: Exercise timer with simulated host");
    System.err.println(" -lanes <n>: Specify number of lanes for scheduling");
    System.err.println();
    System.err.println("Experimental flags for The Judge only:");
    System.err.println(
        " -reset-on-ready: reset timer when next heat scheduled");
    System.err.println(
        " -reset-on-race-over: reset timer immediately after Race Over from timer");
    System.err.println();
  }

  /**
   * Creates the LogWriter and records the running version in the serial-port
   * log. Exits the process on any failure — without a log writer there is no
   * way to report later problems.
   */
  private static LogWriter makeLogWriter() {
    try {
      LogWriter logwriter = new LogWriter();
      logwriter.serialPortLogInternal("derby-timer.jar version " + Version.get());
      return logwriter;
    } catch (Throwable t) {
      t.printStackTrace();
      System.exit(1);
      return null;  // unreachable; keeps the compiler satisfied
    }
  }

  /**
   * Entry point: parses command-line flags, then starts the GUI or the
   * headless HTTP task plus the timer polling loop on this thread.
   */
  public static void main(String[] args) {
    String username = null;
    String password = null;
    String portname = null;
    String devicename = null;
    HttpTask.MessageTracer traceHeartbeats = null;
    boolean traceResponses = false;
    boolean showGui = true;
    boolean simulateTimer = false;
    boolean simulateHost = false;
    boolean recording = false;
    boolean playback = false;
    LogWriter logwriter = null;
    // Include HTTP traffic in the timer log:
    HttpTask.MessageTracer traceMessages = null;

    int consumed_args = 0;
    while (consumed_args < args.length && args[consumed_args].startsWith("-")) {
      final String arg = args[consumed_args];
      // True when there is at least one more argument available as this flag's value.
      final boolean has_value = (args.length - consumed_args) > 1;
      if (arg.equals("-v")) {
        System.out.println("DerbyNet version " + Version.get());
        System.exit(0);
      } else if (arg.equals("-x")) {
        showGui = false;
        ++consumed_args;
      } else if (arg.equals("-logdir") && has_value) {
        LogFileFactory.setLogFileDirectory(args[consumed_args + 1]);
        consumed_args += 2;
      } else if (arg.equals("-t")) {
        StdoutMessageTrace smt = new StdoutMessageTrace();
        smt.traceResponses = traceResponses;
        if (logwriter == null) {
          logwriter = makeLogWriter();
        }
        traceMessages = new CombinedMessageTracer(smt, logwriter);
        ++consumed_args;
      } else if (arg.equals("-th")) {
        StdoutMessageTrace smt = new StdoutMessageTrace();
        smt.traceResponses = traceResponses;
        if (logwriter == null) {
          logwriter = makeLogWriter();
        }
        traceHeartbeats = new CombinedMessageTracer(smt, logwriter);
        ++consumed_args;
      } else if (arg.equals("-r")) { // Won't have effect unless it precedes -t, -th
        traceResponses = true;
        ++consumed_args;
      } else if (arg.equals("-u") && has_value) {
        username = args[consumed_args + 1];
        consumed_args += 2;
      } else if (arg.equals("-p") && has_value) {
        password = args[consumed_args + 1];
        consumed_args += 2;
      } else if ((arg.equals("-n") || arg.equals("-s")) && has_value) {
        portname = args[consumed_args + 1];
        consumed_args += 2;
      } else if (arg.equals("-d") && has_value) {
        devicename = args[consumed_args + 1];
        consumed_args += 2;
      } else if (arg.equals("-simulate-timer")) {
        simulateTimer = true;
        ++consumed_args;
      } else if (arg.equals("-simulate-host")) {
        simulateHost = true;
        ++consumed_args;
      } else if (arg.equals("-lanes") && has_value) {
        int nlanes = Integer.parseInt(args[consumed_args + 1]);
        SimulatedDevice.setNumberOfLanes(nlanes);
        SimulatedClientSession.setNumberOfLanes(nlanes);
        consumed_args += 2;
      } else if (arg.equals("-pace") && has_value) {
        SimulatedDevice.setStagingTime(Integer.parseInt(args[consumed_args + 1]));
        consumed_args += 2;
      } else if (arg.equals("-reset-on-ready")) {
        TheJudgeDevice.setResetOnReady(true);
        ++consumed_args;
      } else if (arg.equals("-reset-on-race-over")) {
        TheJudgeDevice.setResetOnRaceOver(true);
        ++consumed_args;
      } else if ((arg.equals("-delay-reset-after-race")
                  || arg.equals("-reset-delay-on-race-over")) && has_value) {
        // Bug fix: previously this branch read args[consumed_args + 1] without
        // checking has_value, throwing ArrayIndexOutOfBoundsException when the
        // flag was the last argument. Now a bare flag falls through to usage().
        long millis = 1000 * Integer.parseInt(args[consumed_args + 1]);
        NewBoldDevice.setPostRaceDisplayDurationMillis(millis);
        TimerDeviceCommon.setPostRaceDisplayDurationMillis(millis);
        consumed_args += 2;
      } else if (arg.equals("-min-gate-time") && has_value) {
        TimerDeviceCommon.setMinimumGateTimeMillis(
            Integer.parseInt(args[consumed_args + 1]));
        consumed_args += 2;
      } else if (arg.equals("-record")) {
        recording = true;
        ++consumed_args;
      } else if (arg.equals("-playback") && has_value) {
        playback = true;
        PlaybackSerialPortWrapper.setFilename(args[consumed_args + 1]);
        consumed_args += 2;
      } else {
        usage();
        System.exit(1);
      }
    }

    String base_url = null;
    if (consumed_args < args.length) {
      base_url = args[consumed_args];
    }
    if (!showGui) {
      // Headless mode needs a server URL (unless simulating) and credentials.
      if (base_url == null && !simulateHost) {
        usage();
        System.exit(1);
      }
      if (username == null) {
        username = "Timer";
      }
      if (password == null) {
        password = "";
      }
    }
    if (logwriter == null) {
      logwriter = makeLogWriter();
    }
    if (traceMessages == null) {
      traceMessages = logwriter;
    }

    ConnectorImpl connector = new ConnectorImpl(traceMessages);
    SimulatedClientSession simulatedSession
        = simulateHost ? new SimulatedClientSession(logwriter) : null;
    try {
      TimerGui timerGui = null;
      if (showGui) {
        timerGui = startTimerGui(traceMessages, traceHeartbeats,
                                 connector, base_url,
                                 username, password, simulatedSession);
      } else {
        final ClientSession clientSession
            = simulatedSession == null ? new ClientSession(base_url)
              : simulatedSession;
        HttpTask.start(username, password, clientSession,
                       traceMessages, traceHeartbeats, connector,
                       new HttpTask.LoginCallback() {
                         @Override
                         public void onLoginSuccess() {
                           System.err.println("Successful login");
                         }

                         @Override
                         public void onLoginFailed(String message) {
                           System.err.println("Unsuccessful login: " + message);
                           System.exit(1);
                         }
                       });
      }
      // The timer polling loop runs on this (the main) thread.
      TimerTask timerTask = new TimerTask(portname, devicename, timerGui,
                                          logwriter, connector);
      if (simulateTimer) {
        timerTask.setSimulatedTimer();
      }
      if (recording) {
        timerTask.setRecording();
      }
      if (playback) {
        timerTask.setPlayback();
      }
      timerTask.run();
    } catch (Throwable t) {
      logwriter.stacktrace(t);
    }
  }

  /**
   * Constructs the TimerGui, shows it on the Swing event-dispatch thread, and
   * seeds it with any URL/credentials/session supplied on the command line.
   */
  private static TimerGui startTimerGui(HttpTask.MessageTracer traceMessages,
                                        HttpTask.MessageTracer traceHeartbeats,
                                        ConnectorImpl connector, String base_url,
                                        String username, String password,
                                        ClientSession simulatedSession) {
    final TimerGui timerGui = new TimerGui(traceMessages, traceHeartbeats,
                                           connector);
    SwingUtilities.invokeLater(new Runnable() {
      public void run() {
        timerGui.show();
      }
    });
    if (base_url != null) {
      timerGui.setUrl(base_url);
    }
    if (username != null || password != null) {
      timerGui.setRoleAndPassword(username, password);
    }
    if (simulatedSession != null) {
      timerGui.setClientSession(simulatedSession);
    }
    return timerGui;
  }

  // Allow the timer device and web server connection to come up in either
  // order, or perhaps not at all; when they're both established, wire together
  // callbacks and send hello with lane count to web server.
  public static class ConnectorImpl implements Connector {
    private HttpTask httpTask;
    private TimerTask timerTask;
    private HttpTask.MessageTracer traceMessages;

    public ConnectorImpl(HttpTask.MessageTracer traceMessages) {
      this.traceMessages = traceMessages;
    }

    @Override
    public synchronized void setHttpTask(HttpTask httpTask) {
      this.httpTask = httpTask;
      maybeWireTogether();
    }

    @Override
    public synchronized void setTimerTask(TimerTask deviceTask) {
      this.timerTask = deviceTask;
      maybeWireTogether();
    }

    /** Wires callbacks once both the HTTP task and a detected timer device exist. */
    private void maybeWireTogether() {
      if (httpTask != null && timerTask != null && timerTask.device() != null) {
        wireTogether(httpTask, timerTask, traceMessages);
        int nlanes = 0;
        try {
          nlanes = timerTask.device().getNumberOfLanes();
        } catch (SerialPortException e) {
          e.printStackTrace();
        }
        httpTask.sendIdentified(
            nlanes, timerTask.device().getClass().getSimpleName(),
            timerTask.device().getTimerIdentifier());
      }
    }

    // Registers callbacks that allow the httpTask and timer device to
    // communicate asynchronously.
    public static void wireTogether(final HttpTask httpTask,
                                    final TimerTask timerTask,
                                    final HttpTask.MessageTracer traceMessages) {
      if (traceMessages != null) {
        traceMessages.traceInternal("Timer detected.");
      }
      httpTask.registerTimerHealthCallback(timerTask);
      httpTask.registerHeatReadyCallback(new HttpTask.HeatReadyCallback() {
        public void onHeatReady(int roundid, int heat, int laneMask) {
          try {
            if (traceMessages != null) {
              traceMessages.traceInternal(
                  "Heat ready: roundid=" + roundid + ", heat=" + heat);
            }
            timerTask.device().prepareHeat(roundid, heat, laneMask);
          } catch (Throwable t) {
            // TODO: details
            t.printStackTrace();
            httpTask.queueMessage(
                new Message.Malfunction(false, "Can't ready timer."));
          }
        }
      });
      httpTask.registerAbortHeatCallback(new HttpTask.AbortHeatCallback() {
        public void onAbortHeat() {
          if (traceMessages != null) {
            traceMessages.traceInternal("AbortHeat received");
          }
          try {
            timerTask.device().abortHeat();
          } catch (Throwable t) {
            t.printStackTrace();
          }
        }
      });
      timerTask.device().registerRaceStartedCallback(
          new TimerDevice.RaceStartedCallback() {
            public void raceStarted() {
              try {
                httpTask.queueMessage(new Message.Started());
              } catch (Throwable t) {
                // Best effort: a failed notification must not kill the callback chain.
              }
            }
          });
      timerTask.device().registerRaceFinishedCallback(
          new TimerDevice.RaceFinishedCallback() {
            public void raceFinished(int roundid, int heat,
                                     Message.LaneResult[] results) {
              // Rely on recipient to ignore if not expecting any results
              try {
                httpTask.queueMessage(new Message.Finished(roundid, heat, results));
              } catch (Throwable t) {
                // Best effort: a failed notification must not kill the callback chain.
              }
            }
          });
      timerTask.device().registerTimerMalfunctionCallback(
          new TimerDevice.TimerMalfunctionCallback() {
            public void malfunction(boolean detectable, String msg) {
              try {
                httpTask.queueMessage(new Message.Malfunction(detectable, msg));
              } catch (Throwable t) {
                // Best effort: a failed notification must not kill the callback chain.
              }
            }
          });
    }
  }
}
|
package bomberone.model.world;
import bomberone.controllers.match.event.WorldEventListener;
import bomberone.model.bomber.BomberImpl;
import bomberone.model.factory.GameObjectFactory;
import bomberone.model.world.collection.GameObjectCollection;
/**
 * The world of gameplay: owns the collection of GameObjects, the bomber,
 * and the factory used to create new objects, and drives the per-frame
 * collision, respawn, boundary and explosion checks.
 */
public interface World {
    /**
     * Tells whether enemy respawning is currently enabled.
     *
     * @return true if the enemies can respawn
     */
    boolean getRespawn();
    /**
     * @return the collection holding all the GameObjects in the world
     */
    GameObjectCollection getGameObjectCollection();
    /**
     * @return the factory used to create new GameObjects
     */
    GameObjectFactory getGameObjectFactory();
    /**
     * Set the event listener.
     *
     * @param event the listener to be notified of world events
     */
    void setEventListener(WorldEventListener event);
    /**
     * @return the bomber
     */
    BomberImpl getBomber();
    /**
     * Update the state of the game and calls the update of each GameObject.
     *
     * @param time the time step used to advance each GameObject
     *             (units set by the game loop — presumably milliseconds
     *             or frames; confirm against the caller)
     */
    void updateState(int time);
    /**
     * Check if some of object breakable is colliding with fire, if the bomber picks
     * up one PowerUp or if enemy hit Bomber.
     */
    void checkCollision();
    /**
     * Check if the enemy can be respawned and eventually respawns them.
     */
    void checkRespawn();
    /**
     * Check if bomber or enemy is colliding with Wall or Box.
     */
    void checkBoundary();
    /**
     * Check if a bomb is exploded.
     */
    void checkExplosion();
}
|
/*
* SPDX-License-Identifier: Apache-2.0
*
* The OpenSearch Contributors require contributions made to
* this file be licensed under the Apache-2.0 license or a
* compatible open source license.
*/
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Modifications Copyright OpenSearch Contributors. See
* GitHub history for details.
*/
package org.opensearch.client.indices;
import org.opensearch.client.TimedRequest;
import org.opensearch.client.Validatable;
import org.opensearch.common.Nullable;
import org.opensearch.common.unit.TimeValue;
/**
 * A request to read the content of component templates
 */
public class GetComponentTemplatesRequest implements Validatable {
    private final String name;
    private TimeValue masterNodeTimeout = TimedRequest.DEFAULT_MASTER_NODE_TIMEOUT;
    private boolean local = false;

    /**
     * Create a request to read the content of component template. If no template name is provided, all templates will
     * be read
     *
     * @param name the name of template to read
     */
    public GetComponentTemplatesRequest(String name) {
        this.name = name;
    }

    /**
     * @return the name of component template this request is requesting
     */
    public String name() {
        return name;
    }

    /**
     * @return the timeout for waiting for the master node to respond
     */
    public TimeValue getMasterNodeTimeout() {
        return masterNodeTimeout;
    }

    /**
     * Sets the timeout for waiting for the master node to respond; may be {@code null}.
     */
    public void setMasterNodeTimeout(@Nullable TimeValue masterNodeTimeout) {
        this.masterNodeTimeout = masterNodeTimeout;
    }

    /**
     * Sets the master node timeout from its textual form (e.g. {@code "30s"}).
     */
    public void setMasterNodeTimeout(String masterNodeTimeout) {
        setMasterNodeTimeout(TimeValue.parseTimeValue(masterNodeTimeout, getClass().getSimpleName() + ".masterNodeTimeout"));
    }

    /**
     * @return true if this request is to read from the local cluster state, rather than the master node - false otherwise
     */
    public boolean isLocal() {
        return local;
    }

    /**
     * Chooses whether to read from the local cluster state instead of the master node.
     */
    public void setLocal(boolean local) {
        this.local = local;
    }
}
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.wipro.ats.bdre.imcrawler.robotstxt;
import java.util.SortedSet;
import java.util.TreeSet;
/**
 * A sorted set of string prefixes that stays prefix-minimal: inserting a
 * string whose prefix is already present is a no-op, and inserting a string
 * evicts every existing entry it is a prefix of.
 */
public class RuleSet extends TreeSet<String> {
    private static final long serialVersionUID = 1L;

    /**
     * Adds {@code str} unless an existing element is already a prefix of it;
     * after a successful insert, drops any stored strings that {@code str}
     * is a prefix of.
     *
     * @return true if the set gained a new element, false otherwise
     */
    @Override
    public boolean add(String str) {
        SortedSet<String> below = headSet(str);
        // Any prefix of str sorts immediately before it, and redundant
        // prefixes are never stored, so only the closest smaller element
        // needs to be examined.
        if (!below.isEmpty() && str.startsWith(below.last())) {
            return false;
        }
        boolean changed = super.add(str);
        // "\0" is the lowest character, so this tail view begins just past
        // str itself; walk forward while entries still start with str.
        SortedSet<String> above = tailSet(str + "\0");
        while (!above.isEmpty() && above.first().startsWith(str)) {
            above.remove(above.first());
        }
        return changed;
    }

    /**
     * @return true if this set contains {@code s} itself or any prefix of it
     */
    public boolean containsPrefixOf(String s) {
        SortedSet<String> below = headSet(s);
        // Redundant prefixes were eliminated on insert, so checking the
        // nearest smaller element is sufficient.
        if (!below.isEmpty() && s.startsWith(below.last())) {
            return true;
        }
        // headSet excludes s itself, so an exact match needs its own check.
        return contains(s);
    }
}
|
package rip.bolt.nerve.privateserver;
import java.util.Optional;
import javax.inject.Inject;
import com.sk89q.minecraft.util.commands.Command;
import com.sk89q.minecraft.util.commands.CommandContext;
import com.velocitypowered.api.command.CommandSource;
import com.velocitypowered.api.proxy.Player;
import com.velocitypowered.api.proxy.ProxyServer;
import com.velocitypowered.api.proxy.server.RegisteredServer;
import net.kyori.adventure.text.format.NamedTextColor;
import rip.bolt.nerve.inject.commands.Commands;
import rip.bolt.nerve.utils.Executor;
import rip.bolt.nerve.utils.Messages;
public class PrivateCommand implements Commands {
private Executor executor;
private ProxyServer server;
private PrivateServerRequester requester;
@Inject
public PrivateCommand(Executor executor, ProxyServer server, PrivateServerRequester requester) {
this.executor = executor;
this.server = server;
this.requester = requester;
}
@Command(aliases = { "private" }, desc = "Request a private server", min = 0, max = 1)
public void execute(CommandContext args, CommandSource sender) {
executor.async(() -> {
boolean senderIsStaff = sender.hasPermission("nerve.staff");
boolean senderOwnsPrivate = sender.hasPermission("nerve.request");
boolean senderCanRequest = senderIsStaff || senderOwnsPrivate;
int maxArgs = senderIsStaff ? 1 : 0;
if (!senderCanRequest) {
sender.sendMessage(Messages.noPermsPrivateServer());
return;
}
if (args.argsLength() > maxArgs) {
if (senderIsStaff)
sender.sendMessage(Messages.colour(NamedTextColor.RED, "/private [player]"));
else
sender.sendMessage(Messages.colour(NamedTextColor.RED, "/private"));
return;
}
if (args.argsLength() == 0) {
if (!(sender instanceof Player)) {
sender.sendMessage(Messages.colour(NamedTextColor.RED, "/private <player>"));
return;
}
Player player = (Player) sender;
Optional<RegisteredServer> conflictingServer = server.getServer(player.getUsername());
if (conflictingServer.isPresent()) {
player.sendMessage(Messages.colour(NamedTextColor.GOLD, "Connecting you to " + conflictingServer.get().getServerInfo().getName() + "..."));
player.createConnectionRequest(conflictingServer.get()).fireAndForget();
return;
}
if (requester.exists(player)) {
player.sendMessage(Messages.colour(NamedTextColor.GOLD, "You have already requested a private server! Please wait for it to start up."));
return;
}
player.sendMessage(Messages.colour(NamedTextColor.GOLD, "Requesting private server..."));
if (!requester.request(player.getUsername()))
player.sendMessage(Messages.colour(NamedTextColor.RED, "An error occured while requesting your private server!"));
return;
} else if (args.argsLength() == 1) {
Optional<Player> optionalTarget = server.getPlayer(args.getString(0));
if (!optionalTarget.isPresent()) {
sender.sendMessage(Messages.colour(NamedTextColor.RED, "Player not found."));
return;
}
Player target = optionalTarget.get();
Optional<RegisteredServer> conflictingServer = server.getServer(target.getUsername());
if (conflictingServer.isPresent()) {
sender.sendMessage(Messages.colour(NamedTextColor.RED, target.getUsername() + "'s private server is already running!"));
return;
}
if (requester.exists(target)) {
sender.sendMessage(Messages.colour(NamedTextColor.GOLD, target.getUsername() + " has already requested a private server!"));
return;
}
sender.sendMessage(Messages.colour(NamedTextColor.GOLD, "Requesting private server for " + target.getUsername() + "..."));
if (!requester.request(target.getUsername()))
sender.sendMessage(Messages.colour(NamedTextColor.RED, "An error occured while requesting a private server!"));
return;
}
});
}
}
|
package com.phoegel.factorymethod.example;
import java.util.Scanner;
/**
 * Factory-method demo entry point: prompts for a mirror type on stdin and
 * lets the factory pick the concrete {@code Mirror} to instantiate.
 */
public class Main {
    public static void main(String[] args) {
        // 客户端输入一个类型
        System.out.println("请输入记录类型:\nround:圆形镜子\tsquare:方形镜子");
        // BUG FIX: the Scanner was never closed; try-with-resources releases
        // it when main exits (closing stdin is fine at program end).
        try (Scanner sc = new Scanner(System.in)) {
            String type = sc.nextLine();
            Mirror mirror = MirrorFactory.getMirror(type);
            mirror.look();
        }
    }
}
|
package cn.edu.ncut.hikvision_graduation;
import android.Manifest;
import android.content.DialogInterface;
import android.content.pm.PackageManager;
import android.os.Bundle;
import android.os.Environment;
import android.support.annotation.NonNull;
import android.support.v4.app.ActivityCompat;
import android.support.v4.content.ContextCompat;
import android.support.v7.app.AlertDialog;
import android.view.LayoutInflater;
import android.view.SurfaceView;
import android.view.View;
import android.view.WindowManager;
import android.widget.Button;
import android.widget.EditText;
import com.hikvision.netsdk.HCNetSDK;
import java.util.ArrayList;
import java.util.List;
import cn.edu.ncut.hikvision_graduation.pojo.CameraDevice;
import cn.edu.ncut.hikvision_graduation.util.ActivityCollector;
import cn.edu.ncut.hikvision_graduation.util.BaseActivity;
import cn.edu.ncut.hikvision_graduation.util.LogUtils;
import cn.edu.ncut.hikvision_graduation.util.ToastUtil;
import cn.edu.ncut.hikvision_graduation.widget.CustomDialog;
/**
 * Builds on Test6Activity by adding a login flow to the surface click
 * handler: tapping an offline preview pane pops a login dialog, while
 * tapping a logged-in pane offers to log the device out again.
 */
public class Test7Activity extends BaseActivity implements View.OnClickListener {
    // NOTE(review): tag still says "MainActivity"; kept as-is so existing
    // log filters continue to match.
    private final static String TAG = "MainActivity";
    private static final String SD_CARD_PATH = Environment.getExternalStorageDirectory().getAbsolutePath() + "/HikVision/sdklog";
    // Singleton handle to the HikVision network SDK.
    static HCNetSDK hCNetSDK = HCNetSDK.getInstance();
    // static Player player = Player.getInstance();
    // 3x3 grid of preview surfaces.
    private SurfaceView mrow_1_sv_1, mrow_1_sv_2, mrow_1_sv_3;
    private SurfaceView mrow_2_sv_1, mrow_2_sv_2, mrow_2_sv_3;
    private SurfaceView mrow_3_sv_1, mrow_3_sv_2, mrow_3_sv_3;
    // Last SDK login handle; -1 means no successful login yet.
    private int m_iLogID = -1;
    // Every logged-in DeviceMain is collected here so its resources can be
    // released in a single pass when the user exits via the back key.
    private List<DeviceMain> deviceMainList = new ArrayList<>();
    // Device under test, previewed on the row-1/column-2 surface.
    private CameraDevice cameradevice_test = new CameraDevice();
    private DeviceMain device_test = new DeviceMain();

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);
        // Keep the screen on while previews are visible.
        this.getWindow().addFlags(WindowManager.LayoutParams.FLAG_KEEP_SCREEN_ON);
        // Runtime-permission prompt, currently disabled.
        // riskAuthorization();
        if (!initeSdk()) {
            this.finish();
            return;
        }
        if (!initeActivity()) {
            this.finish();
            return;
        }
    }

    /**
     * Initialize the HikVision network SDK.
     *
     * @return true - success; false - fail
     */
    private boolean initeSdk() {
        if (!hCNetSDK.NET_DVR_Init()) {
            LogUtils.logE(TAG, "HCNetSDK init is failed!:" + hCNetSDK.NET_DVR_GetLastError());
            return false;
        }
        return true;
    }

    /**
     * GUI init: look up the nine preview surfaces and register this activity
     * as their click listener.
     *
     * @return true on success, false on failure
     */
    private boolean initeActivity() {
        mrow_1_sv_1 = (SurfaceView) findViewById(R.id.row_1_sv_1);
        mrow_1_sv_2 = (SurfaceView) findViewById(R.id.row_1_sv_2);
        mrow_1_sv_3 = (SurfaceView) findViewById(R.id.row_1_sv_3);
        mrow_2_sv_1 = (SurfaceView) findViewById(R.id.row_2_sv_1);
        mrow_2_sv_2 = (SurfaceView) findViewById(R.id.row_2_sv_2);
        mrow_2_sv_3 = (SurfaceView) findViewById(R.id.row_2_sv_3);
        mrow_3_sv_1 = (SurfaceView) findViewById(R.id.row_3_sv_1);
        mrow_3_sv_2 = (SurfaceView) findViewById(R.id.row_3_sv_2);
        mrow_3_sv_3 = (SurfaceView) findViewById(R.id.row_3_sv_3);
        mrow_1_sv_1.setOnClickListener(this);
        mrow_1_sv_2.setOnClickListener(this);
        mrow_1_sv_3.setOnClickListener(this);
        mrow_2_sv_1.setOnClickListener(this);
        mrow_2_sv_2.setOnClickListener(this);
        mrow_2_sv_3.setOnClickListener(this);
        mrow_3_sv_1.setOnClickListener(this);
        mrow_3_sv_2.setOnClickListener(this);
        mrow_3_sv_3.setOnClickListener(this);
        return true;
    }

    @Override
    public void onClick(View v) {
        switch (v.getId()) {
            case R.id.row_1_sv_1:
                break;
            case R.id.row_1_sv_2:
                // loginID < 0 means not logged in, or the last login failed.
                int isLogin = device_test.getLoginID();
                if (isLogin < 0) {
                    // Not logged in: collect credentials in a dialog.
                    AlertDialog.Builder builder = new AlertDialog.Builder(Test7Activity.this);
                    builder.setTitle("请先登陆");
                    // Inflate a custom layout as the dialog content.
                    View view = LayoutInflater.from(Test7Activity.this).inflate(R.layout.layout_dialog, null);
                    builder.setView(view);
                    // Locals captured by the anonymous listener must be final.
                    final EditText et_IPaddress = (EditText) view.findViewById(R.id.et_ip_address);
                    final EditText et_UserName = (EditText) view.findViewById(R.id.et_username);
                    final EditText et_PassWord = (EditText) view.findViewById(R.id.et_password);
                    final EditText et_Port = (EditText) view.findViewById(R.id.et_port);
                    builder.setCancelable(false);
                    builder.setPositiveButton("确定登陆", new DialogInterface.OnClickListener() {
                        @Override
                        public void onClick(DialogInterface dialog, int which) {
                            // Read the credentials the user entered.
                            String ip = et_IPaddress.getText().toString().trim();
                            String username = et_UserName.getText().toString().trim();
                            String password = et_PassWord.getText().toString().trim();
                            // TODO(review): an empty or non-numeric port field
                            // throws NumberFormatException here.
                            int port = Integer.parseInt(et_Port.getText().toString().trim());
                            // Apply the login parameters.
                            cameradevice_test.setIp(ip);
                            cameradevice_test.setUsername(username);
                            cameradevice_test.setPassword(password);
                            cameradevice_test.setPort(port);
                            device_test.setPara(cameradevice_test);
                            // Attach the playback surface.
                            device_test.setHolder(mrow_1_sv_2.getHolder());
                            // Renamed from m_iLogID: the old local shadowed the
                            // field of the same name.
                            int loginResult = device_test.loginDevice();
                            if (loginResult < 0) {
                                LogUtils.logE(TAG, "登陆失败" + loginResult);
                            }
                            // NOTE(review): the success toast and preview start
                            // even when loginResult < 0 — consider bailing out
                            // on failure instead.
                            device_test.setExceptionCallBack();
                            device_test.startSinglePreview();
                            // Track the device so onBackPressed can release it.
                            deviceMainList.add(device_test);
                            ToastUtil.showMsg2(Test7Activity.this, "登陆成功……");
                            dialog.dismiss();
                        }
                    });
                    builder.setNegativeButton("取消登陆", new DialogInterface.OnClickListener() {
                        @Override
                        public void onClick(DialogInterface dialog, int which) {
                            ToastUtil.showMsg2(Test7Activity.this, "取消登陆……");
                        }
                    });
                    builder.show();
                } else {
                    // Already logged in: confirm before logging out and
                    // releasing the device's SDK resources.
                    CustomDialog customDialog = new CustomDialog(Test7Activity.this, R.style.CustomDialog);
                    customDialog.setTitle("提示").setMessage("是否登出 ?")
                            .setCancel("取消", new CustomDialog.IOnCancelListener() {
                                @Override
                                public void onCancel(CustomDialog customDialog) {
                                    ToastUtil.showMsg2(Test7Activity.this, "Cancel……");
                                    customDialog.dismiss();
                                }
                            }).setConfirm("确认", new CustomDialog.IOnConfirmListener() {
                                @Override
                                public void onConfirm(CustomDialog customDialog) {
                                    device_test.stopPlay();
                                    device_test.logoutDevice();
                                    device_test.freeSDK();
                                    ToastUtil.showMsg2(Test7Activity.this, "登出成功啦!");
                                    customDialog.dismiss();
                                }
                            }).show();
                    customDialog.setCancelable(false);
                }
                break;
        }
    }

    /**
     * Request the dangerous runtime permissions the app declares
     * (external storage write, audio capture, audio record).
     */
    private void riskAuthorization() {
        List<String> permissionList = new ArrayList<>();
        // Collect every permission that has not been granted yet.
        if (ContextCompat.checkSelfPermission(Test7Activity.this, Manifest.permission.WRITE_EXTERNAL_STORAGE) != PackageManager.PERMISSION_GRANTED) {
            permissionList.add(Manifest.permission.WRITE_EXTERNAL_STORAGE);
        }
        if (ContextCompat.checkSelfPermission(Test7Activity.this, Manifest.permission.CAPTURE_AUDIO_OUTPUT) != PackageManager.PERMISSION_GRANTED) {
            permissionList.add(Manifest.permission.CAPTURE_AUDIO_OUTPUT);
        }
        if (ContextCompat.checkSelfPermission(Test7Activity.this, Manifest.permission.RECORD_AUDIO) != PackageManager.PERMISSION_GRANTED) {
            permissionList.add(Manifest.permission.RECORD_AUDIO);
        }
        if (!permissionList.isEmpty()) {
            String[] permissions = permissionList.toArray(new String[permissionList.size()]);
            // Triggers the system prompt; the result is delivered to
            // onRequestPermissionsResult().
            ActivityCompat.requestPermissions(Test7Activity.this, permissions, 1);
        } else {
            ToastUtil.showMsg2(Test7Activity.this, "授权成功了");
        }
    }

    @Override
    public void onRequestPermissionsResult(int requestCode, @NonNull String[] permissions, @NonNull int[] grantResults) {
        switch (requestCode) {
            case 1:
                if (grantResults.length > 0) {
                    // The app only works when every permission is granted.
                    for (int result : grantResults) {
                        if (result != PackageManager.PERMISSION_GRANTED) {
                            ToastUtil.showMsg2(this, "必须同意所有权限才能使用本程序");
                            this.finish();
                            return;
                        }
                    }
                } else {
                    ToastUtil.showMsg2(this, "发生未知错误");
                    this.finish();
                    return;
                }
                break;
        }
    }

    // Timestamp of the previous back press; <= 0 means none yet.
    private long lastClickTime = 0;

    /**
     * Double back press within one second exits the app, after releasing
     * every logged-in device's SDK resources.
     */
    @Override
    public void onBackPressed() {
        // super.onBackPressed();
        if (lastClickTime <= 0) {
            // First press: warn only and start the one-second window.
            ToastUtil.showMsg2(this, "在按一次后退键退出应用");
            lastClickTime = System.currentTimeMillis();
        } else {
            long currentClickTime = System.currentTimeMillis();
            if (currentClickTime - lastClickTime < 1000) {
                // Release all SDK resources before exiting.
                if (!deviceMainList.isEmpty()) {
                    for (DeviceMain deviceMain : deviceMainList) {
                        if (null != deviceMain) {
                            deviceMain.stopPlay();
                            deviceMain.logoutDevice();
                            deviceMain.freeSDK();
                        }
                    }
                    // BUG FIX: the old code called deviceMainList.remove(...)
                    // inside the for-each loop, which throws
                    // ConcurrentModificationException on any non-empty list;
                    // clear once after the loop instead.
                    deviceMainList.clear();
                }
                ToastUtil.showMsg2(Test7Activity.this, "资源已回收,再会了!");
                ActivityCollector.finishAll();
            } else {
                // Too slow: restart the double-press window.
                ToastUtil.showMsg2(this, "再按一次后退键退出应用");
                lastClickTime = currentClickTime;
            }
        }
    }
}
|
package com.flyco.animation.Attention;
import android.view.View;
import android.view.animation.CycleInterpolator;
import com.nineoldandroids.animation.ObjectAnimator;
import com.flyco.animation.BaseAnimatorSet;
/**
 * Horizontal shake: translationX sweeps between -10 and 10 and the
 * CycleInterpolator repeats the sweep five times over the configured
 * one-second duration.
 */
public class ShakeHorizontal extends BaseAnimatorSet {
    public ShakeHorizontal() {
        duration = 1000;
    }

    /**
     * Attaches the shake animation to {@code view}.
     *
     * <pre>
     * Alternative shake implementation:
     * ObjectAnimator.ofFloat(view, "translationX", 0, 25, -25, 25, -25, 15, -15, 6, -6, 0);
     * </pre>
     */
    @Override
    public void setAnimation(View view) {
        ObjectAnimator shake = ObjectAnimator.ofFloat(view, "translationX", -10, 10);
        shake.setInterpolator(new CycleInterpolator(5));
        animatorSet.playTogether(shake);
    }
}
|
// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE
package org.bytedeco.systems.windows;
import java.nio.*;
import org.bytedeco.javacpp.*;
import org.bytedeco.javacpp.annotation.*;
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.systems.global.windows.*;
// JavaCPP-generated mapping of the Win32 PSAPI_WORKING_SET_EX_INFORMATION
// struct: pairs a virtual address with its working-set attribute block
// (presumably for use with the Win32 QueryWorkingSetEx API — see psapi.h).
// Generated code: do not edit by hand; regenerate via the
// org.bytedeco.systems presets instead.
@Properties(inherit = org.bytedeco.systems.presets.windows.class)
public class PSAPI_WORKING_SET_EX_INFORMATION extends Pointer {
    // Loads the native library before any native method is invoked.
    static { Loader.load(); }
    /** Default native constructor. */
    public PSAPI_WORKING_SET_EX_INFORMATION() { super((Pointer)null); allocate(); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public PSAPI_WORKING_SET_EX_INFORMATION(long size) { super((Pointer)null); allocateArray(size); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public PSAPI_WORKING_SET_EX_INFORMATION(Pointer p) { super(p); }
    private native void allocate();
    private native void allocateArray(long size);
    @Override public PSAPI_WORKING_SET_EX_INFORMATION position(long position) {
        return (PSAPI_WORKING_SET_EX_INFORMATION)super.position(position);
    }
    @Override public PSAPI_WORKING_SET_EX_INFORMATION getPointer(long i) {
        return new PSAPI_WORKING_SET_EX_INFORMATION((Pointer)this).offsetAddress(i);
    }
    /** The virtual address being queried (struct field VirtualAddress). */
    public native @Cast("PVOID") Pointer VirtualAddress(); public native PSAPI_WORKING_SET_EX_INFORMATION VirtualAddress(Pointer setter);
    /** The working-set attributes for that address (struct field VirtualAttributes). */
    public native @ByRef PSAPI_WORKING_SET_EX_BLOCK VirtualAttributes(); public native PSAPI_WORKING_SET_EX_INFORMATION VirtualAttributes(PSAPI_WORKING_SET_EX_BLOCK setter);
}
|
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE129_Improper_Validation_of_Array_Index__console_readLine_array_read_check_max_61a.java
Label Definition File: CWE129_Improper_Validation_of_Array_Index.label.xml
Template File: sources-sinks-61a.tmpl.java
*/
/*
* @description
* CWE: 129 Improper Validation of Array Index
* BadSource: console_readLine Read data from the console using readLine
* GoodSource: A hardcoded non-zero, non-min, non-max, even number
* Sinks: array_read_check_max
* GoodSink: Read from array after verifying index is at least 0 and less than array.length
* BadSink : Read from array after verifying that data less than array.length (but not verifying that data is at least 0)
* Flow Variant: 61 Data flow: data returned from one method to another in different classes in the same package
*
* */
package testcases.CWE129_Improper_Validation_of_Array_Index.s01;
import testcasesupport.*;
import javax.servlet.http.*;
/* NOTE: Generated Juliet/SARD benchmark test case. The missing lower-bound
 * check in bad() and goodG2B() is the intentional CWE-129 flaw this case
 * exists to exercise; do not "fix" it or the benchmark loses its purpose. */
public class CWE129_Improper_Validation_of_Array_Index__console_readLine_array_read_check_max_61a extends AbstractTestCase
{
    /* Flawed flow: tainted index from the console, upper bound checked only. */
    public void bad() throws Throwable
    {
        int data = (new CWE129_Improper_Validation_of_Array_Index__console_readLine_array_read_check_max_61b()).badSource();
        /* Need to ensure that the array is of size > 3 and < 101 due to the GoodSource and the large_fixed BadSource */
        int array[] = { 0, 1, 2, 3, 4 };
        /* POTENTIAL FLAW: Verify that data < array.length, but don't verify that data > 0, so may be attempting to read out of the array bounds */
        if (data < array.length)
        {
            IO.writeLine(array[data]);
        }
        else
        {
            IO.writeLine("Array index out of bounds");
        }
    }
    /* Runs both "good" variants so the whole case can be exercised at once. */
    public void good() throws Throwable
    {
        goodG2B();
        goodB2G();
    }
    /* goodG2B() - use goodsource and badsink */
    private void goodG2B() throws Throwable
    {
        int data = (new CWE129_Improper_Validation_of_Array_Index__console_readLine_array_read_check_max_61b()).goodG2BSource();
        /* Need to ensure that the array is of size > 3 and < 101 due to the GoodSource and the large_fixed BadSource */
        int array[] = { 0, 1, 2, 3, 4 };
        /* POTENTIAL FLAW: Verify that data < array.length, but don't verify that data > 0, so may be attempting to read out of the array bounds */
        if (data < array.length)
        {
            IO.writeLine(array[data]);
        }
        else
        {
            IO.writeLine("Array index out of bounds");
        }
    }
    /* goodB2G() - use badsource and goodsink */
    private void goodB2G() throws Throwable
    {
        int data = (new CWE129_Improper_Validation_of_Array_Index__console_readLine_array_read_check_max_61b()).goodB2GSource();
        /* Need to ensure that the array is of size > 3 and < 101 due to the GoodSource and the large_fixed BadSource */
        int array[] = { 0, 1, 2, 3, 4 };
        /* FIX: Fully verify data before reading from array at location data */
        if (data >= 0 && data < array.length)
        {
            IO.writeLine(array[data]);
        }
        else
        {
            IO.writeLine("Array index out of bounds");
        }
    }
    /* Below is the main(). It is only used when building this testcase on
     * its own for testing or for building a binary to use in testing binary
     * analysis tools. It is not used when compiling all the testcases as one
     * application, which is how source code analysis tools are tested.
     */
    public static void main(String[] args) throws ClassNotFoundException,
           InstantiationException, IllegalAccessException
    {
        mainFromParent(args);
    }
}
package eval.tmp;
import illinoisParser.AutoDecoder;
import illinoisParser.FineChartItem;
import illinoisParser.Grammar;
import illinoisParser.Sentence;
import illinoisParser.Tree;
import illinoisParser.models.BaselineModel;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.Scanner;
import eval.Dep;
import eval.DepSet;
/**
 * Scores perceptron-parser output: extracts the viterbi AUTO parses, writes
 * them to a file, and compares the predicted dependencies embedded in the
 * output against gold dependencies rebuilt from the parse trees.
 */
public class EvaluatePerceptronParserOutput {
    /**
     * Entry point: writes the viterbi parses to
     * {@code combined_training/eval.parsed.auto}, then evaluates them.
     *
     * @param args unused
     * @throws FileNotFoundException if the parser output file is missing
     */
    public static void main(String[] args) throws FileNotFoundException {
        String parsedOutput = "eval.auto";
        String targetFile = "combined_training/eval.parsed.auto";
        ArrayList<String> autoViterbi = getAutoViterbi(parsedOutput);
        PrintWriter pw = new PrintWriter(new File(targetFile));
        for(String sen : autoViterbi) {
            pw.println(sen.trim());
        }
        pw.close();
        evaluate(parsedOutput);
    }

    /**
     * Walks the parser output, accumulating labeled/unlabeled dependency
     * matches against gold, and prints precision/recall plus counts of
     * too-long sentences and parse failures.
     *
     * @param parsedOutput path to the parser output file
     * @throws FileNotFoundException if the file cannot be opened
     */
    private static void evaluate(String parsedOutput) throws FileNotFoundException {
        Scanner sc = new Scanner(new File(parsedOutput));
        double total = 0;
        int failures = 0;
        int tooLong = 0;
        double totalDeps = 0.0;
        double predictedDeps = 0.0;
        double matchedLabeled = 0.0;
        double matchedUnlabeled = 0.0;
        String line = null;
        String auto;
        String lastLine;
        PrintWriter pw = new PrintWriter(new File("eval.parsed.auto"));
        PrintWriter pfpw = new PrintWriter(new File("eval.failures.auto"));
        //PrintWriter pw = new PrintWriter(new File("wsj0.parsed.auto"));
        //PrintWriter pfpw = new PrintWriter(new File("wsj0.failures.auto"));
        while(sc.hasNextLine()) {
            // lastLine trails line by one read; the AUTO parse is assumed to
            // be preceded by its sentence line.
            lastLine = line;
            line = sc.nextLine().trim();
            if(!line.isEmpty()) {
                if(line.startsWith("(<T")) {
                    total++;
                    auto = line;
                    pw.println(auto);
                    line = sc.nextLine().trim();
                    if(line.startsWith("<s>")) {
                        // Collect the predicted dependency block up to <\s>.
                        DepSet deps = null;
                        deps = new DepSet(Integer.parseInt(line.substring(4)));
                        while(!line.isEmpty()) {
                            line = sc.nextLine().trim();
                            if(line.startsWith("<\\s>")) {
                                break;
                            }
                            else if(!line.isEmpty()) {
                                deps.addDep(new Dep(line));
                                String[] toks = line.split("\\s+");
                                deps.addWord(Integer.parseInt(toks[0]), toks[4]);
                                deps.addWord(Integer.parseInt(toks[1]), toks[5]);
                            }
                        }
                        // NOTE(review): if the very first line of the file is a
                        // parse, lastLine is still null here — confirm the file
                        // format always puts the sentence line first.
                        Sentence sen = new Sentence(lastLine);
                        AutoDecoder decoder = new AutoDecoder(sen, auto);
                        Tree<? extends FineChartItem> gold = decoder.buildTree(new BaselineModel(new Grammar()));
                        DepSet goldDeps = DepSet.getDepSetFromPargEntry(gold.buildPargString(sen));
                        totalDeps += goldDeps.size();
                        predictedDeps += deps.size();
                        matchedLabeled += deps.numMatchedLabeled(goldDeps);
                        matchedUnlabeled += deps.numMatchedUnlabeled(goldDeps);
                    }
                    else if(line.equals("TOO_LONG")) {
                        tooLong++;
                    }
                    else if(line.equals("PARSE_FAILURE")) {
                        pfpw.println(auto);
                        failures++;
                    }
                }
            }
        }
        sc.close();
        pw.close();
        pfpw.close();
        System.out.println(total+ " total sentences");
        System.out.println(tooLong+ " were too long");
        System.out.println(failures+ " parse failures\n");
        System.out.println("Results on remaining "+(total-tooLong-failures)+" sentences:");
        System.out.println("Labeled recall: "+(matchedLabeled/totalDeps));
        System.out.println("Labeled precision: "+(matchedLabeled/predictedDeps)+"\n");
        // BUG FIX: output said "Unabeled recall".
        System.out.println("Unlabeled recall: "+(matchedUnlabeled/totalDeps));
        System.out.println("Unlabeled precision: "+(matchedUnlabeled/predictedDeps)+"\n");
    }

    /**
     * Reads the parser output and returns one entry per sentence: the AUTO
     * parse for successes, or an empty string for TOO_LONG / PARSE_FAILURE
     * lines (so positions stay aligned).
     *
     * @param parsedOutput path to the parser output file
     * @return the viterbi parses, one entry per sentence
     * @throws FileNotFoundException if the file cannot be opened
     */
    private static ArrayList<String> getAutoViterbi(String parsedOutput) throws FileNotFoundException {
        Scanner sc = new Scanner(new File(parsedOutput));
        ArrayList<String> list = new ArrayList<String>();
        String line;
        while(sc.hasNextLine()) {
            line = sc.nextLine().trim();
            if(line.startsWith("(<T")) {
                list.add(line);
            }
            else if(line.equals("TOO_LONG")) {
                list.add("");
            }
            else if(line.equals("PARSE_FAILURE")) {
                list.add("");
            }
        }
        sc.close();
        return list;
    }
}
|
/*
* Copyright 2017 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jbpm.services.task.audit.service;
import static org.kie.internal.query.QueryParameterIdentifiers.DATE_LIST;
import static org.kie.internal.query.QueryParameterIdentifiers.ID_LIST;
import static org.kie.internal.query.QueryParameterIdentifiers.TASK_ID_LIST;
import static org.kie.internal.query.QueryParameterIdentifiers.TASK_VARIABLE_NAME_ID_LIST;
import static org.kie.internal.query.QueryParameterIdentifiers.TASK_VARIABLE_VALUE_ID_LIST;
import static org.kie.internal.query.QueryParameterIdentifiers.TYPE_LIST;
import java.util.Date;
import java.util.List;
import org.jbpm.services.task.audit.commands.TaskVariableQueryCommand;
import org.jbpm.services.task.audit.impl.model.TaskVariableImpl;
import org.jbpm.services.task.commands.TaskCommand;
import org.kie.internal.query.QueryParameterIdentifiers;
import org.kie.internal.task.api.InternalTaskService;
import org.kie.internal.task.api.TaskPersistenceContext;
import org.kie.internal.task.api.TaskVariable;
import org.kie.internal.task.api.TaskVariable.VariableType;
import org.kie.internal.task.query.TaskVariableQueryBuilder;
/**
 * Query builder for {@link TaskVariable} audit entries. Each criteria method
 * registers a parameter on the underlying query and returns {@code this} so
 * calls can be chained; {@link #getCommand()} finally produces the executable
 * {@link TaskVariableQueryCommand}.
 */
public class TaskVariableQueryBuilderImpl extends AbstractTaskAuditQueryBuilderImpl<TaskVariableQueryBuilder, TaskVariable> implements TaskVariableQueryBuilder {

    public TaskVariableQueryBuilderImpl(InternalTaskService taskService) {
        super(taskService);
    }

    public TaskVariableQueryBuilderImpl(TaskJPAAuditService jpaService) {
        super(jpaService);
    }

    /** Restricts results to the given task ids. */
    @Override
    public TaskVariableQueryBuilder taskId(long... taskId) {
        addLongParameter(TASK_ID_LIST, "task id", taskId);
        return this;
    }

    /** Restricts results to task ids within the given (inclusive) range. */
    @Override
    public TaskVariableQueryBuilder taskIdRange( Long taskIdMin, Long taskIdMax ) {
        addRangeParameters(TASK_ID_LIST, "task id range", taskIdMin, taskIdMax);
        return this;
    }

    /** Restricts results to the given task-variable log ids. */
    @Override
    public TaskVariableQueryBuilder id( long... id ) {
        // fixed: description used to read "task id" (copy-paste from
        // taskId(...)), which produced misleading parameter messages
        addLongParameter(ID_LIST, "id", id);
        return this;
    }

    /** Restricts results to the given modification dates. */
    @Override
    public TaskVariableQueryBuilder modificationDate( Date... modDate ) {
        addObjectParameter(DATE_LIST, "log time", modDate);
        return this;
    }

    /** Restricts results to modification dates within the given range. */
    @Override
    public TaskVariableQueryBuilder modificationDateRange( Date modDateMin, Date modDateMax ) {
        addRangeParameters(DATE_LIST, "log time range", modDateMin, modDateMax);
        return this;
    }

    /** Restricts results to the given variable names. */
    @Override
    public TaskVariableQueryBuilder name( String... name ) {
        addObjectParameter(TASK_VARIABLE_NAME_ID_LIST, "name", name);
        return this;
    }

    /** Restricts results to the given variable values. */
    @Override
    public TaskVariableQueryBuilder value( String... value ) {
        addObjectParameter(TASK_VARIABLE_VALUE_ID_LIST, "value", value);
        return this;
    }

    /** Restricts results to the given variable types (input/output). */
    @Override
    public TaskVariableQueryBuilder type( VariableType... type ) {
        addObjectParameter(TYPE_LIST, "task variable type", type);
        return this;
    }

    /** Orders results ascending by the given field. */
    @Override
    public TaskVariableQueryBuilder ascending( org.kie.internal.task.query.TaskVariableQueryBuilder.OrderBy field ) {
        String listId = convertOrderByToListId(field);
        this.queryWhere.setAscending(listId);
        return this;
    }

    /** Orders results descending by the given field. */
    @Override
    public TaskVariableQueryBuilder descending( org.kie.internal.task.query.TaskVariableQueryBuilder.OrderBy field ) {
        String listId = convertOrderByToListId(field);
        this.queryWhere.setDescending(listId);
        return this;
    }

    /**
     * Maps an {@code OrderBy} field to the query-parameter list id used for
     * ordering.
     *
     * @param field the order-by field, must be one of the known constants
     * @return the matching list id
     * @throws IllegalArgumentException if the field is not supported
     */
    private String convertOrderByToListId(org.kie.internal.task.query.TaskVariableQueryBuilder.OrderBy field) {
        String listId;
        switch( field ) {
        case id:
            listId = QueryParameterIdentifiers.ID_LIST;
            break;
        case taskId:
            listId = QueryParameterIdentifiers.TASK_ID_LIST;
            break;
        case modificationDate:
            listId = QueryParameterIdentifiers.DATE_LIST;
            break;
        case processInstanceId:
            listId = QueryParameterIdentifiers.PROCESS_INSTANCE_ID_LIST;
            break;
        default:
            throw new IllegalArgumentException("Unknown 'order-by' field: " + field.toString() );
        }
        return listId;
    }

    /** JPA entity type used to run the query. */
    @Override
    protected Class<TaskVariableImpl> getQueryType() {
        return TaskVariableImpl.class;
    }

    /** Public API type the results are exposed as. */
    @Override
    protected Class<TaskVariable> getResultType() {
        return TaskVariable.class;
    }

    /** Builds the executable command for the accumulated criteria. */
    @Override
    protected TaskCommand<List<TaskVariable>> getCommand() {
        return new TaskVariableQueryCommand(queryWhere);
    }
}
|
/*
* Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.identitymanagement.model;
import java.io.Serializable;
import javax.annotation.Generated;
import com.amazonaws.AmazonWebServiceRequest;
/**
*
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteUser" target="_top">AWS API
* Documentation</a>
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
/**
 * Request to delete the specified IAM user.
 *
 * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteUser" target="_top">AWS API
 *      Documentation</a>
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class DeleteUserRequest extends com.amazonaws.AmazonWebServiceRequest implements Serializable, Cloneable {

    /**
     * The name of the user to delete. Per the service's regex pattern this is
     * a string of upper/lowercase alphanumeric characters with no spaces,
     * optionally including any of: _+=,.@-
     */
    private String userName;

    /**
     * No-argument constructor; populate the request afterwards via
     * {@link #setUserName(String)} or {@link #withUserName(String)}.
     */
    public DeleteUserRequest() {
    }

    /**
     * Creates a request targeting the given user.
     *
     * @param userName
     *        the name of the user to delete (alphanumeric, no spaces, may
     *        include _+=,.@-)
     */
    public DeleteUserRequest(String userName) {
        setUserName(userName);
    }

    /**
     * Sets the name of the user to delete.
     *
     * @param userName
     *        the name of the user to delete (alphanumeric, no spaces, may
     *        include _+=,.@-)
     */
    public void setUserName(String userName) {
        this.userName = userName;
    }

    /**
     * Returns the name of the user to delete.
     *
     * @return the user name, or {@code null} if not yet set
     */
    public String getUserName() {
        return this.userName;
    }

    /**
     * Fluent variant of {@link #setUserName(String)}.
     *
     * @param userName
     *        the name of the user to delete (alphanumeric, no spaces, may
     *        include _+=,.@-)
     * @return this request, so calls can be chained
     */
    public DeleteUserRequest withUserName(String userName) {
        setUserName(userName);
        return this;
    }

    /**
     * Returns a string representation of this object. This is useful for
     * testing and debugging. Sensitive data will be redacted from this string
     * using a placeholder value.
     *
     * @return A string representation of this object.
     *
     * @see java.lang.Object#toString()
     */
    @Override
    public String toString() {
        final StringBuilder sb = new StringBuilder("{");
        if (getUserName() != null) {
            sb.append("UserName: ").append(getUserName());
        }
        return sb.append("}").toString();
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        // instanceof is false for null, so this also covers the null case
        if (!(obj instanceof DeleteUserRequest)) {
            return false;
        }
        final DeleteUserRequest other = (DeleteUserRequest) obj;
        if (this.getUserName() == null) {
            return other.getUserName() == null;
        }
        return this.getUserName().equals(other.getUserName());
    }

    @Override
    public int hashCode() {
        // same scheme as the generated code: 31 * 1 + hash(userName)
        return 31 + ((getUserName() == null) ? 0 : getUserName().hashCode());
    }

    @Override
    public DeleteUserRequest clone() {
        return (DeleteUserRequest) super.clone();
    }
}
|
/*
* This file is part of the CS372Assignment6Exercise5 project.
*
* Author: Omid Jafari - omidjafari.com
* Copyright (c) 2018
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
import java.io.*;
/**
 * Command-line checker that reads a tree description from a file and reports
 * whether it satisfies the red-black tree properties.
 */
public class RedBlackTreeChecker {

    /**
     * Parses the first line of the given file into a {@link RedBlackTree}.
     * The line is expected to hold space-separated triplets of the form
     * "(key,value,color)".
     *
     * @param filename path of the input file
     * @return the parsed tree, or {@code null} if the file is empty, a
     *         triplet is malformed, or an insert is rejected
     * @throws IOException if the file cannot be read
     */
    private static RedBlackTree readTree(String filename) throws IOException {
        // try-with-resources closes the reader on every path; the original
        // leaked the FileReader/BufferedReader.
        try (BufferedReader br = new BufferedReader(new FileReader(new File(filename)))) {
            String firstLine = br.readLine();
            if (firstLine == null) {
                return null; // empty file -> reported as an input error
            }
            String[] triplets = firstLine.trim().split(" ");
            RedBlackTree tree = new RedBlackTree();
            for (String triplet : triplets) {
                // need at least the surrounding parentheses to strip
                if (triplet.length() < 2) {
                    return null;
                }
                triplet = triplet.substring(1, triplet.length() - 1);
                String[] values = triplet.split(",");
                // expect exactly key,value,color with a non-empty color
                if (values.length != 3 || values[2].isEmpty()) {
                    return null;
                }
                Node x;
                try {
                    x = new Node(Integer.parseInt(values[0]), Integer.parseInt(values[1]), values[2].charAt(0));
                } catch (NumberFormatException e) {
                    return null; // malformed number -> input error, not a crash
                }
                if (!tree.insert(x))
                    return null;
            }
            return tree;
        }
    }

    public static void main(String[] args) throws IOException {
        // exactly one argument is required: the input file path
        if (args.length != 1) {
            System.out.println("You need to provide the file path as argument!!!");
            System.exit(-1);
        }
        RedBlackTree tree = readTree(args[0]);
        // report the first violated red-black property, if any
        if (tree == null)
            System.out.println("Error in the input file!!!");
        else if (!tree.isBST())
            System.out.println("Not a Red-Black tree, because it is not a BST.");
        else if (!tree.isRootBlack())
            System.out.println("Not a Red-Black tree, because root is not black.");
        else if (tree.isRedRedChild())
            System.out.println("Not a Red-Black tree, because a red node has a red child.");
        else if (!tree.isBlackHeightSame())
            System.out.println("Not a Red-Black tree, because paths from a node to descendant" +
                    " leaves contain different number of black nodes");
        else
            System.out.println("The provided tree is a Red-Black tree.");
    }
}
|
package org.vertexium.accumulo.iterator;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.hadoop.io.Text;
import org.vertexium.accumulo.iterator.model.EdgeElementData;
import org.vertexium.accumulo.iterator.model.IteratorFetchHints;
import org.vertexium.security.Authorizations;
/**
 * Iterator that collapses all rows belonging to a single edge into one
 * key/value pair, tracking the edge label plus its in/out vertex ids.
 */
public class EdgeIterator extends ElementIterator<EdgeElementData> {
    // column family marking the edge "signal" row (carries the label)
    public static final String CF_SIGNAL_STRING = "E";
    public static final Text CF_SIGNAL = new Text(CF_SIGNAL_STRING);
    public static final byte[] CF_SIGNAL_BYTES = CF_SIGNAL.getBytes();

    // column family holding the out-vertex id
    public static final String CF_OUT_VERTEX_STRING = "EOUT";
    public static final Text CF_OUT_VERTEX = new Text(CF_OUT_VERTEX_STRING);
    public static final byte[] CF_OUT_VERTEX_BYTES = CF_OUT_VERTEX.getBytes();

    // column family holding the in-vertex id
    public static final String CF_IN_VERTEX_STRING = "EIN";
    public static final Text CF_IN_VERTEX = new Text(CF_IN_VERTEX_STRING);
    public static final byte[] CF_IN_VERTEX_BYTES = CF_IN_VERTEX.getBytes();

    public EdgeIterator() {
        this(null, false, (String[]) null);
    }

    public EdgeIterator(IteratorFetchHints fetchHints, boolean compressTransfer, String[] authorizations) {
        super(null, fetchHints, compressTransfer, authorizations);
    }

    public EdgeIterator(
        SortedKeyValueIterator<Key, Value> source,
        IteratorFetchHints fetchHints,
        boolean compressTransfer,
        Authorizations authorizations
    ) {
        super(source, fetchHints, compressTransfer, authorizations);
    }

    public EdgeIterator(IteratorFetchHints fetchHints, boolean compressTransfer, Authorizations authorizations) {
        super(null, fetchHints, compressTransfer, authorizations);
    }

    /**
     * Handles the edge-specific column families. For the in/out vertex
     * columns only the entry with the highest timestamp wins.
     *
     * @return {@code true} if the column was consumed here
     */
    @Override
    protected boolean processColumn(KeyValue keyValue) {
        final EdgeElementData data = getElementData();
        if (keyValue.columnFamilyEquals(CF_IN_VERTEX_BYTES)) {
            if (data.inVertexIdTimestamp == null || keyValue.getTimestamp() > data.inVertexIdTimestamp) {
                data.inVertexId = keyValue.takeColumnQualifier();
                data.inVertexIdTimestamp = keyValue.getTimestamp();
            }
            return true;
        }
        if (keyValue.columnFamilyEquals(CF_OUT_VERTEX_BYTES)) {
            if (data.outVertexIdTimestamp == null || keyValue.getTimestamp() > data.outVertexIdTimestamp) {
                data.outVertexId = keyValue.takeColumnQualifier();
                data.outVertexIdTimestamp = keyValue.getTimestamp();
            }
            return true;
        }
        // not an edge-specific column; let the base class deal with it
        return false;
    }

    /**
     * In addition to the base handling, the signal column's qualifier carries
     * the edge label; a deletion clears it.
     */
    @Override
    protected void processSignalColumn(KeyValue keyValue, boolean deleted) {
        super.processSignalColumn(keyValue, deleted);
        getElementData().label = deleted ? null : keyValue.takeColumnQualifier();
    }

    @Override
    protected byte[] getVisibilitySignal() {
        return CF_SIGNAL_BYTES;
    }

    @Override
    public SortedKeyValueIterator<Key, Value> deepCopy(IteratorEnvironment env) {
        final SortedKeyValueIterator<Key, Value> source = getSourceIterator();
        if (source == null) {
            return new EdgeIterator(getFetchHints(), isCompressTransfer(), getAuthorizations());
        }
        return new EdgeIterator(source.deepCopy(env), getFetchHints(), isCompressTransfer(), getAuthorizations());
    }

    @Override
    protected String getDescription() {
        return "This iterator encapsulates an entire Edge into a single Key/Value pair.";
    }

    @Override
    protected EdgeElementData createElementData() {
        return new EdgeElementData();
    }
}
|
package com.medicine.dto.user.signup;
import lombok.Getter;
import lombok.NoArgsConstructor;
import javax.validation.constraints.*;
import java.util.Date;
/**
 * Request body for the user sign-up endpoint; bean-validation annotations
 * enforce the field constraints before the request is processed.
 */
@NoArgsConstructor
@Getter
public class SignUpInput {
    // login email address, at most 45 characters
    @Email
    @NotNull
    @Size(max = 45)
    private String email;
    // raw password, 8-45 characters
    @NotBlank
    @Size(min=8, max = 45)
    private String password;
    // display name, at most 45 characters
    @NotBlank
    @Size(max = 45)
    private String nickname;
    // date of birth; must lie in the past
    @NotNull
    @Past
    private Date birth;
    // gender flag, exactly "M" or "F"
    @Pattern(regexp = "^[MF]$")
    @Size(max = 1)
    private String gender;
}
|
package com.icfcc.cache;
import java.util.Collection;
/**
 * A manager for a set of {@link Cache}s.
 *
 * @author Costin Leau
 * @since 3.1
 */
public interface CacheManager {
    /**
     * Returns the cache associated with the given name.
     *
     * @param name cache identifier (must not be {@code null})
     * @return the associated cache, or {@code null} if none is found
     */
    Cache getCache(String name);
    /**
     * Returns the names of all caches known by this cache manager.
     *
     * @return the collection of cache names
     */
    Collection<String> getCacheNames();
}
|
package net.meisen.general.sbconfigurator.config;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.UUID;
import net.meisen.general.genmisc.exceptions.registry.IExceptionRegistry;
import net.meisen.general.genmisc.resources.Resource;
import net.meisen.general.genmisc.resources.ResourceInfo;
import net.meisen.general.genmisc.resources.Xml;
import net.meisen.general.genmisc.types.Classes;
import net.meisen.general.genmisc.types.Objects;
import net.meisen.general.genmisc.types.Streams;
import net.meisen.general.sbconfigurator.ConfigurationCoreSettings;
import net.meisen.general.sbconfigurator.api.IConfiguration;
import net.meisen.general.sbconfigurator.api.placeholder.IPropertyReplacer;
import net.meisen.general.sbconfigurator.api.placeholder.IXmlPropertyReplacer;
import net.meisen.general.sbconfigurator.api.transformer.ILoaderDefinition;
import net.meisen.general.sbconfigurator.api.transformer.IXsdValidator;
import net.meisen.general.sbconfigurator.api.transformer.IXsltTransformer;
import net.meisen.general.sbconfigurator.config.exception.InvalidConfigurationException;
import net.meisen.general.sbconfigurator.config.exception.InvalidXsltException;
import net.meisen.general.sbconfigurator.config.exception.TransformationFailedException;
import net.meisen.general.sbconfigurator.config.exception.ValidationFailedException;
import net.meisen.general.sbconfigurator.config.placeholder.SpringPropertyHolder;
import net.meisen.general.sbconfigurator.factories.MethodExecutorBean;
import net.meisen.general.sbconfigurator.helper.SpringHelper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.PropertyValue;
import org.springframework.beans.factory.BeanCreationException;
import org.springframework.beans.factory.BeanDefinitionStoreException;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.beans.factory.config.AutowireCapableBeanFactory;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.config.MethodInvokingFactoryBean;
import org.springframework.beans.factory.config.TypedStringValue;
import org.springframework.beans.factory.support.DefaultListableBeanFactory;
import org.springframework.beans.factory.xml.DefaultDocumentLoader;
import org.springframework.beans.factory.xml.XmlBeanDefinitionReader;
import org.springframework.core.io.ByteArrayResource;
import org.springframework.core.io.support.EncodedResource;
import org.springframework.util.MethodInvoker;
import org.springframework.util.xml.XmlValidationModeDetector;
import org.w3c.dom.Document;
import org.xml.sax.InputSource;
/**
* The default implementation of the <code>IConfiguration</code> interface. This
* is needed to load the configuration.
*
* @author pmeisen
*/
public class DefaultConfiguration implements IConfiguration {
// class-local logger for configuration-loading diagnostics
private final static Logger LOG = LoggerFactory
        .getLogger(DefaultConfiguration.class);
/**
 * The <code>coreSettings</code> are defined in the
 * <code>sbconfigurator-core.xml</code> context. The id must be
 * <code>coreSettings</code> to be wired correctly.
 *
 * @see #coreSettingsId
 */
@Autowired
@Qualifier(coreSettingsId)
private ConfigurationCoreSettings coreSettings;
/**
 * Holder for the configuration's properties; "final" properties published
 * by <code>PropertyInjectorBean</code> instances are pushed into this
 * holder while the loader definitions are processed.
 */
@Autowired
@Qualifier(corePropertyHolderId)
private SpringPropertyHolder corePropertyHolder;
/**
 * The core exception registry; registered as a default singleton so that
 * modules can be wired with it.
 */
@Autowired
@Qualifier(coreExceptionRegistryId)
private IExceptionRegistry coreExceptionRegistry;
/**
 * This <code>Map</code> is auto-wired with all the
 * <code>LoaderDefinitions</code>, which are configured via the
 * <code>sbconfigurator-core.xml</code> context. May be {@code null} if no
 * loader definitions are configured.
 */
@Autowired(required = false)
private Map<String, ILoaderDefinition> loaderDefinitions;
/**
 * It is possible to wire the <code>DefaultConfiguration</code> with a
 * <code>XsdValidator</code>, which is used to validate XML content. The
 * <code>XsdValidator</code> must be defined in the
 * <code>sbconfigurator-core.xml</code> context and must use the id
 * <code>xsdValidator</code>.
 */
@Autowired(required = false)
@Qualifier("xsdValidator")
private IXsdValidator xsdValidator;
/**
 * The <code>XsltTransformer</code> is used to transform XML definitions
 * into bean XML definitions. The transformer must be defined in the
 * <code>sbconfigurator-core.xml</code> context and must use the id
 * <code>xsltTransformer</code>.
 */
@Autowired
@Qualifier("xsltTransformer")
private IXsltTransformer xsltTransformer;
// optional property replacer for XML documents — presumably applied to
// loaded XML content before bean definitions are parsed; confirm in the
// loadBeanFactory implementation further down the file
@Autowired(required = false)
@Qualifier(IXmlPropertyReplacer.xmlReplacerId)
private IXmlPropertyReplacer xmlReplacer;
// property replacer for plain string content (required dependency)
@Autowired
@Qualifier(IPropertyReplacer.replacerId)
private IPropertyReplacer replacer;
/**
 * The <code>Collection</code> of all the loaded modules. A module can be
 * anything which is defined to be loaded via a
 * <code>LoaderDefinition</code>. Each module is represented by its bean-id.
 */
private final Map<String, Object> modules = new HashMap<String, Object>();
/**
 * The <code>Collection</code> of all the <code>BeanDefinition</code> found
 * (so far)
 */
private final Map<String, BeanDefinition> moduleDefinitions = new HashMap<String, BeanDefinition>();
/**
 * The <code>DefaultListableBeanFactory</code> which is used to load all the
 * modules. This factory has to be an attribute, because of pre-loading
 * purposes, i.e. if a bean retrieves a module from the configuration prior
 * to the loading of the module (i.e. within an init-method).
 */
private DefaultListableBeanFactory moduleFactory = null;
/**
 * Provides the {@code XsltTransformer} this configuration uses to transform
 * XML definitions into bean definitions.
 *
 * @return the configured {@code XsltTransformer}
 */
public IXsltTransformer getXsltTransformer() {
    return this.xsltTransformer;
}
/**
 * Provides the {@code XsdValidator} this configuration uses to validate XML
 * content; may be {@code null} since the dependency is optional.
 *
 * @return the configured {@code XsdValidator}
 */
public IXsdValidator getXsdValidator() {
    return this.xsdValidator;
}
/**
 * Loads the configuration in three phases: (1) every auto-wired
 * {@code ILoaderDefinition} is processed to collect bean definitions and
 * final properties, (2) a module factory is created and seeded with the
 * collected definitions plus the caller-supplied {@code injections}, and
 * (3) the modules are instantiated in a defined order — "factory"
 * {@code MethodExecutorBean}s first, "init" ones last.
 *
 * @param injections
 *            additional singletons registered with the module factory
 *            before any module is instantiated
 *
 * @throws InvalidConfigurationException
 *             if the configuration cannot be loaded
 */
@Override
public void loadConfiguration(final Map<String, Object> injections)
        throws InvalidConfigurationException {
    if (LOG.isTraceEnabled()) {
        LOG.trace("Starting to load the Configuration...");
    }
    // check if something was added via auto-wiring, if not there is nothing
    // more to do
    if (loaderDefinitions == null) {
        // make sure we have a Collection from now on
        loaderDefinitions = new LinkedHashMap<String, ILoaderDefinition>();
    } else {
        // load the default loader definitions
        for (final Entry<String, ILoaderDefinition> entry : loaderDefinitions
                .entrySet()) {
            final ILoaderDefinition loaderDefinition = entry.getValue();
            // do some logging
            if (LOG.isDebugEnabled()) {
                LOG.debug("Loading configuration from loader '"
                        + entry.getKey() + "': " + loaderDefinition);
            }
            // now load the definition
            final DefaultListableBeanFactory beanFactory = loadBeanFactory(
                    entry.getKey(), loaderDefinition);
            // properties published via PropertyInjectorBean instances are
            // marked final on the core property holder
            final Map<String, PropertyInjectorBean> propIns = beanFactory
                    .getBeansOfType(PropertyInjectorBean.class, false,
                            false);
            for (final PropertyInjectorBean propIn : propIns.values()) {
                corePropertyHolder.setFinalProperties(propIn
                        .getProperties());
            }
            // add all the other definitions to be loaded later
            final Map<String, BeanDefinition> defs = SpringHelper
                    .getBeanDefinitions(beanFactory,
                            ILoaderDefinition.class,
                            PropertyInjectorBean.class);
            // everything else is registered as module
            registerModuleBeanDefinitions(defs, entry.getKey());
        }
    }
    // the core engine is up and running now
    if (LOG.isTraceEnabled()) {
        LOG.trace("Core implementation of Configuration is up and running.");
    }
    // print the used properties
    if (LOG.isTraceEnabled()) {
        LOG.trace("The following properties have been loaded:");
        // sort by key for readable trace output; null entries sort first
        final List<Entry<Object, Object>> list = new ArrayList<Entry<Object, Object>>();
        list.addAll(getProperties().entrySet());
        Collections.sort(list, new Comparator<Entry<Object, Object>>() {
            @Override
            public int compare(final Entry<Object, Object> e1,
                    final Entry<Object, Object> e2) {
                // determine if there are nulls
                final boolean e1Null = e1 == null;
                final boolean e2Null = e2 == null;
                // check and return the result
                if (e1Null && e2Null) {
                    return 0;
                } else if (e1Null || e2Null) {
                    return e1Null ? -1 : 1;
                } else {
                    return e1.getKey().toString()
                            .compareTo(e2.getKey().toString());
                }
            }
        });
        for (final Entry<Object, Object> e : list) {
            LOG.trace(" - " + e.getKey() + " = " + e.getValue());
        }
    }
    // create the factory
    moduleFactory = SpringHelper.createBeanFactory(true, false);
    registerDefaults(moduleFactory);
    for (final Entry<String, BeanDefinition> entry : moduleDefinitions
            .entrySet()) {
        moduleFactory.registerBeanDefinition(entry.getKey(),
                entry.getValue());
    }
    // caller-supplied singletons become available for wiring as well
    for (final Entry<String, Object> entry : injections.entrySet()) {
        moduleFactory.registerSingleton(entry.getKey(), entry.getValue());
    }
    // now let's add all the modules
    if (LOG.isTraceEnabled()) {
        LOG.trace("Loaded '"
                + moduleDefinitions.size()
                + "' moduleDefinitions. The modules will be instantiated now.");
    }
    // first load the important methods: partition MethodExecutorBean
    // definitions by their "type" property — "factory" (or unset) into
    // head, "init" into tail, anything else into body
    final List<String> head = new ArrayList<String>();
    final List<String> body = new ArrayList<String>();
    final List<String> tail = new ArrayList<String>();
    for (final String name : moduleFactory.getBeanDefinitionNames()) {
        final BeanDefinition beanDef = moduleFactory
                .getBeanDefinition(name);
        final String beanClassName = beanDef.getBeanClassName();
        if (beanClassName == null) {
            continue;
        }
        final Class<?> beanClass = Classes.getClass(beanClassName);
        if (beanClass == null) {
            // do nothing
        } else if (MethodExecutorBean.class.isAssignableFrom(beanClass)) {
            final PropertyValue typeProperty = beanDef.getPropertyValues()
                    .getPropertyValue("type");
            final Object value = typeProperty == null ? null : typeProperty
                    .getValue();
            if (value == null
                    || value.equals(new TypedStringValue("factory"))) {
                head.add(name);
            } else if (value.equals(new TypedStringValue("init"))) {
                tail.add(name);
            } else {
                body.add(name);
            }
        }
    }
    // register the modules of head and body now
    registerFromFactory(head);
    registerFromFactory(body);
    for (final String name : moduleFactory.getBeanNamesForType(
            Object.class, false, true)) {
        if (!head.contains(name) && !body.contains(name)
                && !tail.contains(name)) {
            registerFromFactory(name);
        }
    }
    // register the modules of the tail
    registerFromFactory(tail);
    // load all the objects to ensure that everything is loaded
    final Map<String, Object> modules = moduleFactory.getBeansOfType(
            Object.class, false, true);
    for (final Entry<String, Object> entry : modules.entrySet()) {
        registerModule(entry.getKey(), entry.getValue());
    }
}
/**
 * Registers every named bean as a module, creating each one through the
 * {@code moduleFactory}.
 *
 * @param names
 *            the bean names to instantiate and register
 */
protected void registerFromFactory(final Collection<String> names) {
    for (final String beanName : names) {
        registerModule(beanName, moduleFactory.getBean(beanName));
    }
}
/**
 * Tries to create the named bean via the {@code moduleFactory} and register
 * it as a module.
 *
 * @param name
 *            the name of the bean to be created
 *
 * @return {@code true} if the bean was successfully created and registered,
 *         otherwise {@code false}
 */
protected boolean registerFromFactory(final String name) {
    final Object bean;
    try {
        bean = moduleFactory.getBean(name);
    } catch (final BeanCreationException creationFailure) {
        // swallowed deliberately: the final loading step retries this bean
        // through the plain Spring implementation
        return false;
    }
    return registerModule(name, bean);
}
/**
 * Registers the framework's default singletons (core settings, this
 * configuration, the property holder and the exception registry) with the
 * given factory so they are available for wiring.
 *
 * @param factory
 *            the {@link DefaultListableBeanFactory} to add the beans to
 */
protected void registerDefaults(final DefaultListableBeanFactory factory) {
    factory.registerSingleton(coreSettingsId, this.coreSettings);
    factory.registerSingleton(coreConfigurationId, this);
    factory.registerSingleton(corePropertyHolderId, this.corePropertyHolder);
    factory.registerSingleton(coreExceptionRegistryId, this.coreExceptionRegistry);
}
/**
 * Decides whether the bean may be registered as a module, i.e. whether it is
 * neither one of the reserved framework beans, nor anonymous, nor a Spring
 * helper bean.
 *
 * @param id
 *            the id of the module to be checked
 * @param module
 *            the module to be checked
 *
 * @return {@code true} if the bean should be registered as a module,
 *         otherwise {@code false}
 */
protected boolean isModule(final String id, final Object module) {
    if (id == null || module == null) {
        return false;
    }
    // the framework's own reserved beans are never modules
    if (coreSettingsId.equals(id) || coreConfigurationId.equals(id)
            || corePropertyHolderId.equals(id)
            || coreExceptionRegistryId.equals(id)) {
        return false;
    }
    if (isAnonymousId(id)) {
        return false;
    }
    if (module instanceof ConfigurationCoreSettings) {
        return false;
    }
    // also don't add any factories or creation of those of Spring those are
    // helper beans
    if (module instanceof MethodInvoker) {
        return false;
    }
    return true;
}
/**
 * Registers each of the passed bean definitions so that it will be loaded
 * for the <code>Configuration</code>.
 *
 * @param beanDefinitions
 *            the <code>BeanDefinition</code> instances to register, keyed
 *            by bean id
 * @param loaderId
 *            the loader's identifier, used for logging purposes
 */
protected void registerModuleBeanDefinitions(
        final Map<String, BeanDefinition> beanDefinitions,
        final String loaderId) {
    // delegate each definition to the single-definition registration
    for (final Entry<String, BeanDefinition> definition : beanDefinitions.entrySet()) {
        registerModuleBeanDefinition(definition.getKey(), definition.getValue(), loaderId);
    }
}
/**
 * Registers a single bean definition to be loaded for the
 * <code>Configuration</code>.
 *
 * @param id
 *            the bean definition's identifier, must not be empty
 * @param beanDefinition
 *            the <code>BeanDefinition</code> instance to be registered,
 *            must not be empty
 * @param loaderId
 *            the loader's identifier, used for logging purposes
 */
protected void registerModuleBeanDefinition(final String id,
        final BeanDefinition beanDefinition, final String loaderId) {
    if (Objects.empty(beanDefinition) || Objects.empty(id)) {
        throw new IllegalArgumentException("The id ('" + id
                + "') or the beanDefinition cannot be null.");
    }
    // anonymous beans must never be overridden; store the new definition
    // under a freshly generated unique key instead
    if (isAnonymousId(id) && moduleDefinitions.containsKey(id)) {
        final String uniqueId = UUID.randomUUID().toString() + "_" + id;
        moduleDefinitions.put(uniqueId, beanDefinition);
        return;
    }
    final BeanDefinition previous = moduleDefinitions.put(id, beanDefinition);
    if (previous != null) {
        if (LOG.isWarnEnabled()) {
            LOG.warn("Overloading the moduleDefinition '" + id
                    + "' with the one from the loaderDefinition '"
                    + loaderId + "'");
        }
    } else if (LOG.isDebugEnabled()) {
        LOG.debug("Added the moduleDefinition '" + id
                + "' from loaderDefinition '" + loaderId + "'");
    }
}
/**
 * Registers the specified bean in the map of loaded modules, provided it
 * qualifies as a module (see {@link #isModule(String, Object)}).
 *
 * @param id
 *            the id of the module to be registered
 * @param module
 *            the module instance to be registered
 *
 * @return {@code true} if the module was added, otherwise {@code false}
 */
protected boolean registerModule(final String id, final Object module) {
    if (!isModule(id, module)) {
        // only log why the bean was skipped; nothing gets registered
        if (isAnonymousId(id)) {
            final boolean invocationHelper = id
                    .contains(MethodInvokingFactoryBean.class.getName())
                    || id.contains(MethodExecutorBean.class.getName())
                    || id.contains(net.meisen.general.sbconfigurator.factories.MethodInvokingFactoryBean.class
                            .getName());
            if (invocationHelper) {
                if (LOG.isTraceEnabled()) {
                    LOG.trace("Skipping the bean '"
                            + id
                            + "' as module, because it is an anonymous bean used for MethodInvokation");
                }
            } else if (LOG.isWarnEnabled()) {
                LOG.warn("Skipping the bean '"
                        + id
                        + "' as module, because it is probably an anonymous bean");
            }
        } else if (LOG.isTraceEnabled()) {
            LOG.trace("Skipping the bean '" + id + "' as module");
        }
        return false;
    }
    final Object previous = modules.put(id, module);
    if (previous != null) {
        if (LOG.isWarnEnabled() && !Objects.equals(previous, module)) {
            LOG.warn("Overloading the module '" + id + "'");
        }
        return true;
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("Loaded the module '" + id + "' of type '"
                + module.getClass().getName() + "'");
    }
    return true;
}
/**
 * Tells whether the id looks like one Spring generates for an anonymous
 * bean, i.e. whether it ends with {@code #<counter>}.
 *
 * @param id
 *            the id to be checked
 * @return {@code true} if the id is anonymous, otherwise {@code false}
 */
protected boolean isAnonymousId(final String id) {
    final boolean generated = id.matches(".*#\\d+$");
    return generated;
}
@SuppressWarnings("unchecked")
@Override
public <T> T getModule(final String name) {
Object module = modules.get(name);
// it might be that the module is not instantiated yet
if (module == null && moduleFactory != null
&& moduleDefinitions.containsKey(name)) {
module = moduleFactory.getBean(name);
// register the module
if (registerModule(name, module)) {
// do some logging
if (LOG.isDebugEnabled()) {
LOG.debug("Pre-Loaded the module '"
+ name
+ "', no need to be worried this might happen if init-methods are used.");
}
} else {
module = null;
}
}
// return the module
return (T) module;
}
@Override
public Map<String, Object> getAllModules() {
return Collections.unmodifiableMap(modules);
}
/**
* Loads the <code>BeanFactory</code> which is specified by the passed
* <code>LoaderDefinition</code>.
*
* @param loaderId
* the identifier of the loader, used to identify the xslt loaded
* @param loaderDefinition
* the <code>LoaderDefinition</code> which specifies which data
* to be loaded
*
* @return the <code>ListableBeanFactory</code> loaded by the specified
* <code>LoaderDefinition</code>
*/
public DefaultListableBeanFactory loadBeanFactory(final String loaderId,
final ILoaderDefinition loaderDefinition) {
return loadBeanFactory(loaderDefinition.getSelector(),
loaderDefinition.getDefaultSelector(),
loaderDefinition.getXsltTransformerInputStream(), loaderId,
loaderDefinition.getContext(),
loaderDefinition.isValidationEnabled(),
loaderDefinition.isBeanOverridingAllowed(),
loaderDefinition.isLoadFromClassPath(),
loaderDefinition.isLoadFromWorkingDir(),
loaderDefinition.isDefaultLoadFromClassPath(),
loaderDefinition.isDefaultLoadFromWorkingDir());
}
/**
* Loads the <code>BeanFactory</code> which is specified by the passed
* <code>xmlFileName</code>, using the passed <code>xsltTransformer</code>
* to transform the data into a bean XML definition. The XML might be
* validated if <code>validate</code> is set to <code>true</code>.
* Furthermore the <code>beanOverriding</code> specifies if beans can be
* overwritten within the context, i.e. all the XML files found with the
* specified <code>xmlFileName</code>.
*
* @param xmlFileName
* the XML files to be loaded
* @param xsltTransformer
* the XSLT transformer to be used to transform the XML files
* into bean definitions
* @param validate
* <code>true</code> if the XML of the <code>xmlFileName</code>
* should be validated, otherwise <code>false</code>
* @param beanOverriding
* <code>true</code> if beans of the context can be overwritten,
* otherwise <code>false</code>
* @param loadFromClasspath
* <code>true</code> if the <code>xmlFileName</code> should be
* searched on the classpath, otherwise <code>false</code>
* @param loadFromWorkingDir
* <code>true</code> if the <code>xmlFileName</code> should be
* searched in the current working-directory (and all
* sub-directories), otherwise <code>false</code>
*
* @return the <code>ListableBeanFactory</code> loaded by the specified
* parameters
*/
public DefaultListableBeanFactory loadBeanFactory(final String xmlFileName,
final InputStream xsltTransformer, final boolean validate,
final boolean beanOverriding, final boolean loadFromClasspath,
final boolean loadFromWorkingDir) {
return loadBeanFactory(xmlFileName, xsltTransformer, null, validate,
beanOverriding, loadFromClasspath, loadFromWorkingDir);
}
/**
* Loads the <code>BeanFactory</code> which is specified by the passed
* <code>xmlFileName</code>, using the passed <code>xsltTransformer</code>
* to transform the data into a bean XML definition. The XML might be
* validated if <code>validate</code> is set to <code>true</code>.
* Furthermore the <code>beanOverriding</code> specifies if beans can be
* overwritten within the context, i.e. all the XML files found with the
* specified <code>xmlFileName</code>.
*
* @param xmlSelector
* the XML files to be loaded
* @param xsltStream
* the stream of the XSLT used for transformation
* @param context
* the context to look for the specified <code>xmlFileName</code>
* , might be <code>null</code> if all files on the class-path
* with the specified <code>xmlFileName</code> should be loaded
* @param validate
* <code>true</code> if the XML of the <code>xmlFileName</code>
* should be validated, otherwise <code>false</code>
* @param beanOverriding
* <code>true</code> if beans of the context can be overwritten,
* otherwise <code>false</code>
* @param loadFromClasspath
* <code>true</code> if the <code>xmlFileName</code> should be
* searched on the classpath, otherwise <code>false</code>
* @param loadFromWorkingDir
* <code>true</code> if the <code>xmlFileName</code> should be
* searched in the current working-directory (and all
* sub-directories), otherwise <code>false</code>
*
* @return the <code>ListableBeanFactory</code> loaded by the specified
* parameters
*/
public DefaultListableBeanFactory loadBeanFactory(final String xmlSelector,
final InputStream xsltStream, final Class<?> context,
final boolean validate, final boolean beanOverriding,
final boolean loadFromClasspath, final boolean loadFromWorkingDir) {
return loadBeanFactory(xmlSelector, null, xsltStream, null, context,
validate, beanOverriding, loadFromClasspath,
loadFromWorkingDir, loadFromClasspath, loadFromWorkingDir);
}
/**
* Loads the <code>BeanFactory</code> which is specified by the passed
* <code>xmlFileName</code>, using the passed <code>xsltTransformer</code>
* to transform the data into a bean XML definition. The XML might be
* validated if <code>validate</code> is set to <code>true</code>.
* Furthermore the <code>beanOverriding</code> specifies if beans can be
* overwritten within the context, i.e. all the XML files found with the
* specified <code>xmlFileName</code>.
*
* @param xmlSelector
* the XML files to be loaded
* @param defaultXmlSelector
* the selector to be used if the {@code xmlSelector} doesn't
* select any files
* @param xsltStream
* the stream of the XSLT used for transformation
* @param xsltId
* a unique id which specifies the xslt to be used
* @param context
* the context to look for the specified <code>xmlFileName</code>
* , might be <code>null</code> if all files on the class-path
* with the specified <code>xmlFileName</code> should be loaded
* @param validate
* <code>true</code> if the XML of the <code>xmlFileName</code>
* should be validated, otherwise <code>false</code>
* @param beanOverriding
* <code>true</code> if beans of the context can be overwritten,
* otherwise <code>false</code>
* @param loadFromClasspath
* <code>true</code> if the <code>xmlSelector</code> should be
* searched on the classpath, otherwise <code>false</code>
* @param loadFromWorkingDir
* <code>true</code> if the <code>xmlSelector</code> should be
* searched in the current working-directory (and all
* sub-directories), otherwise <code>false</code>
* @param loadDefaultFromClasspath
* <code>true</code> if the <code>defaultXmlSelector</code>
* should be searched on the classpath, otherwise
* <code>false</code>
* @param loadDefaultFromWorkingDir
* <code>true</code> if the <code>defaultXmlSelector</code>
* should be searched in the current working-directory (and all
* sub-directories), otherwise <code>false</code>
*
* @return the <code>ListableBeanFactory</code> loaded by the specified
* parameters
*/
public DefaultListableBeanFactory loadBeanFactory(final String xmlSelector,
final String defaultXmlSelector, final InputStream xsltStream,
final String xsltId, final Class<?> context,
final boolean validate, final boolean beanOverriding,
final boolean loadFromClasspath, final boolean loadFromWorkingDir,
final boolean loadDefaultFromClasspath,
final boolean loadDefaultFromWorkingDir) {
// get all the resources to be loaded
List<InputStream> resIos = getResourceInputStreams(xmlSelector,
context, loadFromClasspath, loadFromWorkingDir);
// check if resources were found and use the default otherwise
if (resIos.size() == 0 && defaultXmlSelector != null) {
if (LOG.isTraceEnabled()) {
LOG.trace("Didn't find any files for '" + xmlSelector
+ "'. The defaultXmlSelector '" + defaultXmlSelector
+ "' is used to load the resources to be loaded.");
}
resIos = getResourceInputStreams(defaultXmlSelector, context,
loadDefaultFromClasspath, loadDefaultFromWorkingDir);
}
// get the factory
final DefaultListableBeanFactory factory = loadBeanFactory(resIos,
xsltStream, xsltId, validate, beanOverriding);
if (LOG.isInfoEnabled()) {
LOG.info("Loaded factory for files '" + xmlSelector + "' (size: "
+ factory.getBeanDefinitionCount() + ")");
}
return factory;
}
private List<InputStream> getResourceInputStreams(final String xmlSelector,
final Class<?> context, final boolean loadFromClasspath,
final boolean loadFromWorkingDir) {
// get the selector with replaced properties
final String replaceXmlSelector = replacer.replacePlaceholders(
xmlSelector, getProperties());
// get all the resources to be loaded
final List<InputStream> resIos = new ArrayList<InputStream>();
if (context == null) {
if (LOG.isTraceEnabled()) {
LOG.trace("Creating factory for files '" + replaceXmlSelector
+ "' without context");
}
// read all the bean.xmls on the classpath
final Collection<ResourceInfo> resInfos = Resource.getResources(
replaceXmlSelector, loadFromClasspath, loadFromWorkingDir);
if (LOG.isTraceEnabled()) {
LOG.trace("Found '" + resInfos.size()
+ "' to be loaded for selector '" + replaceXmlSelector
+ "'");
}
// read all the loaded resources
for (final ResourceInfo resInfo : resInfos) {
final InputStream resIo = Resource.getResourceAsStream(resInfo);
// log the current resource
if (LOG.isTraceEnabled()) {
LOG.trace("Loading '" + replaceXmlSelector
+ "' at location '" + resInfo.getFullPath() + "'");
}
resIos.add(resIo);
}
} else {
final String contextPath = context.getPackage().getName()
.replace(".", "/");
final String fileClassPath = contextPath + "/" + replaceXmlSelector;
if (LOG.isTraceEnabled()) {
LOG.trace("Creating factory for file '" + replaceXmlSelector
+ "' using context '" + fileClassPath + "'");
}
// get the resource
final InputStream resIo = context.getClassLoader()
.getResourceAsStream(fileClassPath);
resIos.add(resIo);
}
return resIos;
}
	/**
	 * Loads a {@link DefaultListableBeanFactory} from the specified resources,
	 * optionally validating and XSLT-transforming each resource before its
	 * bean definitions are registered.
	 *
	 * @param resIos
	 *            the {@link InputStream} instances to load the definitions from
	 * @param xsltStream
	 *            the stream of the XSLT used for transformation
	 * @param xsltId
	 *            a unique id which specifies the xslt to be used; if
	 *            <code>null</code> the transformer is set without caching
	 * @param validate
	 *            <code>true</code> if the XML of the <code>xmlFileName</code>
	 *            should be validated, otherwise <code>false</code>
	 * @param beanOverriding
	 *            <code>true</code> if beans of the context can be overwritten,
	 *            otherwise <code>false</code>
	 *
	 * @return the <code>ListableBeanFactory</code> loaded by the specified
	 *         parameters
	 */
	public DefaultListableBeanFactory loadBeanFactory(
			final Collection<InputStream> resIos, final InputStream xsltStream,
			final String xsltId, final boolean validate,
			final boolean beanOverriding) {
		// create the factory and enable auto-wiring to setup the core system
		final DefaultListableBeanFactory factory = SpringHelper
				.createBeanFactory(true, beanOverriding);

		/*
		 * If we don't have any resources we can just return the factory, this
		 * should increase the performance for empty reads, cause template
		 * creation might take some time sometimes.
		 */
		if (resIos.size() == 0) {
			return factory;
		}

		// create the reader
		final XmlBeanDefinitionReader reader = new XmlBeanDefinitionReader(
				factory);

		// initialize the xslt transformer (field, not a parameter of this
		// method); it is configured once here and then applied per resource
		// in addResourceToReader below
		if (xsltTransformer != null) {
			try {
				if (xsltTransformer.hasCachedXslt(xsltId)) {
					// NOTE(review): passing null apparently activates the
					// transformer already cached under xsltId - confirm
					// against the IXsltTransformer contract
					xsltTransformer.setCachedXsltTransformer(xsltId, null);
				} else {
					// replace the properties within the xslt
					org.springframework.core.io.Resource res = replacePlaceholders(xsltStream);
					final InputStream xsltReplacedStream = res == null ? null
							: res.getInputStream();

					// cache if asked for
					if (xsltId == null) {
						xsltTransformer.setXsltTransformer(xsltReplacedStream);
					} else {
						xsltTransformer.setCachedXsltTransformer(xsltId,
								xsltReplacedStream);
					}
				}
			} catch (final InvalidXsltException e) {
				throw new InvalidConfigurationException(
						"The specified XSLT is invalid and therefore cannot be used.",
						e);
			} catch (final IOException e) {
				throw new InvalidConfigurationException(
						"The specified XSLT stream cannot be accessed.", e);
			}
		}

		// transform all the resources and add those
		for (final InputStream resIo : resIos) {
			addResourceToReader(reader, xsltTransformer, resIo, validate);
		}

		if (LOG.isDebugEnabled()) {
			LOG.debug("Loaded " + factory.getBeanDefinitionCount()
					+ " modules.");
		}

		/*
		 * use the postProcessing to replace properties (i.e. for imports, those
		 * are loaded directly via Spring and therefore not replaced within the
		 * normal replacement)
		 */
		if (corePropertyHolder != null) {
			corePropertyHolder.postProcessBeanFactory(factory);
		}

		return factory;
	}
/**
* Adds a specific resource to the <code>reader</code>.
*
* @param reader
* the <code>XmlBeanDefinitionReader</code> to which the resource
* should be added
* @param xsltTransformer
* the XSLT transformer used to transform the XML stream into a
* XML bean definition
* @param resStream
* the resource to be added to the <code>reader</code>
* @param validate
* <code>true</code> if the streamed XML should be validated,
* otherwise <code>false</code>
*/
protected void addResourceToReader(final XmlBeanDefinitionReader reader,
final IXsltTransformer xsltTransformer,
final InputStream resStream, final boolean validate) {
// get the content of the stream
org.springframework.core.io.Resource res = replacePlaceholders(resStream);
// validate the resource if needed
if (validate && xsdValidator != null
&& coreSettings.isConfigurationValidationEnabled()) {
if (LOG.isTraceEnabled()) {
LOG.trace("Start to validate the current resource.");
}
try {
xsdValidator.validate(SpringHelper.getInputStream(res));
} catch (final ValidationFailedException e) {
throw new BeanDefinitionStoreException(
"The resource could not be validated", e);
}
}
// now we have to transform the resource
if (xsltTransformer != null) {
final ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
if (LOG.isTraceEnabled()) {
LOG.trace("Start to transform the current resource.");
}
try {
xsltTransformer.transform(SpringHelper.getInputStream(res),
outputStream);
} catch (final TransformationFailedException e) {
throw new BeanDefinitionStoreException(
"The resource could not be transformed", e);
}
if (LOG.isTraceEnabled()) {
LOG.trace("Finished transformation, result: "
+ System.getProperty("line.separator")
+ outputStream.toString());
}
// get the new content
res = new ByteArrayResource(outputStream.toByteArray());
}
// finally add the transformed resource
reader.setValidating(isConfigurationValidationEnabled());
reader.loadBeanDefinitions(res);
}
/**
* Defines if any loaded configuration should be validated against it's
* defined or specified XSD schema. If no
* <code>ConfigurationCoreSettings</code> are defined, the default return
* value is <code>false</code>, otherwise the value defined by the
* <code>ConfigurationCoreSettings</code> is used.
*
* @return <code>true</code> if any loaded configuration should be validated
* against it's defined or specified XSD schema, otherwise
* <code>false</code>
*/
public boolean isConfigurationValidationEnabled() {
return coreSettings != null
&& coreSettings.isConfigurationValidationEnabled();
}
/**
* Defines if an instance of a <code>ILoaderDefinition</code> defined by a
* user can be overridden by another user's <code>ILoaderDefinition</code>.
* If no <code>ConfigurationCoreSettings</code> are defined, the default
* return value is <code>false</code>, otherwise the value defined by the
* <code>ConfigurationCoreSettings</code> is used.
*
* @return <code>true</code> if a user's <code>ILoaderDefinition</code> can
* be overridden, otherwise <code>false</code>
*/
public boolean isUserLoaderOverridingAllowed() {
return coreSettings != null
&& coreSettings.isUserLoaderOverridingAllowed();
}
	/**
	 * Loads the <code>Document</code> from the specified
	 * {@link org.springframework.core.io.Resource Resource}. The document is
	 * parsed without validation; an unparsable resource results in an
	 * {@link IllegalArgumentException}.
	 *
	 * @param res
	 *            the resource to load the {@link Document} from
	 *
	 * @return the <code>Document</code> specified by the passed
	 *         <code>Resource</code>, never <code>null</code>
	 *
	 * @throws IllegalArgumentException
	 *             if the resource cannot be parsed into a document
	 */
	protected Document loadDocument(
			final org.springframework.core.io.Resource res) {
		// get the resource as encoded one (detects/keeps the encoding)
		final EncodedResource encRes = new EncodedResource(res);

		// read the XML document and replace the placeholders
		InputStream inputStream = null;
		InputSource inputSource = null;
		Document doc = null;
		try {
			inputStream = encRes.getResource().getInputStream();
			inputSource = new InputSource(inputStream);
			// forward the detected encoding to the SAX input source, if any
			if (encRes.getEncoding() != null) {
				inputSource.setEncoding(encRes.getEncoding());
			}

			// get the Document; VALIDATION_NONE disables DTD/XSD validation,
			// the parse is namespace-unaware (last argument false)
			final DefaultDocumentLoader loader = new DefaultDocumentLoader();
			doc = loader.loadDocument(inputSource, null, null,
					XmlValidationModeDetector.VALIDATION_NONE, false);
		} catch (final Exception e) {
			// log it
			if (LOG.isWarnEnabled()) {
				LOG.warn(
						"Unable to parse the passed ByteArrayResource '"
								+ res.getDescription() + "'.", e);
			}

			throw new IllegalArgumentException(
					"The passed resource contains an invalid document.", e);
		} finally {
			// close the streams (closing the source before the raw stream)
			Streams.closeIO(inputSource);
			Streams.closeIO(inputStream);
		}

		return doc;
	}
/**
* Replaces the properties within the passed <code>InputStream</code>.
*
* @param resStream
* the <code>InputStream</code> which provides the source to
* replace the properties in
*
* @return the <code>Resource</code> with the replaced properties
*/
protected org.springframework.core.io.Resource replacePlaceholders(
final InputStream resStream) {
// if we don't have anything we cannot create anything
if (resStream == null) {
return null;
} else {
final byte[] content;
try {
content = Streams.copyStreamToByteArray(resStream);
} catch (final IOException e) {
throw new BeanDefinitionStoreException(
"The resource could not be read", e);
} finally {
// close the stream
Streams.closeIO(resStream);
}
// now let's get the resource
org.springframework.core.io.Resource res = new ByteArrayResource(
content);
return replacePlaceholders(res);
}
}
/**
* Replaces all the placeholders (i.e. properties) within the passed
* {@link org.springframework.core.io.Resource Resource}.
*
* @param res
* the <code>Resource</code> to replace the placeholders in
*
* @return a <code>Resource</code> with replaced placeholders
*/
protected org.springframework.core.io.Resource replacePlaceholders(
final org.springframework.core.io.Resource res) {
org.springframework.core.io.Resource resultRes = res;
// replace the values
if (corePropertyHolder != null && xmlReplacer != null) {
// load the Document specified by the resource
final Document doc = loadDocument(res);
// get the properties
final Properties properties = getProperties();
// get the document with the replacements
final Document resDoc = xmlReplacer.replacePlaceholders(doc,
properties);
// get the content of the new document
final byte[] content = Xml.createByteArray(resDoc);
// now create the resource from the content
resultRes = new ByteArrayResource(content);
}
// return the new resource
return resultRes;
}
/**
* Gets all the <code>Properties</code> defined for <code>this</code>
* configuration.
*
* @return the <code>Properties</code> of <code>this</code> configuration
*/
public Properties getProperties() {
try {
return corePropertyHolder.getProperties();
} catch (final IOException e) {
if (LOG.isErrorEnabled()) {
LOG.error(
"Unable to load the properties of the configuration.",
e);
}
return new Properties();
}
}
	@Override
	public <T> T createInstance(final Class<T> clazz) {
		// instantiate the class via the module-factory; AUTOWIRE_NO disables
		// by-name/by-type autowiring and the dependency-check is off (false)
		@SuppressWarnings("unchecked")
		final T bean = (T) moduleFactory.autowire(clazz,
				AutowireCapableBeanFactory.AUTOWIRE_NO, false);
		return bean;
	}
	@Override
	public <T> T wireInstance(final T bean) {
		// wire the properties of an externally created instance; AUTOWIRE_NO
		// disables by-name/by-type autowiring, dependency-check is off (false)
		moduleFactory.autowireBeanProperties(bean,
				AutowireCapableBeanFactory.AUTOWIRE_NO, false);
		return bean;
	}
	@Override
	public DefaultModuleHolder loadDelayed(final String loaderId,
			final InputStream resIo) throws InvalidConfigurationException {
		// resolve the loader-definition registered under the passed id
		final ILoaderDefinition loaderDefinition = loaderDefinitions
				.get(loaderId);

		// check if we have a loader
		if (loaderDefinition == null) {
			throw new InvalidConfigurationException("A loader with id '"
					+ loaderId + "' could not be found.");
		}

		// create a factory to hold all the beans read from the single stream
		final List<InputStream> resIos = new ArrayList<InputStream>();
		resIos.add(resIo);
		final DefaultListableBeanFactory factory = loadBeanFactory(resIos,
				loaderDefinition.getXsltTransformerInputStream(), loaderId,
				loaderDefinition.isValidationEnabled(),
				loaderDefinition.isBeanOverridingAllowed());

		// set a parent factory so beans of the main factory can be referenced
		factory.setParentBeanFactory(moduleFactory);

		// now get all the modules
		final Map<String, Object> factoryModules = factory
				.getBeansOfType(Object.class);
		// NOTE(review): 'delayedModules' is filled below but never used - the
		// holder only receives the factory. Either this loop is dead code or
		// the map was meant to be passed to DefaultModuleHolder; confirm the
		// intended behavior before changing it.
		final Map<String, Object> delayedModules = new HashMap<String, Object>();
		for (final Entry<String, Object> e : factoryModules.entrySet()) {
			if (isModule(e.getKey(), e.getValue())) {
				delayedModules.put(e.getKey(), e.getValue());
			}
		}

		return new DefaultModuleHolder(factory);
	}
	@Override
	public void release() {
		// dispose all singletons held by the module-factory; this releases
		// the modules created by this configuration
		moduleFactory.destroySingletons();
	}
}
|
/*
* Copyright (C) 2016 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.strata.pricer.capfloor;
import com.opengamma.strata.basics.currency.Payment;
import com.opengamma.strata.market.sensitivity.PointSensitivities;
import com.opengamma.strata.market.sensitivity.PointSensitivityBuilder;
import com.opengamma.strata.pricer.DiscountingPaymentPricer;
import com.opengamma.strata.pricer.rate.RatesProvider;
import com.opengamma.strata.product.capfloor.ResolvedIborCapFloor;
import com.opengamma.strata.product.capfloor.ResolvedIborCapFloorTrade;
/**
 * Pricer for cap/floor trades in the SABR model.
 */
public class SabrIborCapFloorTradePricer
    extends VolatilityIborCapFloorTradePricer {

  /**
   * Default implementation.
   */
  public static final SabrIborCapFloorTradePricer DEFAULT =
      new SabrIborCapFloorTradePricer(SabrIborCapFloorProductPricer.DEFAULT, DiscountingPaymentPricer.DEFAULT);

  /**
   * The pricer used for the {@link ResolvedIborCapFloor} product.
   */
  private final SabrIborCapFloorProductPricer productPricer;

  /**
   * Creates an instance.
   *
   * @param productPricer the pricer for {@link ResolvedIborCapFloor}
   * @param paymentPricer the pricer for {@link Payment}
   */
  public SabrIborCapFloorTradePricer(SabrIborCapFloorProductPricer productPricer, DiscountingPaymentPricer paymentPricer) {
    super(productPricer, paymentPricer);
    this.productPricer = productPricer;
  }

  //-------------------------------------------------------------------------
  /**
   * Calculates the present value rates sensitivity of the Ibor cap/floor trade.
   * <p>
   * The present value sensitivity is computed in a "sticky model parameter" style, i.e. the sensitivity to the
   * curve nodes with the SABR model parameters unchanged. This sensitivity does not include a potential
   * re-calibration of the model parameters to the raw market data.
   *
   * @param trade  the Ibor cap/floor trade
   * @param ratesProvider  the rates provider
   * @param volatilities  the volatilities
   * @return the present value sensitivity
   */
  public PointSensitivities presentValueSensitivityRatesStickyModel(
      ResolvedIborCapFloorTrade trade,
      RatesProvider ratesProvider,
      SabrIborCapletFloorletVolatilities volatilities) {

    // sensitivity of the product legs
    PointSensitivityBuilder sensi =
        productPricer.presentValueSensitivityRatesStickyModel(trade.getProduct(), ratesProvider, volatilities);
    // add the premium sensitivity when an upfront premium is present
    if (trade.getPremium().isPresent()) {
      PointSensitivityBuilder premiumSensi =
          getPaymentPricer().presentValueSensitivity(trade.getPremium().get(), ratesProvider);
      sensi = sensi.combinedWith(premiumSensi);
    }
    return sensi.build();
  }

  /**
   * Calculates the present value volatility sensitivity of the Ibor cap/floor trade.
   * <p>
   * The sensitivity of the present value to the SABR model parameters, alpha, beta, rho and nu.
   * The premium, if present, has no volatility sensitivity.
   *
   * @param trade  the Ibor cap/floor trade
   * @param ratesProvider  the rates provider
   * @param volatilities  the volatilities
   * @return the present value sensitivity
   */
  public PointSensitivityBuilder presentValueSensitivityModelParamsSabr(
      ResolvedIborCapFloorTrade trade,
      RatesProvider ratesProvider,
      SabrIborCapletFloorletVolatilities volatilities) {
    return productPricer.presentValueSensitivityModelParamsSabr(trade.getProduct(), ratesProvider, volatilities);
  }
}
|
package com.vector.onetodo;
import android.widget.AbsListView;
/**
 * Callback contract for views that host a scrollable projects list and need
 * to keep the scroll position in sync across pages.
 */
public interface ProjectsScrollHolder {

	/**
	 * Adjusts the holder's scroll position to the given offset.
	 *
	 * @param scrollHeight the target scroll offset - NOTE(review): the unit
	 *            (pixels) and sign convention are not visible here, confirm
	 *            at the call-site
	 */
	void adjustScroll(int scrollHeight);

	/**
	 * Notifies the holder of a scroll event on the list, mirroring
	 * {@link android.widget.AbsListView.OnScrollListener#onScroll} with the
	 * additional page index.
	 *
	 * @param view the list view being scrolled
	 * @param firstVisibleItem the index of the first visible item
	 * @param visibleItemCount the number of visible items
	 * @param totalItemCount the total number of items in the adapter
	 * @param pagePosition the position of the page emitting the event
	 */
	void onScroll(AbsListView view, int firstVisibleItem, int visibleItemCount,
			int totalItemCount, int pagePosition);
}
|
/**
* This code was generated by
* \ / _ _ _| _ _
* | (_)\/(_)(_|\/| |(/_ v1.0.0
* / /
*/
package com.twilio.rest.preview.trustedComms.business;
import com.twilio.base.Fetcher;
import com.twilio.exception.ApiConnectionException;
import com.twilio.exception.ApiException;
import com.twilio.exception.RestException;
import com.twilio.http.HttpMethod;
import com.twilio.http.Request;
import com.twilio.http.Response;
import com.twilio.http.TwilioRestClient;
import com.twilio.rest.Domains;
/**
 * PLEASE NOTE that this class contains preview products that are subject to
 * change. Use them with caution. If you currently do not have developer preview
 * access, please contact help@twilio.com.
 */
public class BrandFetcher extends Fetcher<Brand> {
    private final String pathBusinessSid;
    private final String pathSid;

    /**
     * Construct a new BrandFetcher.
     *
     * @param pathBusinessSid Business Sid.
     * @param pathSid Brand Sid.
     */
    public BrandFetcher(final String pathBusinessSid,
                        final String pathSid) {
        this.pathBusinessSid = pathBusinessSid;
        this.pathSid = pathSid;
    }

    /**
     * Make the request to the Twilio API to perform the fetch.
     *
     * @param client TwilioRestClient with which to make the request
     * @return Fetched Brand
     */
    @Override
    @SuppressWarnings("checkstyle:linelength")
    public Brand fetch(final TwilioRestClient client) {
        // build the resource path from the business and brand identifiers
        final String path = "/TrustedComms/Businesses/" + this.pathBusinessSid + "/Brands/" + this.pathSid;
        final Request request = new Request(
            HttpMethod.GET,
            Domains.PREVIEW.toString(),
            path
        );

        final Response response = client.request(request);
        if (response == null) {
            throw new ApiConnectionException("Brand fetch failed: Unable to connect to server");
        }
        if (!TwilioRestClient.SUCCESS.apply(response.getStatusCode())) {
            // the API answered with an error; surface its payload if parseable
            final RestException restException = RestException.fromJson(response.getStream(), client.getObjectMapper());
            if (restException == null) {
                throw new ApiException("Server Error, no content");
            }
            throw new ApiException(restException);
        }
        return Brand.fromJson(response.getStream(), client.getObjectMapper());
    }
}
|
import java.io.PrintWriter;
import java.io.RandomAccessFile;
/**
 * Describes a single property of a class to be generated and knows how to
 * emit the corresponding field declaration, getter, setter and setter-call
 * snippets via a {@link PrintWriter}.
 */
public class ClassProperty {
	//============================================= Properties
	private String name;        // property name (also used as parameter name)
	private String type;        // Java type of the property
	private String scope;       // field scope: "private" or anything else -> public
	private boolean getter;     // whether a getter should be generated
	private String getterScope; // "public" -> public getter, anything else -> private
	private boolean setter;     // whether a setter should be generated
	private String setterScope; // "public" -> public setter, anything else -> private

	//============================================= Constructors

	/**
	 * Creates a property description from the individual attributes.
	 *
	 * @param name property name
	 * @param type Java type of the property
	 * @param scope field scope ("private" is matched case-insensitively)
	 * @param getter whether a getter is generated
	 * @param getterScope scope of the generated getter
	 * @param setter whether a setter is generated
	 * @param setterScope scope of the generated setter
	 */
	public ClassProperty (String name, String type, String scope, boolean getter, String getterScope, boolean setter, String setterScope) {
		setName(name);
		setType(type);
		setScope(scope);
		setGetter(getter);
		setGetterScope(getterScope);
		setSetter(setter);
		setSetterScope(setterScope);
	}

	/**
	 * Creates a property description by reading seven consecutive data lines
	 * from the given file (name, type, scope, getter-flag, getter-scope,
	 * setter-flag, setter-scope).
	 *
	 * @param rafReader the file to read the attribute lines from
	 * @throws Exception if reading any of the lines fails
	 */
	public ClassProperty (RandomAccessFile rafReader) throws Exception{
		this (
			Utilities.nextLineData(rafReader),
			Utilities.nextLineData(rafReader),
			Utilities.nextLineData(rafReader),
			Utilities.nextLineData(rafReader).equalsIgnoreCase("true"),
			Utilities.nextLineData(rafReader),
			Utilities.nextLineData(rafReader).equalsIgnoreCase("true"),
			Utilities.nextLineData(rafReader)
		);
	}

	//============================================= Methods

	/** Returns the property name with its first letter upper-cased. */
	private String capitalized() {
		return name.substring(0, 1).toUpperCase() + name.substring(1);
	}

	/**
	 * Writes the field declaration, e.g. {@code private String name;}.
	 * NOTE(review): the field scope is matched case-insensitively here while
	 * the getter/setter scopes below use exact comparison - kept as-is.
	 *
	 * @param pw the writer to emit the declaration to
	 */
	public void writeProperty(PrintWriter pw) {
		if (scope.equalsIgnoreCase("private"))
			pw.print("\tprivate ");
		else
			pw.print("\tpublic ");
		pw.print(type + " ");
		pw.println(name + ";");
	}

	/**
	 * Writes the parameter declaration for a constructor/method header,
	 * e.g. {@code String name}.
	 *
	 * @param pw the writer to emit the header fragment to
	 */
	public void writeHeader(PrintWriter pw) {
		pw.print(type + " " + name);
	}

	/**
	 * Writes a call to the property's setter forwarding the same-named
	 * parameter, e.g. {@code setName(name);}.
	 *
	 * @param pw the writer to emit the call to
	 */
	public void writeSetterCaller(PrintWriter pw) {
		pw.println("\t\t" + "set" + capitalized() + "(" + name + ");");
	}

	/**
	 * Writes the default-value literal for the property's type, for use as a
	 * setter argument (e.g. {@code ""} for String, {@code 0} for int).
	 *
	 * @param pw the writer to emit the literal to
	 */
	public void writeEmptySetterCaller(PrintWriter pw) {
		if (type.equalsIgnoreCase("String"))
			pw.print("\"\"");
		else if (type.equals("int"))
			pw.print("0");
		else if (type.equals("double"))
			pw.print("0.0");
		else if (type.equals("boolean"))
			pw.print("false");
		else if (type.equals("char"))
			// emit the literal text '\u0000'; the previous "\u0000" printed a
			// raw NUL character into the generated source, which is invalid
			pw.print("'\\u0000'");
		else
			pw.print("null");
	}

	/**
	 * Writes an expression reading this property from another instance,
	 * e.g. {@code other.name} - used by copy constructors.
	 *
	 * @param pw the writer to emit the expression to
	 * @param objName the name of the instance to read from
	 */
	public void writeCopySetterCaller(PrintWriter pw, String objName) {
		pw.print(objName + "." + name);
	}

	/**
	 * Writes the getter method for this property, if one is requested.
	 *
	 * @param pw the writer to emit the method to
	 */
	public void writeGetter(PrintWriter pw) {
		if (!getter)
			return;
		if (getterScope.equals("public"))
			pw.print("\tpublic ");
		else
			pw.print("\tprivate ");
		pw.println(type + " get" + capitalized() + "() {");
		pw.println("\t\treturn " + name + ";");
		pw.println("\t}");
	}

	/**
	 * Writes the setter method for this property, if one is requested.
	 *
	 * @param pw the writer to emit the method to
	 */
	public void writeSetter(PrintWriter pw) {
		if (!setter)
			return;
		if (setterScope.equals("public"))
			pw.print("\tpublic ");
		else
			pw.print("\tprivate ");
		// a setter returns nothing; emitting the property type here (as the
		// previous version did) produced non-compiling generated code, since
		// no return statement is written into the body
		pw.println("void set" + capitalized() + "(" + type + " " + name + ") {");
		pw.println("\t\tthis." + name + " = " + name + ";");
		pw.println("\t}");
	}

	@Override
	public String toString() {
		return String.format(
				"ClassProperty [name=%s, type=%s, scope=%s, getter=%s, getterScope=%s, setter=%s, SetterScope=%s]",
				name, type, scope, getter, getterScope, setter, setterScope);
	}

	//============================================= Getters and Setters
	public String getName() {
		return name;
	}

	public String getType() {
		return type;
	}

	public String getScope() {
		return scope;
	}

	public boolean isGetter() {
		return getter;
	}

	public String getGetterScope() {
		return getterScope;
	}

	public boolean isSetter() {
		return setter;
	}

	public String getSetterScope() {
		return setterScope;
	}

	public void setName(String name) {
		this.name = name;
	}

	public void setType(String type) {
		this.type = type;
	}

	public void setScope(String scope) {
		this.scope = scope;
	}

	public void setGetter(boolean getter) {
		this.getter = getter;
	}

	public void setGetterScope(String getterScope) {
		this.getterScope = getterScope;
	}

	public void setSetter(boolean setter) {
		this.setter = setter;
	}

	public void setSetterScope(String setterScope) {
		this.setterScope = setterScope;
	}
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.monitor;
import java.io.IOException;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.*;
import org.apache.lucene.analysis.CharArraySet;
/**
 * A {@link TokenFilter} which first passes each input token through unchanged
 * and then, on subsequent calls, emits that token's sub-grams (substrings,
 * measured in code points) with {@code suffix} appended to each.
 *
 * Tokens flagged with {@link KeywordAttribute} are passed straight through
 * without ngramming.  Tokens longer than {@code maxTokenLength} are replaced
 * by a single wildcard token ({@code anyToken}) instead of being ngrammed.
 * Duplicate grams are suppressed across the stream's lifetime via the
 * {@code seenSuffixes}/{@code seenInfixes} sets, cleared on {@link #reset()}.
 */
final class SuffixingNGramTokenFilter extends TokenFilter {

  private final String suffix;
  private final int maxTokenLength;
  private final String anyToken;

  // State of the token currently being ngrammed.
  // curTermBuffer == null means "pull the next token from the input".
  private char[] curTermBuffer;
  private int curTermLength;
  private int curCodePointCount;
  private int curGramSize;
  private int curPos;
  private int curPosInc, curPosLen;
  private int tokStart;
  private int tokEnd;

  private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
  private final PositionIncrementAttribute posIncAtt;
  private final PositionLengthAttribute posLenAtt;
  private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
  private final KeywordAttribute keywordAtt = addAttribute(KeywordAttribute.class);

  // Dedup sets: grams that run to the end of the token ("suffix" grams),
  // and all other grams, respectively.
  private final CharArraySet seenSuffixes = new CharArraySet(1024, false);
  private final CharArraySet seenInfixes = new CharArraySet(1024, false);

  /**
   * Creates SuffixingNGramTokenFilter.
   *
   * @param input {@link org.apache.lucene.analysis.TokenStream} holding the input to be tokenized
   * @param suffix a string to suffix to all ngrams
   * @param wildcardToken a token to emit if the input token is longer than maxTokenLength
   * @param maxTokenLength tokens longer than this will not be ngrammed
   */
  public SuffixingNGramTokenFilter(TokenStream input, String suffix, String wildcardToken, int maxTokenLength) {
    super(input);
    this.suffix = suffix;
    this.anyToken = wildcardToken;
    this.maxTokenLength = maxTokenLength;
    posIncAtt = addAttribute(PositionIncrementAttribute.class);
    posLenAtt = addAttribute(PositionLengthAttribute.class);
  }

  /**
   * Advances to the next token.
   * @return true if a token is available, false at end of stream.
   */
  @Override
  public final boolean incrementToken() throws IOException {
    while (true) {
      if (curTermBuffer == null) {
        // No token in progress: fetch the next one from upstream.
        if (!input.incrementToken()) {
          return false;
        }
        if (keywordAtt.isKeyword())
          // Keyword tokens pass through untouched.
          return true;
        // Snapshot the token's state; the original token is returned as-is
        // on this call, and its grams are produced on subsequent calls.
        curTermBuffer = termAtt.buffer().clone();
        curTermLength = termAtt.length();
        curCodePointCount = Character.codePointCount(termAtt, 0, termAtt.length());
        curGramSize = curTermLength;
        curPos = 0;
        curPosInc = posIncAtt.getPositionIncrement();
        curPosLen = posLenAtt.getPositionLength();
        tokStart = offsetAtt.startOffset();
        tokEnd = offsetAtt.endOffset();
        return true;
      }
      if (curTermLength > maxTokenLength) {
        // Token too long to ngram: emit the wildcard token instead.
        clearAttributes();
        termAtt.append(anyToken);
        curTermBuffer = null;
        return true;
      }
      if (curGramSize == 0) {
        // Exhausted gram lengths at this start position; advance the start.
        ++curPos;
        curGramSize = curTermLength - curPos;
      }
      if (curGramSize >= 0 && (curPos + curGramSize) <= curCodePointCount) {
        clearAttributes();
        final int start = Character.offsetByCodePoints(curTermBuffer, 0, curTermLength, 0, curPos);
        final int end = Character.offsetByCodePoints(curTermBuffer, 0, curTermLength, start, curGramSize);
        termAtt.copyBuffer(curTermBuffer, start, end - start);
        termAtt.append(suffix);
        // A gram reaching the end of the token is a "suffix" gram; if it was
        // already emitted, every remaining gram of this token is a duplicate,
        // so abandon the whole token.
        if ((curGramSize == curTermLength - curPos) && !seenSuffixes.add(termAtt.subSequence(0, termAtt.length()))) {
          curTermBuffer = null;
          continue;
        }
        // Duplicate infix gram: skip to the next start position.
        if (!seenInfixes.add(termAtt.subSequence(0, termAtt.length()))) {
          curGramSize = 0;
          continue;
        }
        posIncAtt.setPositionIncrement(curPosInc);
        curPosInc = 0;  // subsequent grams stack at the same position
        posLenAtt.setPositionLength(curPosLen);
        offsetAtt.setOffset(tokStart, tokEnd);
        curGramSize--;
        return true;
      }
      // No more grams for this token.
      curTermBuffer = null;
    }
  }

  @Override
  public void reset() throws IOException {
    super.reset();
    curTermBuffer = null;
    seenInfixes.clear();
    seenSuffixes.clear();
  }
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3a;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.InterruptedIOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.Objects;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import com.amazonaws.AmazonClientException;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.AbortMultipartUploadRequest;
import com.amazonaws.services.s3.model.AmazonS3Exception;
import com.amazonaws.services.s3.model.CannedAccessControlList;
import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
import com.amazonaws.services.s3.model.CompleteMultipartUploadResult;
import com.amazonaws.services.s3.model.CopyObjectRequest;
import com.amazonaws.services.s3.model.DeleteObjectsRequest;
import com.amazonaws.services.s3.model.GetObjectMetadataRequest;
import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
import com.amazonaws.services.s3.model.ListObjectsRequest;
import com.amazonaws.services.s3.model.MultiObjectDeleteException;
import com.amazonaws.services.s3.model.ObjectListing;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PartETag;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.model.PutObjectResult;
import com.amazonaws.services.s3.model.S3ObjectSummary;
import com.amazonaws.services.s3.model.SSEAwsKeyManagementParams;
import com.amazonaws.services.s3.model.SSECustomerKey;
import com.amazonaws.services.s3.model.UploadPartRequest;
import com.amazonaws.services.s3.model.UploadPartResult;
import com.amazonaws.services.s3.transfer.Copy;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerConfiguration;
import com.amazonaws.services.s3.transfer.Upload;
import com.amazonaws.event.ProgressListener;
import com.amazonaws.event.ProgressEvent;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.ListeningExecutorService;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.GlobalStorageStatistics;
import org.apache.hadoop.fs.InvalidRequestException;
import org.apache.hadoop.fs.LocalDirAllocator;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.PathIOException;
import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.StorageStatistics;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.s3a.s3guard.DirListingMetadata;
import org.apache.hadoop.fs.s3a.s3guard.MetadataStoreListFilesIterator;
import org.apache.hadoop.fs.s3a.s3guard.MetadataStore;
import org.apache.hadoop.fs.s3a.s3guard.PathMetadata;
import org.apache.hadoop.fs.s3a.s3guard.S3Guard;
import org.apache.hadoop.fs.s3native.S3xLoginHelper;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.BlockingThreadPoolExecutorService;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.SemaphoredDelegatingExecutor;
import static org.apache.hadoop.fs.s3a.Constants.*;
import static org.apache.hadoop.fs.s3a.Listing.ACCEPT_ALL;
import static org.apache.hadoop.fs.s3a.S3AUtils.*;
import static org.apache.hadoop.fs.s3a.Statistic.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* The core S3A Filesystem implementation.
*
* This subclass is marked as private as code should not be creating it
* directly; use {@link FileSystem#get(Configuration)} and variants to
* create one.
*
* If cast to {@code S3AFileSystem}, extra methods and features may be accessed.
* Consider those private and unstable.
*
* Because it prints some of the state of the instrumentation,
* the output of {@link #toString()} must also be considered unstable.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class S3AFileSystem extends FileSystem {
/**
 * Default blocksize as used in blocksize and FS status queries.
 */
public static final int DEFAULT_BLOCKSIZE = 32 * 1024 * 1024;

// Core state, set once in initialize().
private URI uri;
private Path workingDir;
private String username;
private AmazonS3 s3;
private String bucket;
private int maxKeys;
private Listing listing;
private long partSize;
private boolean enableMultiObjectsDelete;
private TransferManager transfers;
// Thread pools: bounded pool for block uploads; unbounded pool feeds the
// TransferManager.
private ListeningExecutorService boundedThreadPool;
private ExecutorService unboundedThreadPool;
private long multiPartThreshold;
public static final Logger LOG = LoggerFactory.getLogger(S3AFileSystem.class);
// Separate logger for upload/copy progress events.
private static final Logger PROGRESS =
    LoggerFactory.getLogger("org.apache.hadoop.fs.s3a.S3AFileSystem.Progress");
// Lazily created; see createTmpFileForWrite().
private LocalDirAllocator directoryAllocator;
private CannedAccessControlList cannedACL;
private S3AEncryptionMethods serverSideEncryptionAlgorithm;
private S3AInstrumentation instrumentation;
private S3AStorageStatistics storageStatistics;
private long readAhead;
private S3AInputPolicy inputPolicy;
// NOTE(review): presumably guards against double-close; close() is not
// visible in this chunk -- confirm against the full file.
private final AtomicBoolean closed = new AtomicBoolean(false);
private MetadataStore metadataStore;
private boolean allowAuthoritative;

// The maximum number of entries that can be deleted in any call to s3
private static final int MAX_ENTRIES_TO_DELETE = 1000;

// Block ("fast") output stream settings; only populated when
// blockUploadEnabled is true.
private boolean blockUploadEnabled;
private String blockOutputBuffer;
private S3ADataBlocks.BlockFactory blockFactory;
private int blockOutputActiveBlocks;
/** Add any deprecated keys to the global Configuration deprecation map. */
@SuppressWarnings("deprecation")
private static void addDeprecatedKeys() {
  Configuration.addDeprecations(
      new Configuration.DeprecationDelta[]{
          // never shipped in an ASF release, but did get into the wild.
          new Configuration.DeprecationDelta(
              OLD_S3A_SERVER_SIDE_ENCRYPTION_KEY,
              SERVER_SIDE_ENCRYPTION_KEY)
      });
  // make already-loaded Configuration instances pick up the deprecations
  Configuration.reloadExistingConfigurations();
}
static {
  // register deprecated keys as soon as this class is loaded
  addDeprecatedKeys();
}
/** Called after a new FileSystem instance is constructed.
 * @param name a uri whose authority section names the host, port, etc.
 * for this FileSystem
 * @param originalConf the configuration to use for the FS. The
 * bucket-specific options are patched over the base ones before any use is
 * made of the config.
 * @throws IOException on any failure to initialize this instance.
 */
public void initialize(URI name, Configuration originalConf)
    throws IOException {
  uri = S3xLoginHelper.buildFSURI(name);
  // get the host; this is guaranteed to be non-null, non-empty
  bucket = name.getHost();
  // clone the configuration into one with propagated bucket options
  Configuration conf = propagateBucketOptions(originalConf, bucket);
  patchSecurityCredentialProviders(conf);
  super.initialize(name, conf);
  setConf(conf);
  try {
    instrumentation = new S3AInstrumentation(name);

    // Username is the current user at the time the FS was instantiated.
    username = UserGroupInformation.getCurrentUser().getShortUserName();
    workingDir = new Path("/user", username)
        .makeQualified(this.uri, this.getWorkingDirectory());

    // instantiate the S3 client via the configured factory
    Class<? extends S3ClientFactory> s3ClientFactoryClass = conf.getClass(
        S3_CLIENT_FACTORY_IMPL, DEFAULT_S3_CLIENT_FACTORY_IMPL,
        S3ClientFactory.class);
    s3 = ReflectionUtils.newInstance(s3ClientFactoryClass, conf)
        .createS3Client(name);

    maxKeys = intOption(conf, MAX_PAGING_KEYS, DEFAULT_MAX_PAGING_KEYS, 1);
    listing = new Listing(this);
    partSize = getMultipartSizeProperty(conf,
        MULTIPART_SIZE, DEFAULT_MULTIPART_SIZE);
    multiPartThreshold = getMultipartSizeProperty(conf,
        MIN_MULTIPART_THRESHOLD, DEFAULT_MIN_MULTIPART_THRESHOLD);

    //check but do not store the block size
    longBytesOption(conf, FS_S3A_BLOCK_SIZE, DEFAULT_BLOCKSIZE, 1);
    enableMultiObjectsDelete = conf.getBoolean(ENABLE_MULTI_DELETE, true);

    readAhead = longBytesOption(conf, READAHEAD_RANGE,
        DEFAULT_READAHEAD_RANGE, 0);
    // register (or re-use) the global storage statistics for this scheme
    storageStatistics = (S3AStorageStatistics)
        GlobalStorageStatistics.INSTANCE
            .put(S3AStorageStatistics.NAME,
                new GlobalStorageStatistics.StorageStatisticsProvider() {
                  @Override
                  public StorageStatistics provide() {
                    return new S3AStorageStatistics();
                  }
                });

    int maxThreads = conf.getInt(MAX_THREADS, DEFAULT_MAX_THREADS);
    if (maxThreads < 2) {
      LOG.warn(MAX_THREADS + " must be at least 2: forcing to 2.");
      maxThreads = 2;
    }
    int totalTasks = intOption(conf,
        MAX_TOTAL_TASKS, DEFAULT_MAX_TOTAL_TASKS, 1);
    long keepAliveTime = longOption(conf, KEEPALIVE_TIME,
        DEFAULT_KEEPALIVE_TIME, 0);
    // bounded pool for shared transfers; unbounded daemon pool for the rest
    boundedThreadPool = BlockingThreadPoolExecutorService.newInstance(
        maxThreads,
        maxThreads + totalTasks,
        keepAliveTime, TimeUnit.SECONDS,
        "s3a-transfer-shared");
    unboundedThreadPool = new ThreadPoolExecutor(
        maxThreads, Integer.MAX_VALUE,
        keepAliveTime, TimeUnit.SECONDS,
        new LinkedBlockingQueue<Runnable>(),
        BlockingThreadPoolExecutorService.newDaemonThreadFactory(
            "s3a-transfer-unbounded"));

    initTransferManager();

    initCannedAcls(conf);

    // fail fast if the bucket is missing
    verifyBucketExists();

    initMultipartUploads(conf);

    serverSideEncryptionAlgorithm = getEncryptionAlgorithm(conf);
    inputPolicy = S3AInputPolicy.getPolicy(
        conf.getTrimmed(INPUT_FADVISE, INPUT_FADV_NORMAL));

    blockUploadEnabled = conf.getBoolean(FAST_UPLOAD, DEFAULT_FAST_UPLOAD);
    if (blockUploadEnabled) {
      blockOutputBuffer = conf.getTrimmed(FAST_UPLOAD_BUFFER,
          DEFAULT_FAST_UPLOAD_BUFFER);
      partSize = ensureOutputParameterInRange(MULTIPART_SIZE, partSize);
      blockFactory = S3ADataBlocks.createFactory(this, blockOutputBuffer);
      blockOutputActiveBlocks = intOption(conf,
          FAST_UPLOAD_ACTIVE_BLOCKS, DEFAULT_FAST_UPLOAD_ACTIVE_BLOCKS, 1);
      LOG.debug("Using S3ABlockOutputStream with buffer = {}; block={};" +
          " queue limit={}",
          blockOutputBuffer, partSize, blockOutputActiveBlocks);
    } else {
      LOG.debug("Using S3AOutputStream");
    }

    metadataStore = S3Guard.getMetadataStore(this);
    allowAuthoritative = conf.getBoolean(METADATASTORE_AUTHORITATIVE,
        DEFAULT_METADATASTORE_AUTHORITATIVE);
    if (hasMetadataStore()) {
      LOG.debug("Using metadata store {}, authoritative={}",
          getMetadataStore(), allowAuthoritative);
    }
  } catch (AmazonClientException e) {
    throw translateException("initializing ", new Path(name), e);
  }
}
/**
 * Verify that the bucket exists. This does not check permissions,
 * not even read access.
 * @throws FileNotFoundException the bucket is absent
 * @throws IOException any other problem talking to S3
 */
protected void verifyBucketExists()
    throws FileNotFoundException, IOException {
  try {
    if (!s3.doesBucketExist(bucket)) {
      throw new FileNotFoundException("Bucket " + bucket + " does not exist");
    }
  } catch (AmazonS3Exception e) {
    // this is a sign of a serious startup problem so do dump everything
    // NOTE(review): this body duplicates the catch below; kept separate
    // because the statically-imported stringify() may have a more detailed
    // overload for AmazonS3Exception -- confirm before merging the handlers.
    LOG.warn(stringify(e), e);
    throw translateException("doesBucketExist", bucket, e);
  } catch (AmazonServiceException e) {
    // this is a sign of a serious startup problem so do dump everything
    LOG.warn(stringify(e), e);
    throw translateException("doesBucketExist", bucket, e);
  } catch (AmazonClientException e) {
    throw translateException("doesBucketExist", bucket, e);
  }
}
/**
 * Get S3A Instrumentation. For test purposes only.
 * @return this instance's instrumentation.
 */
public S3AInstrumentation getInstrumentation() {
  return instrumentation;
}
/**
 * Create the {@link TransferManager} backed by the unbounded thread pool,
 * configured with the part-size and multipart thresholds of this instance.
 */
private void initTransferManager() {
  TransferManagerConfiguration config = new TransferManagerConfiguration();
  config.setMultipartUploadThreshold(multiPartThreshold);
  config.setMultipartCopyThreshold(multiPartThreshold);
  config.setMinimumUploadPartSize(partSize);
  config.setMultipartCopyPartSize(partSize);
  transfers = new TransferManager(s3, unboundedThreadPool);
  transfers.setConfiguration(config);
}
/**
 * Resolve the canned ACL named in the configuration; {@code cannedACL} is
 * left null when no ACL name is configured.
 * @param conf configuration to read {@code CANNED_ACL} from
 */
private void initCannedAcls(Configuration conf) {
  String aclName = conf.get(CANNED_ACL, DEFAULT_CANNED_ACL);
  cannedACL = aclName.isEmpty()
      ? null
      : CannedAccessControlList.valueOf(aclName);
}
/**
 * If purging of existing multipart uploads is enabled in the configuration,
 * abort all uploads in this bucket older than the configured age.
 * A 403 response is downgraded to a debug message (the FS may be read-only);
 * any other service failure is translated and rethrown.
 * @param conf configuration to read the purge settings from
 * @throws IOException on a purge failure other than access-denied
 */
private void initMultipartUploads(Configuration conf) throws IOException {
  boolean purgeExistingMultipart = conf.getBoolean(PURGE_EXISTING_MULTIPART,
      DEFAULT_PURGE_EXISTING_MULTIPART);
  long purgeExistingMultipartAge = longOption(conf,
      PURGE_EXISTING_MULTIPART_AGE, DEFAULT_PURGE_EXISTING_MULTIPART_AGE, 0);
  if (purgeExistingMultipart) {
    // the age option is in seconds; Date works in milliseconds
    Date purgeBefore =
        new Date(new Date().getTime() - purgeExistingMultipartAge * 1000);
    try {
      transfers.abortMultipartUploads(bucket, purgeBefore);
    } catch (AmazonServiceException e) {
      if (e.getStatusCode() == 403) {
        // access denied is not fatal here
        instrumentation.errorIgnored();
        // fixed grammar of the original message ("Failed to purging")
        LOG.debug("Failed to purge multipart uploads against {}," +
            " FS may be read only", bucket, e);
      } else {
        throw translateException("purging multipart uploads", bucket, e);
      }
    }
  }
}
/**
 * Return the protocol scheme for the FileSystem.
 *
 * @return "s3a", always.
 */
@Override
public String getScheme() {
  return "s3a";
}
/**
 * Returns a URI whose scheme and authority identify this FileSystem.
 * @return the URI built from the bucket login details during initialize().
 */
@Override
public URI getUri() {
  return uri;
}
/**
 * Return the default port used in URIs of this FileSystem.
 * @return {@link Constants#S3A_DEFAULT_PORT}
 */
@Override
public int getDefaultPort() {
  return Constants.S3A_DEFAULT_PORT;
}
/**
 * Returns the S3 client used by this filesystem.
 * Package-private: intended for internal/test use only.
 * @return AmazonS3Client
 */
AmazonS3 getAmazonS3Client() {
  return s3;
}
/**
 * Get the region of this filesystem's bucket; delegates to
 * {@link #getBucketLocation(String)}.
 * @return the region in which the bucket is located
 * @throws IOException on any failure.
 */
public String getBucketLocation() throws IOException {
  return getBucketLocation(bucket);
}
/**
 * Get the region of a bucket.
 * @param bucketName the name of the bucket
 * @return the region in which the bucket is located
 * @throws IOException on any failure; AWS client exceptions are translated
 * to IOExceptions.
 */
public String getBucketLocation(String bucketName) throws IOException {
  try {
    return s3.getBucketLocation(bucketName);
  } catch (AmazonClientException e) {
    throw translateException("getBucketLocation()",
        bucketName, e);
  }
}
/**
 * Returns the read ahead range value used by this filesystem.
 * @return the configured readahead range, in bytes.
 */
@VisibleForTesting
long getReadAheadRange() {
  return readAhead;
}
/**
 * Get the input policy for this FS instance.
 * @return the input policy
 */
@InterfaceStability.Unstable
public S3AInputPolicy getInputPolicy() {
  return inputPolicy;
}
/**
 * Demand-create the directory allocator, then use it to create a temporary
 * file via
 * {@link LocalDirAllocator#createTmpFileForWrite(String, long, Configuration)}.
 * Buffer directories come from {@code BUFFER_DIR} when set, otherwise from
 * "hadoop.tmp.dir".
 * @param pathStr prefix for the temporary file
 * @param size the size of the file that is going to be written
 * @param conf the Configuration object
 * @return a unique temporary file
 * @throws IOException IO problems
 */
synchronized File createTmpFileForWrite(String pathStr, long size,
    Configuration conf) throws IOException {
  if (directoryAllocator == null) {
    String dirKey;
    if (conf.get(BUFFER_DIR) != null) {
      dirKey = BUFFER_DIR;
    } else {
      dirKey = "hadoop.tmp.dir";
    }
    directoryAllocator = new LocalDirAllocator(dirKey);
  }
  return directoryAllocator.createTmpFileForWrite(pathStr, size, conf);
}
/**
 * Get the bucket of this filesystem.
 * @return the bucket name, taken from the FS URI host in initialize().
 */
public String getBucket() {
  return bucket;
}
/**
 * Change the input policy for this FS.
 * @param inputPolicy new policy; must not be null
 */
@InterfaceStability.Unstable
public void setInputPolicy(S3AInputPolicy inputPolicy) {
  Objects.requireNonNull(inputPolicy, "Null inputStrategy");
  LOG.debug("Setting input strategy: {}", inputPolicy);
  this.inputPolicy = inputPolicy;
}
/**
 * Turns a path (relative or otherwise) into an S3 key.
 * Relative paths are resolved against the working directory first.
 *
 * @param path input path, may be relative to the working dir
 * @return a key excluding the leading "/", or, if it is the root path, ""
 */
@VisibleForTesting
public String pathToKey(Path path) {
  Path absolutePath = path.isAbsolute()
      ? path
      : new Path(workingDir, path);
  URI pathUri = absolutePath.toUri();
  if (pathUri.getScheme() != null && pathUri.getPath().isEmpty()) {
    // a qualified root path maps to the empty key
    return "";
  }
  return pathUri.getPath().substring(1);
}
/**
 * Turns an S3 key into a directory-style key: adds a trailing
 * "/" unless the key is the root key ("") or already ends with one.
 *
 * @param key s3 key or ""
 * @return the key with a trailing "/", or, if it is the root key, ""
 */
private String maybeAddTrailingSlash(String key) {
  if (key.isEmpty() || key.endsWith("/")) {
    return key;
  }
  return key + '/';
}
/**
 * Convert a key to an (unqualified) path.
 * @param key input key
 * @return the path built by prefixing the key with "/"
 */
private Path keyToPath(String key) {
  return new Path("/" + key);
}
/**
 * Convert a key to a fully qualified path.
 * @param key input key
 * @return the fully qualified path including URI scheme and bucket name.
 */
Path keyToQualifiedPath(String key) {
  return qualify(keyToPath(key));
}
/**
 * Qualify a path against this FS's URI and working directory.
 * @param path path to qualify
 * @return a qualified path.
 */
public Path qualify(Path path) {
  return path.makeQualified(uri, workingDir);
}
/**
 * Check that a Path belongs to this FileSystem.
 * Unlike the superclass, this version does not look at authority,
 * only hostnames.
 * @param path to check
 * @throws IllegalArgumentException if there is an FS mismatch
 */
@Override
public void checkPath(Path path) {
  S3xLoginHelper.checkPath(getConf(), getUri(), path, getDefaultPort());
}
/**
 * Canonicalize a URI using the S3A default-port rules.
 * @param rawUri the URI to canonicalize
 * @return the canonicalized URI
 */
@Override
protected URI canonicalizeUri(URI rawUri) {
  return S3xLoginHelper.canonicalizeUri(rawUri, getDefaultPort());
}
/**
 * Opens an FSDataInputStream at the indicated Path.
 * @param f the file name to open
 * @param bufferSize the size of the buffer to be used.
 * @return a stream reading the object's data
 * @throws FileNotFoundException if the path does not resolve, or resolves
 * to a directory
 * @throws IOException on any other failure
 */
public FSDataInputStream open(Path f, int bufferSize)
    throws IOException {
  LOG.debug("Opening '{}' for reading.", f);
  // raises FNFE if the path is absent
  final FileStatus fileStatus = getFileStatus(f);
  if (fileStatus.isDirectory()) {
    throw new FileNotFoundException("Can't open " + f
        + " because it is a directory");
  }

  return new FSDataInputStream(
      new S3AInputStream(new S3ObjectAttributes(
          bucket,
          pathToKey(f),
          serverSideEncryptionAlgorithm,
          getServerSideEncryptionKey(getConf())),
          fileStatus.getLen(),
          s3,
          statistics,
          instrumentation,
          readAhead,
          inputPolicy));
}
/**
 * Create an FSDataOutputStream at the indicated Path with write-progress
 * reporting.
 * @param f the file name to open
 * @param permission the permission to set.
 * @param overwrite if a file with this name already exists, then if true,
 * the file will be overwritten, and if false an error will be thrown.
 * @param bufferSize the size of the buffer to be used.
 * @param replication required block replication for the file.
 * @param blockSize the requested block size.
 * @param progress the progress reporter.
 * @return the stream to write to.
 * @throws IOException in the event of IO related errors.
 * @see #setPermission(Path, FsPermission)
 */
@Override
@SuppressWarnings("IOResourceOpenedButNotSafelyClosed")
public FSDataOutputStream create(Path f, FsPermission permission,
    boolean overwrite, int bufferSize, short replication, long blockSize,
    Progressable progress) throws IOException {
  String key = pathToKey(f);
  FileStatus status = null;
  try {
    // get the status or throw an FNFE
    status = getFileStatus(f);

    // if the thread reaches here, there is something at the path
    if (status.isDirectory()) {
      // path references a directory: automatic error
      throw new FileAlreadyExistsException(f + " is a directory");
    }
    if (!overwrite) {
      // path references a file and overwrite is disabled
      throw new FileAlreadyExistsException(f + " already exists");
    }
    LOG.debug("Overwriting file {}", f);
  } catch (FileNotFoundException e) {
    // this means the file is not found: creation can proceed
  }
  instrumentation.fileCreated();
  FSDataOutputStream output;
  if (blockUploadEnabled) {
    // incremental block-based ("fast") upload path
    output = new FSDataOutputStream(
        new S3ABlockOutputStream(this,
            key,
            new SemaphoredDelegatingExecutor(boundedThreadPool,
                blockOutputActiveBlocks, true),
            progress,
            partSize,
            blockFactory,
            instrumentation.newOutputStreamStatistics(statistics),
            new WriteOperationHelper(key)
        ),
        null);
  } else {
    // We pass null to FSDataOutputStream so it won't count writes that
    // are being buffered to a file
    output = new FSDataOutputStream(
        new S3AOutputStream(getConf(),
            this,
            key,
            progress
        ),
        null);
  }
  return output;
}
/**
 * {@inheritDoc}
 * @throws FileNotFoundException if the parent directory is not present -or
 * is not a directory.
 */
@Override
public FSDataOutputStream createNonRecursive(Path path,
    FsPermission permission,
    EnumSet<CreateFlag> flags,
    int bufferSize,
    short replication,
    long blockSize,
    Progressable progress) throws IOException {
  Path parent = path.getParent();
  if (parent != null) {
    // expect this to raise an exception if there is no parent
    if (!getFileStatus(parent).isDirectory()) {
      throw new FileAlreadyExistsException("Not a directory: " + parent);
    }
  }
  // delegate to the normal create call once the parent has been validated
  return create(path, permission,
      flags.contains(CreateFlag.OVERWRITE), bufferSize,
      replication, blockSize, progress);
}
/**
 * Append to an existing file (optional operation); unsupported by S3A.
 * @param f the existing file to be appended.
 * @param bufferSize the size of the buffer to be used.
 * @param progress for reporting progress if it is not null.
 * @return never returns normally.
 * @throws IOException always: append is not supported.
 */
public FSDataOutputStream append(Path f, int bufferSize,
    Progressable progress) throws IOException {
  throw new IOException("Not supported");
}
/**
 * Renames Path src to Path dst. Can take place on local fs
 * or remote DFS.
 *
 * Warning: S3 does not support renames. This method does a copy which can
 * take S3 some time to execute with large files and directories. Since
 * there is no Progressable passed in, this can time out jobs.
 *
 * Note: This implementation differs with other S3 drivers. Specifically:
 * <pre>
 * Fails if src is a file and dst is a directory.
 * Fails if src is a directory and dst is a file.
 * Fails if the parent of dst does not exist or is a file.
 * Fails if dst is a directory that is not empty.
 * </pre>
 *
 * @param src path to be renamed
 * @param dst new path after rename
 * @throws IOException on IO failure
 * @return true if rename is successful
 */
public boolean rename(Path src, Path dst) throws IOException {
  try {
    return innerRename(src, dst);
  } catch (AmazonClientException e) {
    throw translateException("rename(" + src +", " + dst + ")", src, e);
  } catch (RenameFailedException e) {
    // a rename precondition failed: downgrade to the exception's exit code
    LOG.debug(e.getMessage());
    return e.getExitCode();
  } catch (FileNotFoundException e) {
    // missing source: the public API contract is "return false"
    LOG.debug(e.toString());
    return false;
  }
}
/**
 * The inner rename operation. See {@link #rename(Path, Path)} for
 * the description of the operation.
 * This operation throws an exception on any failure which needs to be
 * reported and downgraded to a failure. That is: if a rename
 * precondition is not met, an exception is raised here rather than the
 * vaguer "return false" of the public API.
 * @param source path to be renamed
 * @param dest new path after rename
 * @throws RenameFailedException if some criteria for a state changing
 * rename was not met. This means work didn't happen; it's not something
 * which is reported upstream to the FileSystem APIs, for which the semantics
 * of "false" are pretty vague.
 * @throws FileNotFoundException there's no source file.
 * @throws IOException on IO failure.
 * @throws AmazonClientException on failures inside the AWS SDK
 */
private boolean innerRename(Path source, Path dest)
    throws RenameFailedException, FileNotFoundException, IOException,
    AmazonClientException {
  Path src = qualify(source);
  Path dst = qualify(dest);

  LOG.debug("Rename path {} to {}", src, dst);
  incrementStatistic(INVOCATION_RENAME);

  String srcKey = pathToKey(src);
  String dstKey = pathToKey(dst);

  // neither end of the rename may be the root directory
  if (srcKey.isEmpty()) {
    throw new RenameFailedException(src, dst, "source is root directory");
  }
  if (dstKey.isEmpty()) {
    throw new RenameFailedException(src, dst, "dest is root directory");
  }

  // get the source file status; this raises a FNFE if there is no source
  // file.
  S3AFileStatus srcStatus = innerGetFileStatus(src, true);

  if (srcKey.equals(dstKey)) {
    LOG.debug("rename: src and dest refer to the same file or directory: {}",
        dst);
    throw new RenameFailedException(src, dst,
        "source and dest refer to the same file or directory")
        .withExitCode(srcStatus.isFile());
  }

  S3AFileStatus dstStatus = null;
  try {
    dstStatus = innerGetFileStatus(dst, true);
    // if there is no destination entry, an exception is raised.
    // hence this code sequence can assume that there is something
    // at the end of the path; the only detail being what it is and
    // whether or not it can be the destination of the rename.
    if (srcStatus.isDirectory()) {
      if (dstStatus.isFile()) {
        throw new RenameFailedException(src, dst,
            "source is a directory and dest is a file")
            .withExitCode(srcStatus.isFile());
      } else if (dstStatus.isEmptyDirectory() != Tristate.TRUE) {
        throw new RenameFailedException(src, dst,
            "Destination is a non-empty directory")
            .withExitCode(false);
      }
      // at this point the destination is an empty directory
    } else {
      // source is a file. The destination must be a directory,
      // empty or not
      if (dstStatus.isFile()) {
        throw new RenameFailedException(src, dst,
            "Cannot rename onto an existing file")
            .withExitCode(false);
      }
    }
  } catch (FileNotFoundException e) {
    LOG.debug("rename: destination path {} not found", dst);

    // Parent must exist
    Path parent = dst.getParent();
    if (!pathToKey(parent).isEmpty()) {
      try {
        S3AFileStatus dstParentStatus = innerGetFileStatus(dst.getParent(),
            false);
        if (!dstParentStatus.isDirectory()) {
          throw new RenameFailedException(src, dst,
              "destination parent is not a directory");
        }
      } catch (FileNotFoundException e2) {
        throw new RenameFailedException(src, dst,
            "destination has no parent ");
      }
    }
  }

  // If we have a MetadataStore, track deletions/creations.
  Collection<Path> srcPaths = null;
  List<PathMetadata> dstMetas = null;
  if (hasMetadataStore()) {
    srcPaths = new HashSet<>(); // srcPaths need fast look up before put
    dstMetas = new ArrayList<>();
  }
  // TODO S3Guard HADOOP-13761: retries when source paths are not visible yet
  // TODO S3Guard: performance: mark destination dirs as authoritative

  // Ok! Time to start
  if (srcStatus.isFile()) {
    LOG.debug("rename: renaming file {} to {}", src, dst);
    long length = srcStatus.getLen();
    if (dstStatus != null && dstStatus.isDirectory()) {
      // destination is a directory: copy into it under the source's name
      String newDstKey = dstKey;
      if (!newDstKey.endsWith("/")) {
        newDstKey = newDstKey + "/";
      }
      String filename =
          srcKey.substring(pathToKey(src.getParent()).length()+1);
      newDstKey = newDstKey + filename;
      copyFile(srcKey, newDstKey, length);
      S3Guard.addMoveFile(metadataStore, srcPaths, dstMetas, src,
          keyToQualifiedPath(newDstKey), length, getDefaultBlockSize(dst),
          username);
    } else {
      copyFile(srcKey, dstKey, srcStatus.getLen());
      S3Guard.addMoveFile(metadataStore, srcPaths, dstMetas, src, dst,
          length, getDefaultBlockSize(dst), username);
    }
    // rename is copy-then-delete on S3
    innerDelete(srcStatus, false);
  } else {
    LOG.debug("rename: renaming directory {} to {}", src, dst);

    // This is a directory to directory copy
    if (!dstKey.endsWith("/")) {
      dstKey = dstKey + "/";
    }

    if (!srcKey.endsWith("/")) {
      srcKey = srcKey + "/";
    }

    //Verify dest is not a child of the source directory
    if (dstKey.startsWith(srcKey)) {
      throw new RenameFailedException(srcKey, dstKey,
          "cannot rename a directory to a subdirectory of itself ");
    }

    List<DeleteObjectsRequest.KeyVersion> keysToDelete = new ArrayList<>();
    if (dstStatus != null && dstStatus.isEmptyDirectory() == Tristate.TRUE) {
      // delete unnecessary fake directory.
      keysToDelete.add(new DeleteObjectsRequest.KeyVersion(dstKey));
    }

    Path parentPath = keyToPath(srcKey);
    RemoteIterator<LocatedFileStatus> iterator = listFilesAndEmptyDirectories(
        parentPath, true);
    while (iterator.hasNext()) {
      LocatedFileStatus status = iterator.next();
      long length = status.getLen();
      String key = pathToKey(status.getPath());
      if (status.isDirectory() && !key.endsWith("/")) {
        key += "/";
      }
      keysToDelete
          .add(new DeleteObjectsRequest.KeyVersion(key));
      String newDstKey =
          dstKey + key.substring(srcKey.length());
      copyFile(key, newDstKey, length);

      if (hasMetadataStore()) {
        // with a metadata store, the object entries need to be updated,
        // including, potentially, the ancestors
        Path childSrc = keyToQualifiedPath(key);
        Path childDst = keyToQualifiedPath(newDstKey);
        if (objectRepresentsDirectory(key, length)) {
          S3Guard.addMoveDir(metadataStore, srcPaths, dstMetas, childSrc,
              childDst, username);
        } else {
          S3Guard.addMoveFile(metadataStore, srcPaths, dstMetas, childSrc,
              childDst, length, getDefaultBlockSize(childDst), username);
        }
        // Ancestor directories may not be listed, so we explicitly add them
        S3Guard.addMoveAncestors(metadataStore, srcPaths, dstMetas,
            keyToQualifiedPath(srcKey), childSrc, childDst, username);
      }

      if (keysToDelete.size() == MAX_ENTRIES_TO_DELETE) {
        // flush a full batch of deletions
        removeKeys(keysToDelete, true, false);
      }
    }
    if (!keysToDelete.isEmpty()) {
      removeKeys(keysToDelete, false, false);
    }

    // We moved all the children, now move the top-level dir
    // Empty directory should have been added as the object summary
    if (hasMetadataStore()
        && srcPaths != null
        && !srcPaths.contains(src)) {
      LOG.debug("To move the non-empty top-level dir src={} and dst={}",
          src, dst);
      S3Guard.addMoveDir(metadataStore, srcPaths, dstMetas, src, dst,
          username);
    }
  }

  metadataStore.move(srcPaths, dstMetas);

  if (src.getParent() != dst.getParent()) {
    // tidy up fake directory markers around both ends of the rename
    deleteUnnecessaryFakeDirectories(dst.getParent());
    createFakeDirectoryIfNecessary(src.getParent());
  }
  return true;
}
  /**
   * Low-level call to get at the object metadata.
   * This always issues a HEAD request directly against S3; the S3Guard
   * metadata store is not consulted.
   * @param path path to the object
   * @return metadata
   * @throws IOException IO and object access problems.
   */
  @VisibleForTesting
  public ObjectMetadata getObjectMetadata(Path path) throws IOException {
    // delegate to the key-based variant, which updates metrics
    return getObjectMetadata(pathToKey(path));
  }
  /**
   * Does this Filesystem have a metadata store?
   * @return true iff the FS has been instantiated with a metadata store
   */
  public boolean hasMetadataStore() {
    // the field is always non-null; a NullMetadataStore means "no store"
    return !S3Guard.isNullMetadataStore(metadataStore);
  }
  /**
   * Get the metadata store.
   * This will always be non-null, but may be bound to the
   * {@code NullMetadataStore}; use {@link #hasMetadataStore()} to
   * distinguish the two cases.
   * @return the metadata store of this FS instance
   */
  @VisibleForTesting
  public MetadataStore getMetadataStore() {
    return metadataStore;
  }
  /**
   * Replace the metadata store; for testing only.
   * See ITestS3GuardEmptyDirs.
   * @param ms the store to swap in
   */
  @VisibleForTesting
  void setMetadataStore(MetadataStore ms) {
    metadataStore = ms;
  }
  /**
   * Increment a statistic by 1.
   * Convenience overload of {@link #incrementStatistic(Statistic, long)}.
   * @param statistic The operation to increment
   */
  protected void incrementStatistic(Statistic statistic) {
    incrementStatistic(statistic, 1);
  }
  /**
   * Increment a statistic by a specific value.
   * Updates both the instrumentation and the storage statistics so the
   * two views stay in sync.
   * @param statistic The operation to increment
   * @param count the count to increment
   */
  protected void incrementStatistic(Statistic statistic, long count) {
    instrumentation.incrementCounter(statistic, count);
    storageStatistics.incrementCounter(statistic, count);
  }
  /**
   * Decrement a gauge by a specific value.
   * Unlike counters, gauges are only tracked in the instrumentation,
   * not in the storage statistics.
   * @param statistic The operation to decrement
   * @param count the count to decrement
   */
  protected void decrementGauge(Statistic statistic, long count) {
    instrumentation.decrementGauge(statistic, count);
  }
  /**
   * Increment a gauge by a specific value.
   * Unlike counters, gauges are only tracked in the instrumentation,
   * not in the storage statistics.
   * @param statistic The operation to increment
   * @param count the count to increment
   */
  protected void incrementGauge(Statistic statistic, long count) {
    instrumentation.incrementGauge(statistic, count);
  }
  /**
   * Get the storage statistics of this filesystem.
   * @return the storage statistics
   */
  @Override
  public S3AStorageStatistics getStorageStatistics() {
    return storageStatistics;
  }
/**
* Request object metadata; increments counters in the process.
* @param key key
* @return the metadata
*/
protected ObjectMetadata getObjectMetadata(String key) {
incrementStatistic(OBJECT_METADATA_REQUESTS);
GetObjectMetadataRequest request =
new GetObjectMetadataRequest(bucket, key);
//SSE-C requires to be filled in if enabled for object metadata
if(S3AEncryptionMethods.SSE_C.equals(serverSideEncryptionAlgorithm) &&
StringUtils.isNotBlank(getServerSideEncryptionKey(getConf()))){
request.setSSECustomerKey(generateSSECustomerKey());
}
ObjectMetadata meta = s3.getObjectMetadata(request);
incrementReadOperations();
return meta;
}
  /**
   * Initiate a {@code listObjects} operation, incrementing metrics
   * in the process.
   * @param request request to initiate
   * @return the results
   */
  protected ObjectListing listObjects(ListObjectsRequest request) {
    incrementStatistic(OBJECT_LIST_REQUESTS);
    incrementReadOperations();
    return s3.listObjects(request);
  }
  /**
   * List the next set of objects in a paged listing, incrementing
   * metrics in the process.
   * @param objects paged result
   * @return the next result object
   */
  protected ObjectListing continueListObjects(ObjectListing objects) {
    incrementStatistic(OBJECT_CONTINUE_LIST_REQUESTS);
    incrementReadOperations();
    return s3.listNextBatchOfObjects(objects);
  }
  /**
   * Increment the (FileSystem-level) read operation counter by 1.
   */
  public void incrementReadOperations() {
    statistics.incrementReadOps(1);
  }
  /**
   * Increment the write operation counter by 1.
   * This is somewhat inaccurate, as it appears to be invoked more
   * often than needed in progress callbacks.
   */
  public void incrementWriteOperations() {
    statistics.incrementWriteOps(1);
  }
  /**
   * Delete an object.
   * Increments the {@code OBJECT_DELETE_REQUESTS} and write
   * operation statistics.
   * @param key key to blob to delete.
   * @throws InvalidRequestException if the key maps to the bucket root,
   * which must never be deleted.
   */
  private void deleteObject(String key) throws InvalidRequestException {
    // refuse to delete the bucket root before touching any counters
    blockRootDelete(key);
    incrementWriteOperations();
    incrementStatistic(OBJECT_DELETE_REQUESTS);
    s3.deleteObject(bucket, key);
  }
/**
* Reject any request to delete an object where the key is root.
* @param key key to validate
* @throws InvalidRequestException if the request was rejected due to
* a mistaken attempt to delete the root directory.
*/
private void blockRootDelete(String key) throws InvalidRequestException {
if (key.isEmpty() || "/".equals(key)) {
throw new InvalidRequestException("Bucket "+ bucket
+" cannot be deleted");
}
}
/**
* Perform a bulk object delete operation.
* Increments the {@code OBJECT_DELETE_REQUESTS} and write
* operation statistics.
* @param deleteRequest keys to delete on the s3-backend
* @throws MultiObjectDeleteException one or more of the keys could not
* be deleted.
* @throws AmazonClientException amazon-layer failure.
*/
private void deleteObjects(DeleteObjectsRequest deleteRequest)
throws MultiObjectDeleteException, AmazonClientException {
incrementWriteOperations();
incrementStatistic(OBJECT_DELETE_REQUESTS, 1);
try {
s3.deleteObjects(deleteRequest);
} catch (MultiObjectDeleteException e) {
// one or more of the operations failed.
List<MultiObjectDeleteException.DeleteError> errors = e.getErrors();
LOG.error("Partial failure of delete, {} errors", errors.size(), e);
for (MultiObjectDeleteException.DeleteError error : errors) {
LOG.error("{}: \"{}\" - {}",
error.getKey(), error.getCode(), error.getMessage());
}
throw e;
}
}
  /**
   * Create a putObject request for uploading a local file.
   * Adds the canned ACL, the supplied metadata, and any optional
   * encryption parameters.
   * @param key key of object
   * @param metadata metadata header
   * @param srcfile source file; must not be null
   * @return the request
   */
  public PutObjectRequest newPutObjectRequest(String key,
      ObjectMetadata metadata, File srcfile) {
    Preconditions.checkNotNull(srcfile);
    PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key,
        srcfile);
    setOptionalPutRequestParameters(putObjectRequest);
    putObjectRequest.setCannedAcl(cannedACL);
    putObjectRequest.setMetadata(metadata);
    return putObjectRequest;
  }
  /**
   * Create a {@link PutObjectRequest} request for uploading from a stream.
   * The metadata is assumed to have been configured with the size of the
   * operation.
   * Adds the canned ACL and any optional encryption parameters.
   * @param key key of object
   * @param metadata metadata header
   * @param inputStream source data; must not be null
   * @return the request
   */
  PutObjectRequest newPutObjectRequest(String key,
      ObjectMetadata metadata,
      InputStream inputStream) {
    Preconditions.checkNotNull(inputStream);
    PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key,
        inputStream, metadata);
    setOptionalPutRequestParameters(putObjectRequest);
    putObjectRequest.setCannedAcl(cannedACL);
    return putObjectRequest;
  }
  /**
   * Create a new object metadata instance.
   * Any standard metadata headers are added here, for example:
   * encryption.
   * @return a new metadata instance
   */
  public ObjectMetadata newObjectMetadata() {
    final ObjectMetadata om = new ObjectMetadata();
    setOptionalObjectMetadata(om);
    return om;
  }
/**
* Create a new object metadata instance.
* Any standard metadata headers are added here, for example:
* encryption.
*
* @param length length of data to set in header.
* @return a new metadata instance
*/
public ObjectMetadata newObjectMetadata(long length) {
final ObjectMetadata om = newObjectMetadata();
if (length >= 0) {
om.setContentLength(length);
}
return om;
}
  /**
   * Start a transfer-manager managed async PUT of an object,
   * incrementing the put requests and put bytes
   * counters.
   * It does not update the other counters,
   * as existing code does that as progress callbacks come in.
   * Byte length is calculated from the file length, or, if there is no
   * file, from the content length of the header.
   * Because the operation is async, any stream supplied in the request
   * must reference data (files, buffers) which stay valid until the upload
   * completes.
   * @param putObjectRequest the request
   * @return the upload initiated
   */
  public UploadInfo putObject(PutObjectRequest putObjectRequest) {
    long len;
    if (putObjectRequest.getFile() != null) {
      len = putObjectRequest.getFile().length();
    } else {
      len = putObjectRequest.getMetadata().getContentLength();
    }
    incrementPutStartStatistics(len);
    try {
      Upload upload = transfers.upload(putObjectRequest);
      // NOTE(review): "completed" statistics are updated at submission time;
      // the transfer itself is still in flight here.
      incrementPutCompletedStatistics(true, len);
      return new UploadInfo(upload, len);
    } catch (AmazonClientException e) {
      incrementPutCompletedStatistics(false, len);
      throw e;
    }
  }
  /**
   * PUT an object directly (i.e. not via the transfer manager).
   * Byte length is calculated from the file length, or, if there is no
   * file, from the content length of the header.
   * <i>Important: this call will close any input stream in the request.</i>
   * @param putObjectRequest the request
   * @return the upload initiated
   * @throws AmazonClientException on problems
   * @throws IllegalStateException if the request length cannot be determined
   */
  PutObjectResult putObjectDirect(PutObjectRequest putObjectRequest)
      throws AmazonClientException {
    // rejects requests of unknown (negative) length
    long len = getPutRequestLength(putObjectRequest);
    LOG.debug("PUT {} bytes to {}", len, putObjectRequest.getKey());
    incrementPutStartStatistics(len);
    try {
      PutObjectResult result = s3.putObject(putObjectRequest);
      incrementPutCompletedStatistics(true, len);
      return result;
    } catch (AmazonClientException e) {
      incrementPutCompletedStatistics(false, len);
      throw e;
    }
  }
/**
* Get the length of the PUT, verifying that the length is known.
* @param putObjectRequest a request bound to a file or a stream.
* @return the request length
* @throws IllegalArgumentException if the length is negative
*/
private long getPutRequestLength(PutObjectRequest putObjectRequest) {
long len;
if (putObjectRequest.getFile() != null) {
len = putObjectRequest.getFile().length();
} else {
len = putObjectRequest.getMetadata().getContentLength();
}
Preconditions.checkState(len >= 0, "Cannot PUT object of unknown length");
return len;
}
  /**
   * Upload part of a multi-partition file.
   * Increments the write and put counters.
   * <i>Important: this call does not close any input stream in the request.</i>
   * @param request request
   * @return the result of the operation.
   * @throws AmazonClientException on problems
   */
  public UploadPartResult uploadPart(UploadPartRequest request)
      throws AmazonClientException {
    long len = request.getPartSize();
    incrementPutStartStatistics(len);
    try {
      UploadPartResult uploadPartResult = s3.uploadPart(request);
      incrementPutCompletedStatistics(true, len);
      return uploadPartResult;
    } catch (AmazonClientException e) {
      // count the failure before propagating it
      incrementPutCompletedStatistics(false, len);
      throw e;
    }
  }
  /**
   * At the start of a put/multipart upload operation, update the
   * relevant counters.
   *
   * @param bytes bytes in the request.
   */
  public void incrementPutStartStatistics(long bytes) {
    LOG.debug("PUT start {} bytes", bytes);
    incrementWriteOperations();
    incrementStatistic(OBJECT_PUT_REQUESTS);
    incrementGauge(OBJECT_PUT_REQUESTS_ACTIVE, 1);
    if (bytes > 0) {
      // only track pending bytes when the length is actually known
      incrementGauge(OBJECT_PUT_BYTES_PENDING, bytes);
    }
  }
  /**
   * At the end of a put/multipart upload operation, update the
   * relevant counters and gauges.
   * Mirrors {@link #incrementPutStartStatistics(long)}: the pending-bytes
   * and active-requests gauges incremented there are decremented here.
   *
   * @param success did the operation succeed?
   * @param bytes bytes in the request.
   */
  public void incrementPutCompletedStatistics(boolean success, long bytes) {
    LOG.debug("PUT completed success={}; {} bytes", success, bytes);
    incrementWriteOperations();
    if (bytes > 0) {
      incrementStatistic(OBJECT_PUT_BYTES, bytes);
      decrementGauge(OBJECT_PUT_BYTES_PENDING, bytes);
    }
    incrementStatistic(OBJECT_PUT_REQUESTS_COMPLETED);
    decrementGauge(OBJECT_PUT_REQUESTS_ACTIVE, 1);
  }
  /**
   * Callback for use in progress callbacks from put/multipart upload events.
   * Increments those statistics which are expected to be updated during
   * the ongoing upload operation.
   * @param key key to file that is being written (for logging)
   * @param bytes bytes successfully uploaded.
   */
  public void incrementPutProgressStatistics(String key, long bytes) {
    PROGRESS.debug("PUT {}: {} bytes", key, bytes);
    incrementWriteOperations();
    if (bytes > 0) {
      statistics.incrementBytesWritten(bytes);
    }
  }
  /**
   * A helper method to delete a list of keys on a s3-backend.
   * Uses a single bulk delete when multi-object delete is enabled,
   * otherwise issues one DELETE per key.
   *
   * @param keysToDelete collection of keys to delete on the s3-backend.
   * if empty, no request is made of the object store.
   * @param clearKeys clears the keysToDelete-list after processing the list
   * when set to true
   * @param deleteFakeDir indicates whether this is for deleting fake dirs
   * @throws InvalidRequestException if the request was rejected due to
   * a mistaken attempt to delete the root directory.
   * @throws MultiObjectDeleteException one or more of the keys could not
   * be deleted in a multiple object delete operation.
   * @throws AmazonClientException amazon-layer failure.
   */
  @VisibleForTesting
  void removeKeys(List<DeleteObjectsRequest.KeyVersion> keysToDelete,
      boolean clearKeys, boolean deleteFakeDir)
      throws MultiObjectDeleteException, AmazonClientException,
      InvalidRequestException {
    if (keysToDelete.isEmpty()) {
      // exit fast if there are no keys to delete
      return;
    }
    // validate every key before issuing any delete, so a bad key
    // fails the whole batch up front
    for (DeleteObjectsRequest.KeyVersion keyVersion : keysToDelete) {
      blockRootDelete(keyVersion.getKey());
    }
    if (enableMultiObjectsDelete) {
      deleteObjects(new DeleteObjectsRequest(bucket).withKeys(keysToDelete));
    } else {
      for (DeleteObjectsRequest.KeyVersion keyVersion : keysToDelete) {
        deleteObject(keyVersion.getKey());
      }
    }
    // fake-directory deletes are tracked separately from real file deletes
    if (!deleteFakeDir) {
      instrumentation.fileDeleted(keysToDelete.size());
    } else {
      instrumentation.fakeDirsDeleted(keysToDelete.size());
    }
    if (clearKeys) {
      keysToDelete.clear();
    }
  }
  /**
   * Delete a Path. This operation is at least {@code O(files)}, with
   * added overheads to enumerate the path. It is also not atomic.
   *
   * @param f the path to delete.
   * @param recursive if path is a directory and set to
   * true, the directory is deleted else throws an exception. In
   * case of a file the recursive can be set to either true or false.
   * @return true if delete is successful else false.
   * @throws IOException due to inability to delete a directory or file.
   */
  public boolean delete(Path f, boolean recursive) throws IOException {
    try {
      // needEmptyDirectoryFlag=true: innerDelete requires directory
      // emptiness to be computed
      return innerDelete(innerGetFileStatus(f, true), recursive);
    } catch (FileNotFoundException e) {
      // deleting a missing path is not an error, just a false return
      LOG.debug("Couldn't delete {} - does not exist", f);
      instrumentation.errorIgnored();
      return false;
    } catch (AmazonClientException e) {
      throw translateException("delete", f, e);
    }
  }
  /**
   * Delete an object. See {@link #delete(Path, boolean)}.
   * For a directory this lists and bulk-deletes all children in pages;
   * for a file it issues a single delete. In both cases the S3Guard
   * metadata store is updated to match.
   *
   * @param status fileStatus object; its emptiness flag must have been
   * computed (not {@code Tristate.UNKNOWN})
   * @param recursive if path is a directory and set to
   * true, the directory is deleted else throws an exception. In
   * case of a file the recursive can be set to either true or false.
   * @return true if delete is successful else false.
   * @throws IOException due to inability to delete a directory or file.
   * @throws AmazonClientException on failures inside the AWS SDK
   */
  private boolean innerDelete(S3AFileStatus status, boolean recursive)
      throws IOException, AmazonClientException {
    Path f = status.getPath();
    LOG.debug("Delete path {} - recursive {}", f , recursive);
    String key = pathToKey(f);
    if (status.isDirectory()) {
      LOG.debug("delete: Path is a directory: {}", f);
      Preconditions.checkArgument(
          status.isEmptyDirectory() != Tristate.UNKNOWN,
          "File status must have directory emptiness computed");
      if (!key.endsWith("/")) {
        key = key + "/";
      }
      if (key.equals("/")) {
        // root deletion has its own policy
        return rejectRootDirectoryDelete(status, recursive);
      }
      if (!recursive && status.isEmptyDirectory() == Tristate.FALSE) {
        throw new PathIsNotEmptyDirectoryException(f.toString());
      }
      if (status.isEmptyDirectory() == Tristate.TRUE) {
        // an empty directory is a single marker object: one delete suffices
        LOG.debug("Deleting fake empty directory {}", key);
        // HADOOP-13761 S3Guard: retries here
        deleteObject(key);
        metadataStore.delete(f);
        instrumentation.directoryDeleted();
      } else {
        // non-empty: page through the listing, deleting in batches of
        // MAX_ENTRIES_TO_DELETE
        LOG.debug("Getting objects for directory prefix {} to delete", key);
        ListObjectsRequest request = createListObjectsRequest(key, null);
        ObjectListing objects = listObjects(request);
        List<DeleteObjectsRequest.KeyVersion> keys =
            new ArrayList<>(objects.getObjectSummaries().size());
        while (true) {
          for (S3ObjectSummary summary : objects.getObjectSummaries()) {
            keys.add(new DeleteObjectsRequest.KeyVersion(summary.getKey()));
            LOG.debug("Got object to delete {}", summary.getKey());
            if (keys.size() == MAX_ENTRIES_TO_DELETE) {
              // TODO: HADOOP-13761 S3Guard: retries
              removeKeys(keys, true, false);
            }
          }
          if (objects.isTruncated()) {
            objects = continueListObjects(objects);
          } else {
            // final page: flush any remaining keys
            if (!keys.isEmpty()) {
              // TODO: HADOOP-13761 S3Guard: retries
              removeKeys(keys, false, false);
            }
            break;
          }
        }
      }
      metadataStore.deleteSubtree(f);
    } else {
      LOG.debug("delete: Path is a file");
      instrumentation.fileDeleted(1);
      deleteObject(key);
      metadataStore.delete(f);
    }
    // deleting the last entry under a prefix may leave the parent with no
    // marker object; recreate it if needed
    Path parent = f.getParent();
    if (parent != null) {
      createFakeDirectoryIfNecessary(parent);
    }
    return true;
  }
/**
* Implements the specific logic to reject root directory deletion.
* The caller must return the result of this call, rather than
* attempt to continue with the delete operation: deleting root
* directories is never allowed. This method simply implements
* the policy of when to return an exit code versus raise an exception.
* @param status filesystem status
* @param recursive recursive flag from command
* @return a return code for the operation
* @throws PathIOException if the operation was explicitly rejected.
*/
private boolean rejectRootDirectoryDelete(S3AFileStatus status,
boolean recursive) throws IOException {
LOG.info("s3a delete the {} root directory of {}", bucket, recursive);
boolean emptyRoot = status.isEmptyDirectory() == Tristate.TRUE;
if (emptyRoot) {
return true;
}
if (recursive) {
return false;
} else {
// reject
throw new PathIOException(bucket, "Cannot delete root path");
}
}
  /**
   * Create a fake (zero-byte marker) directory object for the path,
   * unless the path already exists in S3 or is the bucket root.
   * The existence probe deliberately bypasses S3Guard.
   * @param f path needing a directory marker
   * @throws IOException IO problem
   * @throws AmazonClientException untranslated AWS SDK failure
   */
  private void createFakeDirectoryIfNecessary(Path f)
      throws IOException, AmazonClientException {
    String key = pathToKey(f);
    // an empty key is the bucket root, which never needs a marker
    if (!key.isEmpty() && !s3Exists(f)) {
      LOG.debug("Creating new fake directory at {}", f);
      createFakeDirectory(key);
    }
  }
  /**
   * List the statuses of the files/directories in the given path if the path is
   * a directory.
   *
   * @param f given path
   * @return the statuses of the files/directories in the given path
   * @throws FileNotFoundException when the path does not exist;
   * IOException see specific implementation
   */
  public FileStatus[] listStatus(Path f) throws FileNotFoundException,
      IOException {
    try {
      return innerListStatus(f);
    } catch (AmazonClientException e) {
      // convert AWS-layer failures into Hadoop IOExceptions
      throw translateException("listStatus", f, e);
    }
  }
  /**
   * List the statuses of the files/directories in the given path if the path is
   * a directory.
   * Consults the S3Guard metadata store first; an authoritative listing
   * short-circuits the S3 call entirely, otherwise the S3 listing is
   * merged with the store's view.
   *
   * @param f given path
   * @return the statuses of the files/directories in the given path
   * @throws FileNotFoundException when the path does not exist;
   * @throws IOException due to an IO problem.
   * @throws AmazonClientException on failures inside the AWS SDK
   */
  public FileStatus[] innerListStatus(Path f) throws FileNotFoundException,
      IOException, AmazonClientException {
    Path path = qualify(f);
    String key = pathToKey(path);
    LOG.debug("List status for path: {}", path);
    incrementStatistic(INVOCATION_LIST_STATUS);
    List<FileStatus> result;
    final FileStatus fileStatus = getFileStatus(path);
    if (fileStatus.isDirectory()) {
      if (!key.isEmpty()) {
        key = key + '/';
      }
      DirListingMetadata dirMeta = metadataStore.listChildren(path);
      // an authoritative store listing can be served without touching S3
      if (allowAuthoritative && dirMeta != null && dirMeta.isAuthoritative()) {
        return S3Guard.dirMetaToStatuses(dirMeta);
      }
      ListObjectsRequest request = createListObjectsRequest(key, "/");
      LOG.debug("listStatus: doing listObjects for directory {}", key);
      Listing.FileStatusListingIterator files =
          listing.createFileStatusListingIterator(path,
              request,
              ACCEPT_ALL,
              new Listing.AcceptAllButSelfAndS3nDirs(path));
      result = new ArrayList<>(files.getBatchSize());
      while (files.hasNext()) {
        result.add(files.next());
      }
      // merge the S3 listing with the metadata store's view
      return S3Guard.dirListingUnion(metadataStore, path, result, dirMeta,
          allowAuthoritative);
    } else {
      // a plain file lists as itself
      LOG.debug("Adding: rd (not a dir): {}", path);
      FileStatus[] stats = new FileStatus[1];
      stats[0]= fileStatus;
      return stats;
    }
  }
/**
* Create a {@code ListObjectsRequest} request against this bucket,
* with the maximum keys returned in a query set by {@link #maxKeys}.
* @param key key for request
* @param delimiter any delimiter
* @return the request
*/
@VisibleForTesting
ListObjectsRequest createListObjectsRequest(String key,
String delimiter) {
ListObjectsRequest request = new ListObjectsRequest();
request.setBucketName(bucket);
request.setMaxKeys(maxKeys);
request.setPrefix(key);
if (delimiter != null) {
request.setDelimiter(delimiter);
}
return request;
}
  /**
   * Set the current working directory for the given file system. All relative
   * paths will be resolved relative to it.
   *
   * @param newDir the current working directory.
   */
  public void setWorkingDirectory(Path newDir) {
    workingDir = newDir;
  }
  /**
   * Get the current working directory for the given file system.
   * @return the directory pathname
   */
  public Path getWorkingDirectory() {
    return workingDir;
  }
  /**
   * Get the username of the FS.
   * @return the short name of the user who instantiated the FS
   */
  public String getUsername() {
    return username;
  }
  /**
   *
   * Make the given path and all non-existent parents into
   * directories. Has the semantics of Unix {@code 'mkdir -p'}.
   * Existence of the directory hierarchy is not an error.
   * @param path path to create
   * @param permission to apply to f; unused by the S3 implementation
   * @return true if a directory was created
   * @throws FileAlreadyExistsException there is a file at the path specified
   * @throws IOException other IO problems
   */
  // TODO: If we have created an empty file at /foo/bar and we then call
  // mkdirs for /foo/bar/baz/roo what happens to the empty file /foo/bar/?
  public boolean mkdirs(Path path, FsPermission permission) throws IOException,
      FileAlreadyExistsException {
    try {
      return innerMkdirs(path, permission);
    } catch (AmazonClientException e) {
      // convert AWS-layer failures into Hadoop IOExceptions
      throw translateException("innerMkdirs", path, e);
    }
  }
  /**
   *
   * Make the given path and all non-existent parents into
   * directories.
   * Walks up the ancestor chain verifying that no ancestor is a file,
   * creates a single fake directory marker for the deepest path, and
   * records all missing ancestors in the S3Guard metadata store.
   * See {@link #mkdirs(Path, FsPermission)}
   * @param p path to create
   * @param permission to apply to f; unused by the S3 implementation
   * @return true if a directory was created or already existed
   * @throws FileAlreadyExistsException there is a file at the path specified
   * @throws IOException other IO problems
   * @throws AmazonClientException on failures inside the AWS SDK
   */
  private boolean innerMkdirs(Path p, FsPermission permission)
      throws IOException, FileAlreadyExistsException, AmazonClientException {
    Path f = qualify(p);
    LOG.debug("Making directory: {}", f);
    incrementStatistic(INVOCATION_MKDIRS);
    FileStatus fileStatus;
    // only collect directories to record when a metadata store is present
    List<Path> metadataStoreDirs = null;
    if (hasMetadataStore()) {
      metadataStoreDirs = new ArrayList<>();
    }
    try {
      fileStatus = getFileStatus(f);
      if (fileStatus.isDirectory()) {
        // already a directory: mkdir -p semantics, nothing to do
        return true;
      } else {
        throw new FileAlreadyExistsException("Path is a file: " + f);
      }
    } catch (FileNotFoundException e) {
      // Walk path to root, ensuring closest ancestor is a directory, not file
      Path fPart = f.getParent();
      if (metadataStoreDirs != null) {
        metadataStoreDirs.add(f);
      }
      while (fPart != null) {
        try {
          fileStatus = getFileStatus(fPart);
          if (fileStatus.isDirectory()) {
            break;
          }
          if (fileStatus.isFile()) {
            throw new FileAlreadyExistsException(String.format(
                "Can't make directory for path '%s' since it is a file.",
                fPart));
          }
        } catch (FileNotFoundException fnfe) {
          instrumentation.errorIgnored();
          // We create all missing directories in MetadataStore; it does not
          // infer directories exist by prefix like S3.
          if (metadataStoreDirs != null) {
            metadataStoreDirs.add(fPart);
          }
        }
        fPart = fPart.getParent();
      }
      String key = pathToKey(f);
      // a single marker object at the deepest level is sufficient in S3
      createFakeDirectory(key);
      S3Guard.makeDirsOrdered(metadataStore, metadataStoreDirs, username, true);
      // this is complicated because getParent(a/b/c/) returns a/b/c, but
      // we want a/b. See HADOOP-14428 for more details.
      deleteUnnecessaryFakeDirectories(new Path(f.toString()).getParent());
      return true;
    }
  }
  /**
   * Return a file status object that represents the path.
   * @param f The path we want information from
   * @return a FileStatus object
   * @throws FileNotFoundException when the path does not exist
   * @throws IOException on other problems.
   */
  public FileStatus getFileStatus(final Path f) throws IOException {
    // callers of the public API never need the emptiness flag computed
    return innerGetFileStatus(f, false);
  }
/**
* Internal version of {@link #getFileStatus(Path)}.
* @param f The path we want information from
* @param needEmptyDirectoryFlag if true, implementation will calculate
* a TRUE or FALSE value for {@link S3AFileStatus#isEmptyDirectory()}
* @return a S3AFileStatus object
* @throws FileNotFoundException when the path does not exist
* @throws IOException on other problems.
*/
@VisibleForTesting
S3AFileStatus innerGetFileStatus(final Path f,
boolean needEmptyDirectoryFlag) throws IOException {
incrementStatistic(INVOCATION_GET_FILE_STATUS);
final Path path = qualify(f);
String key = pathToKey(path);
LOG.debug("Getting path status for {} ({})", path, key);
// Check MetadataStore, if any.
PathMetadata pm = metadataStore.get(path, needEmptyDirectoryFlag);
Set<Path> tombstones = Collections.EMPTY_SET;
if (pm != null) {
if (pm.isDeleted()) {
throw new FileNotFoundException("Path " + f + " is recorded as " +
"deleted by S3Guard");
}
FileStatus msStatus = pm.getFileStatus();
if (needEmptyDirectoryFlag && msStatus.isDirectory()) {
if (pm.isEmptyDirectory() != Tristate.UNKNOWN) {
// We have a definitive true / false from MetadataStore, we are done.
return S3AFileStatus.fromFileStatus(msStatus, pm.isEmptyDirectory());
} else {
DirListingMetadata children = metadataStore.listChildren(path);
if (children != null) {
tombstones = children.listTombstones();
}
LOG.debug("MetadataStore doesn't know if dir is empty, using S3.");
}
} else {
// Either this is not a directory, or we don't care if it is empty
return S3AFileStatus.fromFileStatus(msStatus, pm.isEmptyDirectory());
}
// If the metadata store has no children for it and it's not listed in
// S3 yet, we'll assume the empty directory is true;
S3AFileStatus s3FileStatus;
try {
s3FileStatus = s3GetFileStatus(path, key, tombstones);
} catch (FileNotFoundException e) {
return S3AFileStatus.fromFileStatus(msStatus, Tristate.TRUE);
}
// entry was found, save in S3Guard
return S3Guard.putAndReturn(metadataStore, s3FileStatus, instrumentation);
} else {
// there was no entry in S3Guard
// retrieve the data and update the metadata store in the process.
return S3Guard.putAndReturn(metadataStore,
s3GetFileStatus(path, key, tombstones), instrumentation);
}
}
  /**
   * Raw {@code getFileStatus} that talks direct to S3.
   * Used to implement {@link #innerGetFileStatus(Path, boolean)},
   * and for direct management of empty directory blobs.
   * Probes in order: HEAD on the exact key, HEAD on the key with a
   * trailing "/", then a one-entry LIST under the prefix; 404s on the
   * HEAD probes fall through to the next probe.
   * @param path Qualified path
   * @param key Key string for the path
   * @param tombstones set of S3Guard tombstone markers used to discount
   * listing entries; may be null
   * @return Status
   * @throws FileNotFoundException when the path does not exist
   * @throws IOException on other problems.
   */
  private S3AFileStatus s3GetFileStatus(final Path path, String key,
      Set<Path> tombstones) throws IOException {
    if (!key.isEmpty()) {
      // probe 1: HEAD the exact key
      try {
        ObjectMetadata meta = getObjectMetadata(key);
        if (objectRepresentsDirectory(key, meta.getContentLength())) {
          LOG.debug("Found exact file: fake directory");
          return new S3AFileStatus(Tristate.TRUE, path, username);
        } else {
          LOG.debug("Found exact file: normal file");
          return new S3AFileStatus(meta.getContentLength(),
              dateToLong(meta.getLastModified()),
              path,
              getDefaultBlockSize(path),
              username);
        }
      } catch (AmazonServiceException e) {
        // a 404 just means "try the next probe"
        if (e.getStatusCode() != 404) {
          throw translateException("getFileStatus", path, e);
        }
      } catch (AmazonClientException e) {
        throw translateException("getFileStatus", path, e);
      }
      // Necessary?
      // probe 2: HEAD the key with a trailing slash (directory marker)
      if (!key.endsWith("/")) {
        String newKey = key + "/";
        try {
          ObjectMetadata meta = getObjectMetadata(newKey);
          if (objectRepresentsDirectory(newKey, meta.getContentLength())) {
            LOG.debug("Found file (with /): fake directory");
            return new S3AFileStatus(Tristate.TRUE, path, username);
          } else {
            LOG.warn("Found file (with /): real file? should not happen: {}",
                key);
            return new S3AFileStatus(meta.getContentLength(),
                dateToLong(meta.getLastModified()),
                path,
                getDefaultBlockSize(path),
                username);
          }
        } catch (AmazonServiceException e) {
          if (e.getStatusCode() != 404) {
            throw translateException("getFileStatus", newKey, e);
          }
        } catch (AmazonClientException e) {
          throw translateException("getFileStatus", newKey, e);
        }
      }
    }
    // probe 3: LIST one entry under the prefix; any (non-tombstoned)
    // child proves the path is a non-empty directory
    try {
      key = maybeAddTrailingSlash(key);
      ListObjectsRequest request = new ListObjectsRequest();
      request.setBucketName(bucket);
      request.setPrefix(key);
      request.setDelimiter("/");
      request.setMaxKeys(1);
      ObjectListing objects = listObjects(request);
      Collection<String> prefixes = objects.getCommonPrefixes();
      Collection<S3ObjectSummary> summaries = objects.getObjectSummaries();
      if (!isEmptyOfKeys(prefixes, tombstones) ||
          !isEmptyOfObjects(summaries, tombstones)) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Found path as directory (with /): {}/{}",
              prefixes.size(), summaries.size());
          for (S3ObjectSummary summary : summaries) {
            LOG.debug("Summary: {} {}", summary.getKey(), summary.getSize());
          }
          for (String prefix : prefixes) {
            LOG.debug("Prefix: {}", prefix);
          }
        }
        return new S3AFileStatus(Tristate.FALSE, path, username);
      } else if (key.isEmpty()) {
        // the bucket root always exists, even with no objects
        LOG.debug("Found root directory");
        return new S3AFileStatus(Tristate.TRUE, path, username);
      }
    } catch (AmazonServiceException e) {
      if (e.getStatusCode() != 404) {
        throw translateException("getFileStatus", key, e);
      }
    } catch (AmazonClientException e) {
      throw translateException("getFileStatus", key, e);
    }
    LOG.debug("Not Found: {}", path);
    throw new FileNotFoundException("No such file or directory: " + path);
  }
/**
* Helper function to determine if a collection of paths is empty
* after accounting for tombstone markers (if provided).
* @param keys Collection of path (prefixes / directories or keys).
* @param tombstones Set of tombstone markers, or null if not applicable.
* @return false if summaries contains objects not accounted for by
* tombstones.
*/
private boolean isEmptyOfKeys(Collection<String> keys, Set<Path>
tombstones) {
if (tombstones == null) {
return keys.isEmpty();
}
for (String key : keys) {
Path qualified = keyToQualifiedPath(key);
if (!tombstones.contains(qualified)) {
return false;
}
}
return true;
}
/**
* Helper function to determine if a collection of object summaries is empty
* after accounting for tombstone markers (if provided).
* @param summaries Collection of objects as returned by listObjects.
* @param tombstones Set of tombstone markers, or null if not applicable.
* @return false if summaries contains objects not accounted for by
* tombstones.
*/
private boolean isEmptyOfObjects(Collection<S3ObjectSummary> summaries,
Set<Path> tombstones) {
if (tombstones == null) {
return summaries.isEmpty();
}
Collection<String> stringCollection = new ArrayList<>(summaries.size());
for (S3ObjectSummary summary : summaries) {
stringCollection.add(summary.getKey());
}
return isEmptyOfKeys(stringCollection, tombstones);
}
/**
* Raw version of {@link FileSystem#exists(Path)} which uses S3 only:
* S3Guard MetadataStore, if any, will be skipped.
* @return true if path exists in S3
*/
private boolean s3Exists(final Path f) throws IOException {
Path path = qualify(f);
String key = pathToKey(path);
try {
s3GetFileStatus(path, key, null);
return true;
} catch (FileNotFoundException e) {
return false;
}
}
  /**
   * The src file is on the local disk. Add it to FS at
   * the given dst name.
   *
   * This version doesn't need to create a temporary file to calculate the md5.
   * Sadly this doesn't seem to be used by the shell cp :(
   *
   * delSrc indicates if the source should be removed
   * @param delSrc whether to delete the src
   * @param overwrite whether to overwrite an existing file
   * @param src path
   * @param dst path
   * @throws IOException IO problem
   * @throws FileAlreadyExistsException the destination file exists and
   * overwrite==false
   * @throws AmazonClientException failure in the AWS SDK
   */
  @Override
  public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src,
      Path dst) throws IOException {
    try {
      innerCopyFromLocalFile(delSrc, overwrite, src, dst);
    } catch (AmazonClientException e) {
      // map AWS SDK failures into the IOException hierarchy for callers
      throw translateException("copyFromLocalFile(" + src + ", " + dst + ")",
          src, e);
    }
  }
/**
* The src file is on the local disk. Add it to FS at
* the given dst name.
*
* This version doesn't need to create a temporary file to calculate the md5.
* Sadly this doesn't seem to be used by the shell cp :(
*
* delSrc indicates if the source should be removed
* @param delSrc whether to delete the src
* @param overwrite whether to overwrite an existing file
* @param src path
* @param dst path
* @throws IOException IO problem
* @throws FileAlreadyExistsException the destination file exists and
* overwrite==false
* @throws AmazonClientException failure in the AWS SDK
*/
private void innerCopyFromLocalFile(boolean delSrc, boolean overwrite,
Path src, Path dst)
throws IOException, FileAlreadyExistsException, AmazonClientException {
incrementStatistic(INVOCATION_COPY_FROM_LOCAL_FILE);
final String key = pathToKey(dst);
if (!overwrite && exists(dst)) {
throw new FileAlreadyExistsException(dst + " already exists");
}
LOG.debug("Copying local file from {} to {}", src, dst);
// Since we have a local file, we don't need to stream into a temporary file
LocalFileSystem local = getLocal(getConf());
File srcfile = local.pathToFile(src);
final ObjectMetadata om = newObjectMetadata(srcfile.length());
PutObjectRequest putObjectRequest = newPutObjectRequest(key, om, srcfile);
UploadInfo info = putObject(putObjectRequest);
Upload upload = info.getUpload();
ProgressableProgressListener listener = new ProgressableProgressListener(
this, key, upload, null);
upload.addProgressListener(listener);
try {
upload.waitForUploadResult();
} catch (InterruptedException e) {
throw new InterruptedIOException("Interrupted copying " + src
+ " to " + dst + ", cancelling");
}
listener.uploadCompleted();
// This will delete unnecessary fake parent directories
finishedWrite(key, info.getLength());
if (delSrc) {
local.delete(src, false);
}
}
  /**
   * Close the filesystem. This shuts down all transfers.
   * Idempotent: only the first call performs the shutdown; later calls
   * return immediately.
   * @throws IOException IO problem
   */
  @Override
  public void close() throws IOException {
    if (closed.getAndSet(true)) {
      // already closed
      return;
    }
    try {
      super.close();
    } finally {
      // release helper services even if the superclass close failed;
      // null the fields so repeated cleanup is impossible
      if (transfers != null) {
        transfers.shutdownNow(true);
        transfers = null;
      }
      if (metadataStore != null) {
        metadataStore.close();
        metadataStore = null;
      }
    }
  }
  /**
   * Override getCanonicalServiceName because we don't support token in S3A.
   * @return null, always: delegation tokens are not supported.
   */
  @Override
  public String getCanonicalServiceName() {
    // Does not support Token
    return null;
  }
/**
* Copy a single object in the bucket via a COPY operation.
* @param srcKey source object path
* @param dstKey destination object path
* @param size object size
* @throws AmazonClientException on failures inside the AWS SDK
* @throws InterruptedIOException the operation was interrupted
* @throws IOException Other IO problems
*/
private void copyFile(String srcKey, String dstKey, long size)
throws IOException, InterruptedIOException, AmazonClientException {
LOG.debug("copyFile {} -> {} ", srcKey, dstKey);
try {
ObjectMetadata srcom = getObjectMetadata(srcKey);
ObjectMetadata dstom = cloneObjectMetadata(srcom);
setOptionalObjectMetadata(dstom);
CopyObjectRequest copyObjectRequest =
new CopyObjectRequest(bucket, srcKey, bucket, dstKey);
setOptionalCopyObjectRequestParameters(copyObjectRequest);
copyObjectRequest.setCannedAccessControlList(cannedACL);
copyObjectRequest.setNewObjectMetadata(dstom);
ProgressListener progressListener = new ProgressListener() {
public void progressChanged(ProgressEvent progressEvent) {
switch (progressEvent.getEventType()) {
case TRANSFER_PART_COMPLETED_EVENT:
incrementWriteOperations();
break;
default:
break;
}
}
};
Copy copy = transfers.copy(copyObjectRequest);
copy.addProgressListener(progressListener);
try {
copy.waitForCopyResult();
incrementWriteOperations();
instrumentation.filesCopied(1, size);
} catch (InterruptedException e) {
throw new InterruptedIOException("Interrupted copying " + srcKey
+ " to " + dstKey + ", cancelling");
}
} catch (AmazonClientException e) {
throw translateException("copyFile("+ srcKey+ ", " + dstKey + ")",
srcKey, e);
}
}
protected void setOptionalMultipartUploadRequestParameters(
InitiateMultipartUploadRequest req) {
switch (serverSideEncryptionAlgorithm) {
case SSE_KMS:
req.setSSEAwsKeyManagementParams(generateSSEAwsKeyParams());
break;
case SSE_C:
if (StringUtils.isNotBlank(getServerSideEncryptionKey(getConf()))) {
//at the moment, only supports copy using the same key
req.setSSECustomerKey(generateSSECustomerKey());
}
break;
default:
}
}
protected void setOptionalCopyObjectRequestParameters(
CopyObjectRequest copyObjectRequest) throws IOException {
switch (serverSideEncryptionAlgorithm) {
case SSE_KMS:
copyObjectRequest.setSSEAwsKeyManagementParams(
generateSSEAwsKeyParams()
);
break;
case SSE_C:
if (StringUtils.isNotBlank(getServerSideEncryptionKey(getConf()))) {
//at the moment, only supports copy using the same key
SSECustomerKey customerKey = generateSSECustomerKey();
copyObjectRequest.setSourceSSECustomerKey(customerKey);
copyObjectRequest.setDestinationSSECustomerKey(customerKey);
}
break;
default:
}
}
private void setOptionalPutRequestParameters(PutObjectRequest request) {
switch (serverSideEncryptionAlgorithm) {
case SSE_KMS:
request.setSSEAwsKeyManagementParams(generateSSEAwsKeyParams());
break;
case SSE_C:
if (StringUtils.isNotBlank(getServerSideEncryptionKey(getConf()))) {
request.setSSECustomerKey(generateSSECustomerKey());
}
break;
default:
}
}
private void setOptionalObjectMetadata(ObjectMetadata metadata) {
if (S3AEncryptionMethods.SSE_S3.equals(serverSideEncryptionAlgorithm)) {
metadata.setSSEAlgorithm(serverSideEncryptionAlgorithm.getMethod());
}
}
private SSEAwsKeyManagementParams generateSSEAwsKeyParams() {
//Use specified key, otherwise default to default master aws/s3 key by AWS
SSEAwsKeyManagementParams sseAwsKeyManagementParams =
new SSEAwsKeyManagementParams();
if (StringUtils.isNotBlank(getServerSideEncryptionKey(getConf()))) {
sseAwsKeyManagementParams =
new SSEAwsKeyManagementParams(
getServerSideEncryptionKey(getConf())
);
}
return sseAwsKeyManagementParams;
}
private SSECustomerKey generateSSECustomerKey() {
SSECustomerKey customerKey = new SSECustomerKey(
getServerSideEncryptionKey(getConf())
);
return customerKey;
}
/**
* Perform post-write actions.
* This operation MUST be called after any PUT/multipart PUT completes
* successfully.
* This includes
* <ol>
* <li>Calling {@link #deleteUnnecessaryFakeDirectories(Path)}</li>
* <li>Updating any metadata store with details on the newly created
* object.</li>
* </ol>
* @param key key written to
* @param length total length of file written
*/
@InterfaceAudience.Private
void finishedWrite(String key, long length) {
LOG.debug("Finished write to {}, len {}", key, length);
Path p = keyToQualifiedPath(key);
deleteUnnecessaryFakeDirectories(p.getParent());
Preconditions.checkArgument(length >= 0, "content length is negative");
// See note about failure semantics in S3Guard documentation
try {
if (hasMetadataStore()) {
S3Guard.addAncestors(metadataStore, p, username);
S3AFileStatus status = createUploadFileStatus(p,
S3AUtils.objectRepresentsDirectory(key, length), length,
getDefaultBlockSize(p), username);
S3Guard.putAndReturn(metadataStore, status, instrumentation);
}
} catch (IOException e) {
LOG.error("S3Guard: Error updating MetadataStore for write to {}:",
key, e);
instrumentation.errorIgnored();
}
}
/**
* Delete mock parent directories which are no longer needed.
* This code swallows IO exceptions encountered
* @param path path
*/
private void deleteUnnecessaryFakeDirectories(Path path) {
List<DeleteObjectsRequest.KeyVersion> keysToRemove = new ArrayList<>();
while (!path.isRoot()) {
String key = pathToKey(path);
key = (key.endsWith("/")) ? key : (key + "/");
LOG.trace("To delete unnecessary fake directory {} for {}", key, path);
keysToRemove.add(new DeleteObjectsRequest.KeyVersion(key));
path = path.getParent();
}
try {
removeKeys(keysToRemove, false, true);
} catch(AmazonClientException | InvalidRequestException e) {
instrumentation.errorIgnored();
if (LOG.isDebugEnabled()) {
StringBuilder sb = new StringBuilder();
for(DeleteObjectsRequest.KeyVersion kv : keysToRemove) {
sb.append(kv.getKey()).append(",");
}
LOG.debug("While deleting keys {} ", sb.toString(), e);
}
}
}
private void createFakeDirectory(final String objectName)
throws AmazonClientException, AmazonServiceException,
InterruptedIOException {
if (!objectName.endsWith("/")) {
createEmptyObject(objectName + "/");
} else {
createEmptyObject(objectName);
}
}
// Used to create an empty file that represents an empty directory
private void createEmptyObject(final String objectName)
throws AmazonClientException, AmazonServiceException,
InterruptedIOException {
final InputStream im = new InputStream() {
@Override
public int read() throws IOException {
return -1;
}
};
PutObjectRequest putObjectRequest = newPutObjectRequest(objectName,
newObjectMetadata(0L),
im);
UploadInfo info = putObject(putObjectRequest);
try {
info.getUpload().waitForUploadResult();
} catch (InterruptedException e) {
throw new InterruptedIOException("Interrupted creating " + objectName);
}
incrementPutProgressStatistics(objectName, 0);
instrumentation.directoryCreated();
}
  /**
   * Creates a copy of the passed {@link ObjectMetadata}.
   * Does so without using the {@link ObjectMetadata#clone()} method,
   * to avoid copying unnecessary headers.
   * Only the attributes listed below are carried over; anything else on the
   * source metadata is deliberately dropped.
   * @param source the {@link ObjectMetadata} to copy
   * @return a copy of {@link ObjectMetadata} with only relevant attributes
   */
  private ObjectMetadata cloneObjectMetadata(ObjectMetadata source) {
    // This approach may be too brittle, especially if
    // in future there are new attributes added to ObjectMetadata
    // that we do not explicitly call to set here
    ObjectMetadata ret = newObjectMetadata(source.getContentLength());

    // Possibly null attributes
    // Allowing nulls to pass breaks it during later use
    if (source.getCacheControl() != null) {
      ret.setCacheControl(source.getCacheControl());
    }
    if (source.getContentDisposition() != null) {
      ret.setContentDisposition(source.getContentDisposition());
    }
    if (source.getContentEncoding() != null) {
      ret.setContentEncoding(source.getContentEncoding());
    }
    if (source.getContentMD5() != null) {
      ret.setContentMD5(source.getContentMD5());
    }
    if (source.getContentType() != null) {
      ret.setContentType(source.getContentType());
    }
    if (source.getExpirationTime() != null) {
      ret.setExpirationTime(source.getExpirationTime());
    }
    if (source.getExpirationTimeRuleId() != null) {
      ret.setExpirationTimeRuleId(source.getExpirationTimeRuleId());
    }
    if (source.getHttpExpiresDate() != null) {
      ret.setHttpExpiresDate(source.getHttpExpiresDate());
    }
    if (source.getLastModified() != null) {
      ret.setLastModified(source.getLastModified());
    }
    if (source.getOngoingRestore() != null) {
      ret.setOngoingRestore(source.getOngoingRestore());
    }
    if (source.getRestoreExpirationTime() != null) {
      ret.setRestoreExpirationTime(source.getRestoreExpirationTime());
    }
    if (source.getSSEAlgorithm() != null) {
      ret.setSSEAlgorithm(source.getSSEAlgorithm());
    }
    if (source.getSSECustomerAlgorithm() != null) {
      ret.setSSECustomerAlgorithm(source.getSSECustomerAlgorithm());
    }
    if (source.getSSECustomerKeyMd5() != null) {
      ret.setSSECustomerKeyMd5(source.getSSECustomerKeyMd5());
    }

    // user metadata is copied entry by entry rather than sharing the map
    for (Map.Entry<String, String> e : source.getUserMetadata().entrySet()) {
      ret.addUserMetadata(e.getKey(), e.getValue());
    }
    return ret;
  }
  /**
   * Return the number of bytes that large input files should be optimally
   * be split into to minimize I/O time.
   * @return the configured block size, in bytes, falling back to the default
   * @deprecated use {@link #getDefaultBlockSize(Path)} instead
   */
  @Deprecated
  public long getDefaultBlockSize() {
    return getConf().getLongBytes(FS_S3A_BLOCK_SIZE, DEFAULT_BLOCKSIZE);
  }
  /**
   * Diagnostic description of this filesystem instance: URI, key settings,
   * statistics and metrics. Intended for logging, not for parsing.
   * @return a description string
   */
  @Override
  public String toString() {
    final StringBuilder sb = new StringBuilder(
        "S3AFileSystem{");
    sb.append("uri=").append(uri);
    sb.append(", workingDir=").append(workingDir);
    sb.append(", inputPolicy=").append(inputPolicy);
    sb.append(", partSize=").append(partSize);
    sb.append(", enableMultiObjectsDelete=").append(enableMultiObjectsDelete);
    sb.append(", maxKeys=").append(maxKeys);
    if (cannedACL != null) {
      sb.append(", cannedACL=").append(cannedACL.toString());
    }
    sb.append(", readAhead=").append(readAhead);
    sb.append(", blockSize=").append(getDefaultBlockSize());
    sb.append(", multiPartThreshold=").append(multiPartThreshold);
    if (serverSideEncryptionAlgorithm != null) {
      sb.append(", serverSideEncryptionAlgorithm='")
          .append(serverSideEncryptionAlgorithm)
          .append('\'');
    }
    if (blockFactory != null) {
      sb.append(", blockFactory=").append(blockFactory);
    }
    sb.append(", metastore=").append(metadataStore);
    sb.append(", authoritative=").append(allowAuthoritative);
    sb.append(", boundedExecutor=").append(boundedThreadPool);
    sb.append(", unboundedExecutor=").append(unboundedThreadPool);
    sb.append(", statistics {")
        .append(statistics)
        .append("}");
    if (instrumentation != null) {
      sb.append(", metrics {")
          .append(instrumentation.dump("{", "=", "} ", true))
          .append("}");
    }
    sb.append('}');
    return sb.toString();
  }
  /**
   * Get the partition size for multipart operations.
   * @return the value as set during initialization
   */
  public long getPartitionSize() {
    return partSize;
  }
  /**
   * Get the threshold at which uploads switch to multipart transfers.
   * @return the value as set during initialization
   */
  public long getMultiPartThreshold() {
    return multiPartThreshold;
  }
  /**
   * Get the maximum key count requested per listing page.
   * @return a value, valid after initialization
   */
  int getMaxKeys() {
    return maxKeys;
  }
  /**
   * Increments the statistic {@link Statistic#INVOCATION_GLOB_STATUS},
   * then delegates to the superclass implementation.
   * {@inheritDoc}
   */
  @Override
  public FileStatus[] globStatus(Path pathPattern) throws IOException {
    incrementStatistic(INVOCATION_GLOB_STATUS);
    return super.globStatus(pathPattern);
  }
  /**
   * Override superclass so as to add statistic collection
   * ({@link Statistic#INVOCATION_GLOB_STATUS}).
   * {@inheritDoc}
   */
  @Override
  public FileStatus[] globStatus(Path pathPattern, PathFilter filter)
      throws IOException {
    incrementStatistic(INVOCATION_GLOB_STATUS);
    return super.globStatus(pathPattern, filter);
  }
  /**
   * Override superclass so as to add statistic collection
   * ({@link Statistic#INVOCATION_EXISTS}).
   * {@inheritDoc}
   */
  @Override
  public boolean exists(Path f) throws IOException {
    incrementStatistic(INVOCATION_EXISTS);
    return super.exists(f);
  }
  /**
   * Override superclass so as to add statistic collection
   * ({@link Statistic#INVOCATION_IS_DIRECTORY}).
   * {@inheritDoc}
   */
  @Override
  public boolean isDirectory(Path f) throws IOException {
    incrementStatistic(INVOCATION_IS_DIRECTORY);
    return super.isDirectory(f);
  }
  /**
   * Override superclass so as to add statistic collection
   * ({@link Statistic#INVOCATION_IS_FILE}).
   * {@inheritDoc}
   */
  @Override
  public boolean isFile(Path f) throws IOException {
    incrementStatistic(INVOCATION_IS_FILE);
    return super.isFile(f);
  }
  /**
   * {@inheritDoc}.
   *
   * This implementation is optimized for S3, which can do a bulk listing
   * off all entries under a path in one single operation. Thus there is
   * no need to recursively walk the directory tree.
   *
   * Instead a {@link ListObjectsRequest} is created requesting a (windowed)
   * listing of all entries under the given path. This is used to construct
   * an {@code ObjectListingIterator} instance, iteratively returning the
   * sequence of lists of elements under the path. This is then iterated
   * over in a {@code FileStatusListingIterator}, which generates
   * {@link S3AFileStatus} instances, one per listing entry.
   * These are then translated into {@link LocatedFileStatus} instances.
   *
   * This is essentially a nested and wrapped set of iterators, with some
   * generator classes; an architecture which may become less convoluted
   * using lambda-expressions.
   * @param f a path
   * @param recursive if the subdirectories need to be traversed recursively
   *
   * @return an iterator that traverses statuses of the files/directories
   *         in the given path
   * @throws FileNotFoundException if {@code path} does not exist
   * @throws IOException if any I/O error occurred
   */
  @Override
  public RemoteIterator<LocatedFileStatus> listFiles(Path f,
      boolean recursive) throws FileNotFoundException, IOException {
    // only plain files are returned; directory entries are filtered out
    return innerListFiles(f, recursive,
        new Listing.AcceptFilesOnly(qualify(f)));
  }
  /**
   * Variant of {@link #listFiles(Path, boolean)} which uses an acceptor
   * that admits all entries except S3N directory markers, rather than
   * restricting results to plain files.
   * @param f a path
   * @param recursive if the subdirectories need to be traversed recursively
   * @return an iterator over the accepted entries under the path
   * @throws IOException if any I/O error occurred
   */
  public RemoteIterator<LocatedFileStatus> listFilesAndEmptyDirectories(Path f,
      boolean recursive) throws IOException {
    return innerListFiles(f, recursive,
        new Listing.AcceptAllButS3nDirs());
  }
  /**
   * Shared implementation of the listFiles operations: list entries under
   * a path, merging the S3 listing with any S3Guard metadata store entries
   * and filtering results through the supplied acceptor.
   * @param f path to list
   * @param recursive if true list all descendants, else direct children only
   * @param acceptor filter deciding which entries are returned
   * @return an iterator over the located statuses
   * @throws FileNotFoundException if the path does not exist
   * @throws IOException any other IO failure
   */
  private RemoteIterator<LocatedFileStatus> innerListFiles(Path f, boolean
      recursive, Listing.FileStatusAcceptor acceptor) throws IOException {
    incrementStatistic(INVOCATION_LIST_FILES);
    Path path = qualify(f);
    LOG.debug("listFiles({}, {})", path, recursive);
    try {
      // lookup dir triggers existence check
      final FileStatus fileStatus = getFileStatus(path);
      if (fileStatus.isFile()) {
        // simple case: File
        LOG.debug("Path is a file");
        return new Listing.SingleStatusRemoteIterator(
            toLocatedFileStatus(fileStatus));
      } else {
        // directory: do a bulk operation
        String key = maybeAddTrailingSlash(pathToKey(path));
        // a null delimiter produces a flat (recursive) object listing
        String delimiter = recursive ? null : "/";
        LOG.debug("Requesting all entries under {} with delimiter '{}'",
            key, delimiter);
        final RemoteIterator<FileStatus> cachedFilesIterator;
        final Set<Path> tombstones;
        if (recursive) {
          final PathMetadata pm = metadataStore.get(path, true);
          // shouldn't need to check pm.isDeleted() because that will have
          // been caught by getFileStatus above.
          MetadataStoreListFilesIterator metadataStoreListFilesIterator =
              new MetadataStoreListFilesIterator(metadataStore, pm,
                  allowAuthoritative);
          tombstones = metadataStoreListFilesIterator.listTombstones();
          cachedFilesIterator = metadataStoreListFilesIterator;
        } else {
          DirListingMetadata meta = metadataStore.listChildren(path);
          if (meta != null) {
            tombstones = meta.listTombstones();
          } else {
            tombstones = null;
          }
          cachedFilesIterator = listing.createProvidedFileStatusIterator(
              S3Guard.dirMetaToStatuses(meta), ACCEPT_ALL, acceptor);
          if (allowAuthoritative && meta != null && meta.isAuthoritative()) {
            // metadata listing is authoritative, so return it directly
            return listing.createLocatedFileStatusIterator(cachedFilesIterator);
          }
        }
        // merge the S3 listing with the cached entries, then strip out any
        // results covered by tombstones
        return listing.createTombstoneReconcilingIterator(
            listing.createLocatedFileStatusIterator(
                listing.createFileStatusListingIterator(path,
                    createListObjectsRequest(key, delimiter),
                    ACCEPT_ALL,
                    acceptor,
                    cachedFilesIterator)),
            tombstones);
      }
    } catch (AmazonClientException e) {
      // TODO S3Guard: retry on file not found exception
      throw translateException("listFiles", path, e);
    }
  }
  /**
   * Override superclass so as to add statistic collection; delegates to
   * the filtered form with an accept-everything filter.
   * {@inheritDoc}
   */
  @Override
  public RemoteIterator<LocatedFileStatus> listLocatedStatus(Path f)
      throws FileNotFoundException, IOException {
    return listLocatedStatus(f, ACCEPT_ALL);
  }
  /**
   * {@inheritDoc}.
   *
   * S3 Optimized directory listing. The initial operation performs the
   * first bulk listing; extra listings will take place
   * when all the current set of results are used up.
   * @param f a path
   * @param filter a path filter
   * @return an iterator that traverses statuses of the files/directories
   *         in the given path
   * @throws FileNotFoundException if {@code path} does not exist
   * @throws IOException if any I/O error occurred
   */
  @Override
  public RemoteIterator<LocatedFileStatus> listLocatedStatus(final Path f,
      final PathFilter filter)
      throws FileNotFoundException, IOException {
    incrementStatistic(INVOCATION_LIST_LOCATED_STATUS);
    Path path = qualify(f);
    LOG.debug("listLocatedStatus({}, {}", path, filter);
    try {
      // lookup dir triggers existence check
      final FileStatus fileStatus = getFileStatus(path);
      if (fileStatus.isFile()) {
        // simple case: File
        LOG.debug("Path is a file");
        return new Listing.SingleStatusRemoteIterator(
            filter.accept(path) ? toLocatedFileStatus(fileStatus) : null);
      } else {
        // directory: trigger a lookup
        final String key = maybeAddTrailingSlash(pathToKey(path));
        final Listing.FileStatusAcceptor acceptor =
            new Listing.AcceptAllButSelfAndS3nDirs(path);
        DirListingMetadata meta = metadataStore.listChildren(path);
        final RemoteIterator<FileStatus> cachedFileStatusIterator =
            listing.createProvidedFileStatusIterator(
                S3Guard.dirMetaToStatuses(meta), filter, acceptor);
        // if the metadata store listing is authoritative, serve it directly;
        // otherwise merge it with a live S3 listing
        return (allowAuthoritative && meta != null && meta.isAuthoritative())
            ? listing.createLocatedFileStatusIterator(cachedFileStatusIterator)
            : listing.createLocatedFileStatusIterator(
                listing.createFileStatusListingIterator(path,
                    createListObjectsRequest(key, "/"),
                    filter,
                    acceptor,
                    cachedFileStatusIterator));
      }
    } catch (AmazonClientException e) {
      throw translateException("listLocatedStatus", path, e);
    }
  }
/**
* Build a {@link LocatedFileStatus} from a {@link FileStatus} instance.
* @param status file status
* @return a located status with block locations set up from this FS.
* @throws IOException IO Problems.
*/
LocatedFileStatus toLocatedFileStatus(FileStatus status)
throws IOException {
return new LocatedFileStatus(status,
status.isFile() ?
getFileBlockLocations(status, 0, status.getLen())
: null);
}
/**
* Helper for an ongoing write operation.
* <p>
* It hides direct access to the S3 API from the output stream,
* and is a location where the object upload process can be evolved/enhanced.
* <p>
* Features
* <ul>
* <li>Methods to create and submit requests to S3, so avoiding
* all direct interaction with the AWS APIs.</li>
* <li>Some extra preflight checks of arguments, so failing fast on
* errors.</li>
* <li>Callbacks to let the FS know of events in the output stream
* upload process.</li>
* </ul>
*
* Each instance of this state is unique to a single output stream.
*/
final class WriteOperationHelper {
private final String key;
private WriteOperationHelper(String key) {
this.key = key;
}
/**
* Create a {@link PutObjectRequest} request.
* If {@code length} is set, the metadata is configured with the size of
* the upload.
* @param inputStream source data.
* @param length size, if known. Use -1 for not known
* @return the request
*/
PutObjectRequest newPutRequest(InputStream inputStream, long length) {
PutObjectRequest request = newPutObjectRequest(key,
newObjectMetadata(length), inputStream);
return request;
}
/**
* Create a {@link PutObjectRequest} request to upload a file.
* @param sourceFile source file
* @return the request
*/
PutObjectRequest newPutRequest(File sourceFile) {
int length = (int) sourceFile.length();
PutObjectRequest request = newPutObjectRequest(key,
newObjectMetadata(length), sourceFile);
return request;
}
/**
* Callback on a successful write.
*/
void writeSuccessful(long length) {
finishedWrite(key, length);
}
/**
* Callback on a write failure.
* @param e Any exception raised which triggered the failure.
*/
void writeFailed(Exception e) {
LOG.debug("Write to {} failed", this, e);
}
/**
* Create a new object metadata instance.
* Any standard metadata headers are added here, for example:
* encryption.
* @param length size, if known. Use -1 for not known
* @return a new metadata instance
*/
public ObjectMetadata newObjectMetadata(long length) {
return S3AFileSystem.this.newObjectMetadata(length);
}
/**
* Start the multipart upload process.
* @return the upload result containing the ID
* @throws IOException IO problem
*/
String initiateMultiPartUpload() throws IOException {
LOG.debug("Initiating Multipart upload");
final InitiateMultipartUploadRequest initiateMPURequest =
new InitiateMultipartUploadRequest(bucket,
key,
newObjectMetadata(-1));
initiateMPURequest.setCannedACL(cannedACL);
setOptionalMultipartUploadRequestParameters(initiateMPURequest);
try {
return s3.initiateMultipartUpload(initiateMPURequest)
.getUploadId();
} catch (AmazonClientException ace) {
throw translateException("initiate MultiPartUpload", key, ace);
}
}
/**
* Complete a multipart upload operation.
* @param uploadId multipart operation Id
* @param partETags list of partial uploads
* @return the result
* @throws AmazonClientException on problems.
*/
CompleteMultipartUploadResult completeMultipartUpload(String uploadId,
List<PartETag> partETags) throws AmazonClientException {
Preconditions.checkNotNull(uploadId);
Preconditions.checkNotNull(partETags);
Preconditions.checkArgument(!partETags.isEmpty(),
"No partitions have been uploaded");
LOG.debug("Completing multipart upload {} with {} parts",
uploadId, partETags.size());
// a copy of the list is required, so that the AWS SDK doesn't
// attempt to sort an unmodifiable list.
return s3.completeMultipartUpload(
new CompleteMultipartUploadRequest(bucket,
key,
uploadId,
new ArrayList<>(partETags)));
}
/**
* Abort a multipart upload operation.
* @param uploadId multipart operation Id
* @throws AmazonClientException on problems.
*/
void abortMultipartUpload(String uploadId) throws AmazonClientException {
LOG.debug("Aborting multipart upload {}", uploadId);
s3.abortMultipartUpload(
new AbortMultipartUploadRequest(bucket, key, uploadId));
}
/**
* Create and initialize a part request of a multipart upload.
* Exactly one of: {@code uploadStream} or {@code sourceFile}
* must be specified.
* @param uploadId ID of ongoing upload
* @param partNumber current part number of the upload
* @param size amount of data
* @param uploadStream source of data to upload
* @param sourceFile optional source file.
* @return the request.
*/
UploadPartRequest newUploadPartRequest(String uploadId,
int partNumber, int size, InputStream uploadStream, File sourceFile) {
Preconditions.checkNotNull(uploadId);
// exactly one source must be set; xor verifies this
Preconditions.checkArgument((uploadStream != null) ^ (sourceFile != null),
"Data source");
Preconditions.checkArgument(size > 0, "Invalid partition size %s", size);
Preconditions.checkArgument(partNumber > 0 && partNumber <= 10000,
"partNumber must be between 1 and 10000 inclusive, but is %s",
partNumber);
LOG.debug("Creating part upload request for {} #{} size {}",
uploadId, partNumber, size);
UploadPartRequest request = new UploadPartRequest()
.withBucketName(bucket)
.withKey(key)
.withUploadId(uploadId)
.withPartNumber(partNumber)
.withPartSize(size);
if (uploadStream != null) {
// there's an upload stream. Bind to it.
request.setInputStream(uploadStream);
} else {
request.setFile(sourceFile);
}
return request;
}
/**
* The toString method is intended to be used in logging/toString calls.
* @return a string description.
*/
@Override
public String toString() {
final StringBuilder sb = new StringBuilder(
"{bucket=").append(bucket);
sb.append(", key='").append(key).append('\'');
sb.append('}');
return sb.toString();
}
/**
* PUT an object directly (i.e. not via the transfer manager).
* @param putObjectRequest the request
* @return the upload initiated
* @throws IOException on problems
*/
PutObjectResult putObject(PutObjectRequest putObjectRequest)
throws IOException {
try {
return putObjectDirect(putObjectRequest);
} catch (AmazonClientException e) {
throw translateException("put", putObjectRequest.getKey(), e);
}
}
}
}
|
package bio.knowledge.server.model;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import com.fasterxml.jackson.annotation.JsonProperty;
import io.swagger.annotations.ApiModelProperty;
/**
 * ServerConceptWithDetailsBeaconEntry: per-beacon entry of a concept's
 * details (identifier, synonyms, definition and detail records).
 */
@javax.annotation.Generated(value = "io.swagger.codegen.languages.SpringCodegen", date = "2018-09-17T21:48:57.324-07:00")
public class ServerConceptWithDetailsBeaconEntry {
  @JsonProperty("beacon")
  private Integer beacon = null;

  @JsonProperty("id")
  private String id = null;

  @JsonProperty("synonyms")
  private List<String> synonyms = new ArrayList<String>();

  @JsonProperty("definition")
  private String definition = null;

  @JsonProperty("details")
  private List<ServerConceptDetail> details = new ArrayList<ServerConceptDetail>();

  /**
   * Fluent setter for {@link #setBeacon(Integer)}.
   */
  public ServerConceptWithDetailsBeaconEntry beacon(Integer beacon) {
    this.beacon = beacon;
    return this;
  }

  /**
   * Index number of beacon providing these concept details
   * @return beacon
  **/
  @ApiModelProperty(value = "Index number of beacon providing these concept details ")
  public Integer getBeacon() {
    return beacon;
  }

  public void setBeacon(Integer beacon) {
    this.beacon = beacon;
  }

  /**
   * Fluent setter for {@link #setId(String)}.
   */
  public ServerConceptWithDetailsBeaconEntry id(String id) {
    this.id = id;
    return this;
  }

  /**
   * CURIE identifying the specific beacon source concept being described.
   * @return id
  **/
  @ApiModelProperty(value = "CURIE identifying the specific beacon source concept being described. ")
  public String getId() {
    return id;
  }

  public void setId(String id) {
    this.id = id;
  }

  /**
   * Fluent setter for {@link #setSynonyms(List)}.
   */
  public ServerConceptWithDetailsBeaconEntry synonyms(List<String> synonyms) {
    this.synonyms = synonyms;
    return this;
  }

  /**
   * Append a single synonym, creating the list if a caller previously
   * set it to null via {@link #setSynonyms(List)}.
   */
  public ServerConceptWithDetailsBeaconEntry addSynonymsItem(String synonymsItem) {
    if (this.synonyms == null) {
      // guard: the setter may have installed a null list
      this.synonyms = new ArrayList<String>();
    }
    this.synonyms.add(synonymsItem);
    return this;
  }

  /**
   * List of synonymous names or identifiers for the concept
   * @return synonyms
  **/
  @ApiModelProperty(value = "List of synonymous names or identifiers for the concept ")
  public List<String> getSynonyms() {
    return synonyms;
  }

  public void setSynonyms(List<String> synonyms) {
    this.synonyms = synonyms;
  }

  /**
   * Fluent setter for {@link #setDefinition(String)}.
   */
  public ServerConceptWithDetailsBeaconEntry definition(String definition) {
    this.definition = definition;
    return this;
  }

  /**
   * Concept definition provided by a given beacon
   * @return definition
  **/
  @ApiModelProperty(value = "Concept definition provided by a given beacon ")
  public String getDefinition() {
    return definition;
  }

  public void setDefinition(String definition) {
    this.definition = definition;
  }

  /**
   * Fluent setter for {@link #setDetails(List)}.
   */
  public ServerConceptWithDetailsBeaconEntry details(List<ServerConceptDetail> details) {
    this.details = details;
    return this;
  }

  /**
   * Append a single detail record, creating the list if a caller
   * previously set it to null via {@link #setDetails(List)}.
   */
  public ServerConceptWithDetailsBeaconEntry addDetailsItem(ServerConceptDetail detailsItem) {
    if (this.details == null) {
      // guard: the setter may have installed a null list
      this.details = new ArrayList<ServerConceptDetail>();
    }
    this.details.add(detailsItem);
    return this;
  }

  /**
   * Get details
   * @return details
  **/
  @ApiModelProperty(value = "")
  public List<ServerConceptDetail> getDetails() {
    return details;
  }

  public void setDetails(List<ServerConceptDetail> details) {
    this.details = details;
  }

  @Override
  public boolean equals(java.lang.Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }
    ServerConceptWithDetailsBeaconEntry that = (ServerConceptWithDetailsBeaconEntry) o;
    return Objects.equals(this.beacon, that.beacon) &&
        Objects.equals(this.id, that.id) &&
        Objects.equals(this.synonyms, that.synonyms) &&
        Objects.equals(this.definition, that.definition) &&
        Objects.equals(this.details, that.details);
  }

  @Override
  public int hashCode() {
    return Objects.hash(beacon, id, synonyms, definition, details);
  }

  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder();
    sb.append("class ServerConceptWithDetailsBeaconEntry {\n");

    sb.append("    beacon: ").append(toIndentedString(beacon)).append("\n");
    sb.append("    id: ").append(toIndentedString(id)).append("\n");
    sb.append("    synonyms: ").append(toIndentedString(synonyms)).append("\n");
    sb.append("    definition: ").append(toIndentedString(definition)).append("\n");
    sb.append("    details: ").append(toIndentedString(details)).append("\n");
    sb.append("}");
    return sb.toString();
  }

  /**
   * Convert the given object to string with each line indented by 4 spaces
   * (except the first line).
   */
  private String toIndentedString(java.lang.Object o) {
    if (o == null) {
      return "null";
    }
    return o.toString().replace("\n", "\n    ");
  }
}
|
/*
* Copyright (c) 2021 AcadiaSoft, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package com.acadiasoft.im.simm.model.param.commodity;
import com.acadiasoft.im.simm.model.SensitivityIdentifier;
import com.acadiasoft.im.simm.model.imtree.identifiers.BucketType;
import com.acadiasoft.im.simm.model.param.SimmRiskWeight;
import java.math.BigDecimal;
import java.util.HashMap;
import java.util.Map;
/**
* As defined in Appendix 1 of ISDA_SIMM_2.0_(PUBLIC).pdf
*/
/**
 * SIMM 2.0 one-day commodity risk weights, as defined in Appendix 1 of
 * ISDA_SIMM_2.0_(PUBLIC).pdf. Delta weights are looked up per bucket;
 * vega uses a single flat weight.
 */
public class CommodityRiskWeight1d implements SimmRiskWeight {

  /** Per-bucket delta risk weights (CM1..CM17). */
  private static final Map<BucketType, BigDecimal> WEIGHTS = new HashMap<>();

  static {
    put(BucketType.CM1, "6.3");
    put(BucketType.CM2, "9.1");
    put(BucketType.CM3, "8.1");
    put(BucketType.CM4, "7.2");
    put(BucketType.CM5, "10");
    put(BucketType.CM6, "8");
    put(BucketType.CM7, "7.1");
    put(BucketType.CM8, "11");
    put(BucketType.CM9, "8.1");
    put(BucketType.CM10, "16");
    put(BucketType.CM11, "6.2");
    put(BucketType.CM12, "6.2");
    put(BucketType.CM13, "4.7");
    put(BucketType.CM14, "4.8");
    put(BucketType.CM15, "3.8");
    put(BucketType.CM16, "16");
    put(BucketType.CM17, "5.1");
  }

  /** Flat vega risk weight applied to every commodity bucket. */
  private static final BigDecimal VEGA = new BigDecimal("0.13");

  /** Registers one bucket's delta weight, parsing the decimal from a string. */
  private static void put(BucketType bucket, String weight) {
    WEIGHTS.put(bucket, new BigDecimal(weight));
  }

  @Override
  public BigDecimal getDeltaRiskWeight(SensitivityIdentifier s) {
    return WEIGHTS.get(s.getBucketIdentifier().getBucketType());
  }

  @Override
  public BigDecimal getVegaRiskWeight(SensitivityIdentifier s) {
    return VEGA;
  }
}
|
/*
* Copyright 2014 The MITRE Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.mitre.createEbts;
import java.awt.image.BufferedImage;
import java.io.*;
import java.text.SimpleDateFormat;
import java.util.Date;
import org.apache.commons.io.IOUtils;
import org.mitre.jet.ebts.Ebts;
import org.mitre.jet.ebts.EbtsBuilder;
import org.mitre.jet.ebts.field.Field;
import org.mitre.jet.ebts.records.BinaryHeaderImageRecord;
import org.mitre.jet.ebts.records.GenericRecord;
import javax.imageio.ImageIO;
public class createEbts {

    /**
     * Builds a sample EBTS transaction containing Type 1, 2, 4, 10 and 14
     * records from bundled sample images, then writes it to testEbts.eft.
     */
    public static void main(String[] args){
        Ebts ebts = new Ebts();
        try {
            // Type 1 record: transaction header.
            String tot = "CAR";
            String dest = "DESTORI01";
            String ori = "TESTWV123";
            String tcn = "TESTWV123-20140930163518-JET1-0001-07171";
            ebts.addRecord(createType1Record(tot, dest, ori, tcn));

            // Type 2 record: descriptive (biographic) data.
            String name = "JOHNSON, SALLY";
            String dob = "19921201";
            String sex = "F";
            String rfp = "TEST TRANSACTION";
            String height = "510";
            String weight = "150";
            ebts.addRecord(createType2Record(name, dob, sex, rfp, height, weight));

            // Type 4 record: binary fingerprint image.
            byte[] imgData4 = readResource("a001.wsq");
            boolean markRolled = true;
            String position = "1";
            ebts.addRecord(createType4Record(markRolled, position, imgData4, ebts));

            // Type 10 record: facial photo.
            String imgType = "FACE";
            String agency = "BATTT499Z0";
            byte[] imgData10 = readResource("face.jpg");
            ebts.addRecord(createType10Record(imgType, agency, imgData10, ebts));

            // Type 14 record: variable-resolution fingerprint image.
            byte[] imgData14 = readResource("a001.wsq");
            ebts.addRecord(createType14Record(markRolled, position, imgData14, ebts));
        } catch (IOException e) {
            e.printStackTrace();
        }
        saveEbts(ebts);
    }

    /**
     * Reads a classpath resource fully into memory.
     * The original never closed the InputStreams it opened and threw an
     * unhelpful NullPointerException when a resource was missing; this helper
     * closes the stream via try-with-resources and fails with a named
     * FileNotFoundException instead.
     *
     * @param name resource name on the classpath
     * @return the resource bytes
     * @throws IOException if the resource is missing or cannot be read
     */
    private static byte[] readResource(String name) throws IOException {
        try (InputStream is = createEbts.class.getClassLoader().getResourceAsStream(name)) {
            if (is == null) {
                throw new FileNotFoundException("classpath resource not found: " + name);
            }
            return IOUtils.toByteArray(is);
        }
    }

    //type 1
    /**
     * Builds the Type 1 (transaction information) record.
     *
     * @param tot  type of transaction
     * @param dest destination agency identifier
     * @param ori  originating agency identifier
     * @param tcn  transaction control number
     */
    public static GenericRecord createType1Record(String tot, String dest, String ori, String tcn){
        GenericRecord type1Record = new GenericRecord(1);
        type1Record.setField(2, new Field("0201"));
        type1Record.setField(3, new Field());//file content
        type1Record.setField(4, new Field(tot));
        type1Record.setField(5, new Field(todaysDate()));
        type1Record.setField(7, new Field(dest));
        type1Record.setField(8, new Field(ori));
        type1Record.setField(9, new Field(tcn));
        // "19.69" — presumably native/nominal resolution in pixels per mm;
        // TODO confirm against the EBTS field 1.011/1.012 definitions.
        type1Record.setField(11, new Field("19.69"));
        type1Record.setField(12, new Field("19.69"));
        return type1Record;
    }

    //type 2
    /**
     * Builds the Type 2 (descriptive/biographic) record.
     *
     * @param name   subject name
     * @param dob    date of birth (yyyyMMdd)
     * @param sex    sex code
     * @param RFP    reason fingerprinted
     * @param height height
     * @param weight weight
     */
    public static GenericRecord createType2Record(String name, String dob, String sex, String RFP, String height, String weight){
        GenericRecord type2Record = new GenericRecord(2);
        type2Record.setField(2, new Field("00"));
        type2Record.setField(18, new Field(name));
        type2Record.setField(20, new Field(dob));
        type2Record.setField(24, new Field(sex));
        type2Record.setField(37, new Field(RFP));
        type2Record.setField(27, new Field(height));
        type2Record.setField(29, new Field(weight));
        return type2Record;
    }

    //type 4
    /**
     * Builds the Type 4 (binary fingerprint image) record.
     *
     * @param markRolled true to mark the impression as rolled, false for plain
     * @param fgp        finger position code
     * @param imgData    encoded image bytes
     * @param ebts       unused; kept for signature compatibility
     */
    public static BinaryHeaderImageRecord createType4Record(boolean markRolled, String fgp, byte[] imgData, Ebts ebts) throws IOException{
        BinaryHeaderImageRecord type4Record = new BinaryHeaderImageRecord(4);
        BufferedImage image = createImageFromBytes(imgData);
        int length = 18 + imgData.length; // 18-byte binary header + image payload
        int idc = 1; // was the octal literal 01; decimal 1 intended
        type4Record.setField(1, new Field(String.valueOf(length)));
        type4Record.setField(2, new Field(String.valueOf(idc)));
        // Impression type: 1 = rolled, 0 = plain/flat.
        type4Record.setField(3, new Field(markRolled ? "1" : "0"));
        type4Record.setField(4, new Field(fgp));
        type4Record.setField(5, new Field("0"));
        type4Record.setField(6, new Field(String.valueOf(image.getWidth())));
        type4Record.setField(7, new Field(String.valueOf(image.getHeight())));
        type4Record.setField(8, new Field("1")); //Scale Units - Value of (1) Denotes that 1.011 and 1.012 designate scale.
        type4Record.setImageData(imgData);
        return type4Record;
    }

    //type 10 (the original header comment said "type 2" — fixed)
    /**
     * Builds the Type 10 (facial / SMT photo) record.
     *
     * @param imgType image type code (e.g. "FACE")
     * @param agency  photo acquisition source agency
     * @param imgData encoded image bytes
     * @param ebts    unused; kept for signature compatibility
     */
    public static GenericRecord createType10Record(String imgType, String agency, byte[] imgData, Ebts ebts) throws IOException{
        GenericRecord type10Record = new GenericRecord(10);
        BufferedImage image = createImageFromBytes(imgData);
        int length = 12 + imgData.length;
        int idc = 2; // was the octal literal 02; decimal 2 intended
        type10Record.setField(1, new Field(String.valueOf(length)));
        type10Record.setField(2, new Field(String.valueOf(idc)));
        type10Record.setField(3, new Field(imgType));
        type10Record.setField(4, new Field(agency));
        type10Record.setField(5, new Field(todaysDate()));
        type10Record.setField(6, new Field(String.valueOf(image.getWidth())));
        type10Record.setField(7, new Field(String.valueOf(image.getHeight())));
        type10Record.setField(8, new Field("1")); //Scale Units - Value of (1) denotes type 1 information is correct 1.011/1.012
        type10Record.setField(9, new Field("1"));
        type10Record.setField(10, new Field("1"));
        type10Record.setField(11, new Field("JPEGB")); // compression algorithm
        type10Record.setField(12, new Field("YCC")); // color space
        type10Record.setImageData(imgData);
        return type10Record;
    }

    //type 14
    /**
     * Builds the Type 14 (variable-resolution fingerprint image) record.
     *
     * @param markRolled true to mark the impression as rolled, false for plain
     * @param pos        finger position code
     * @param imgData    encoded image bytes
     * @param ebts       unused; kept for signature compatibility
     */
    public static GenericRecord createType14Record(boolean markRolled, String pos, byte[] imgData, Ebts ebts) throws IOException{
        GenericRecord type14Record = new GenericRecord(14);
        BufferedImage image = createImageFromBytes(imgData);
        int idc = 3; // was the octal literal 03; decimal 3 intended
        type14Record.setField(2, new Field(String.valueOf(idc)));
        // Impression type: 1 = rolled, 0 = plain/flat.
        type14Record.setField(3, new Field(markRolled ? "1" : "0"));
        type14Record.setField(5, new Field(todaysDate()));
        type14Record.setField(6, new Field(String.valueOf(image.getWidth())));
        type14Record.setField(7, new Field(String.valueOf(image.getHeight())));
        type14Record.setField(12, new Field("8")); // presumably bits per pixel — confirm against EBTS spec
        type14Record.setField(13, new Field(pos));
        type14Record.setImageData(imgData);
        return type14Record;
    }

    //save Ebts
    /**
     * Serializes the transaction and writes it to testEbts.eft in the working
     * directory. Uses try-with-resources so the stream is flushed and closed
     * even when the write fails (the original leaked it on error).
     */
    public static void saveEbts(Ebts ebts){
        try{
            EbtsBuilder ebtsBuilder = new EbtsBuilder();
            byte[] sample = ebtsBuilder.build(ebts);
            String fileName = "testEbts.eft";
            try (BufferedOutputStream bos = new BufferedOutputStream(
                    new FileOutputStream(new File(fileName)))) {
                bos.write(sample);
            }
            System.out.println("Created "+fileName);
        }catch(Exception ex){
            ex.printStackTrace();
        }
    }

    /**
     * Decodes an image from raw bytes.
     *
     * @param imageData encoded image bytes
     * @return the decoded image, never null
     * @throws IOException if decoding fails or no installed ImageIO reader
     *         understands the format (e.g. WSQ without a plugin). The
     *         original returned null in that case, which caused an NPE at the
     *         first getWidth() call in the record factories.
     */
    public static BufferedImage createImageFromBytes(byte[] imageData) throws IOException {
        ImageIO.setUseCache(false);
        BufferedImage image = ImageIO.read(new ByteArrayInputStream(imageData));
        if (image == null) {
            throw new IOException("no ImageIO reader could decode the supplied image data");
        }
        return image;
    }

    /** @return today's date formatted as yyyyMMdd. */
    private static String todaysDate() {
        return new SimpleDateFormat("yyyyMMdd").format(new Date());
    }
}
|
package com.security.user.model;
import com.baomidou.mybatisplus.annotation.IdType;
import com.baomidou.mybatisplus.annotation.TableField;
import com.baomidou.mybatisplus.annotation.TableId;
import com.baomidou.mybatisplus.core.toolkit.CollectionUtils;
import lombok.Data;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
/**
* 角色信息
* @author: stars
* @date 2020年 07月 09日 11:55
**/
@Data
public class SysRole implements Serializable {

    /** Primary key (auto-increment). */
    @TableId(type = IdType.AUTO)
    private Long id;

    /** Role name. */
    private String name;

    /** Role description. */
    private String remark;

    /** Creation timestamp. */
    private Date createDate;

    /** Last-update timestamp. */
    private Date updateDate;

    /**
     * Permission objects granted to this role; populated when editing a role.
     * Not a database column.
     */
    @TableField(exist = false)
    private List<SysPermission> perList = new ArrayList<SysPermission>();

    /**
     * Permission IDs granted to this role; populated when editing a role.
     * Not a database column.
     */
    @TableField(exist = false)
    private List<Long> perIds = new ArrayList<Long>();

    /**
     * Returns the permission IDs. Whenever {@link #perList} is non-empty the
     * IDs are rebuilt from it, so the two collections stay in sync.
     */
    public List<Long> getPerIds() {
        if (CollectionUtils.isNotEmpty(perList)) {
            List<Long> rebuilt = new ArrayList<Long>();
            for (SysPermission permission : perList) {
                rebuilt.add(permission.getId());
            }
            perIds = rebuilt;
        }
        return perIds;
    }
}
|
package com.project.messagesend;
import net.nurigo.java_sdk.api.Message;
import net.nurigo.java_sdk.exceptions.CoolsmsException;
import org.json.simple.JSONObject;
import java.util.HashMap;
/**
* @class ExampleSend
* @brief This sample code demonstrate how to send sms through CoolSMS Rest API PHP
*/
/**
 * Sends a verification SMS through the CoolSMS REST API.
 */
public class CodeMessage {

    /**
     * Sends {@code send_message} as an SMS to {@code phoneNum} and prints the
     * API response (or the error message and code on failure).
     */
    public static void sms_send(String phoneNum, String send_message) {
        // NOTE(review): API credentials and the sender number are blank
        // placeholders; they must be filled in before this can actually send.
        String api_key = "";
        String api_secret = "";
        Message coolsms = new Message(api_key, api_secret);

        // "to", "from", "type" and "text" are the four mandatory parameters.
        HashMap<String, String> params = new HashMap<String, String>();
        params.put("to", phoneNum);
        params.put("from", "");
        params.put("type", "SMS");
        params.put("text", send_message);
        params.put("app_version", "test app 1.2"); // application name and version

        try {
            JSONObject response = (JSONObject) coolsms.send(params);
            System.out.println(response.toString());
        } catch (CoolsmsException e) {
            System.out.println(e.getMessage());
            System.out.println(e.getCode());
        }
    }
}
|
package simpleircserver.talker;
/*
*
* IrcTalker
* is part of Simple Irc Server
*
*
* Copyright (С) 2012, 2015, Nikolay Kirdin
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License Version 3.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License Version 3 for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License Version 3 along with this program. If not, see
* <http://www.gnu.org/licenses/>.
*
*/
import java.util.logging.*;
import simpleircserver.base.Globals;
import simpleircserver.base.Recipient;
import simpleircserver.connection.Connection;
import simpleircserver.parser.IrcCommandReport;
import simpleircserver.parser.IrcIncomingMessage;
import simpleircserver.tools.IrcAvgMeter;
import java.util.concurrent.*;
import java.util.concurrent.atomic.*;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.net.*;
/**
* Класс, являющийся родителем классов, хранящих информацию о
* клиентах данного сервера IRC.
*
* Объект этого класса характеризуется состояниями, которые определены в
* {@link IrcTalkerState}.
*
* @version 0.5 2012-02-12
* @version 0.5.3 2015-11-05 Program units were moved from default package into packages with names. Unit tests were added.
* @author Nikolay Kirdin
*/
/**
 * Parent class for the classes holding information about clients of this IRC
 * server. Each instance moves through the states defined in
 * {@link IrcTalkerState}. (Original documentation translated from Russian.)
 *
 * @version 0.5 2012-02-12
 * @version 0.5.3 2015-11-05 Program units were moved from default package into packages with names. Unit tests were added.
 * @author Nikolay Kirdin
 */
public abstract class IrcTalker implements Recipient {

    /** Sequence counter used to generate unique identifiers. */
    private static AtomicLong seq = new AtomicLong(0);

    /** Connection state. */
    private IrcTalkerState state;

    /** Time of the transition into the current {@link #state}. */
    public AtomicLong stateTime = new AtomicLong();

    /** Read/write lock guarding {@link #state}. */
    private final ReentrantReadWriteLock stateRWLock =
            new ReentrantReadWriteLock();

    /** Read lock for {@link #state}. */
    private final Lock stateRLock = stateRWLock.readLock();

    /** Write lock for {@link #state}. */
    private final Lock stateWLock = stateRWLock.writeLock();

    /** Unique identifier. */
    private final long id;

    /**
     * Base-36 textual representation of the unique identifier, created by
     * {@link #genCanonicalId}.
     */
    private String idString;

    /** Nickname. */
    private String nickname = "";

    /** Network identifier. */
    private InetAddress networkId;

    /** FQDN of the host. */
    private String hostname = "";

    /** Hop count. */
    private int hopcount;

    /**
     * Registration flag. Kept in sync by {@link #setRegistered(boolean)};
     * the authoritative check is {@link #isRegistered()}, which derives the
     * answer from {@link #state}.
     */
    private boolean registered = false;

    /** Client password, as supplied with the PASS command. */
    private String password;

    /**
     * The Connection object servicing the network socket this client is
     * attached to.
     */
    private Connection connection;

    /** Time the last message was read. */
    private long lastMessageTime;

    /** Maximum allowed output rate (messages/second). */
    public AtomicInteger maxOutputRate = new AtomicInteger(10);

    /** Average output rate meter. */
    public IrcAvgMeter avgOutputRate = new IrcAvgMeter(300);

    /** Maximum allowed average input rate (messages/second). */
    public AtomicInteger maxInputRate = new AtomicInteger(1);

    /** Average input rate meter. */
    public IrcAvgMeter avgInputRate = new IrcAvgMeter(300);

    /**
     * Constructor. Generates the unique identifier and its base-36 textual
     * form via {@link #genCanonicalId}, puts the object into the
     * {@link IrcTalkerState#NEW} state, records the transition time, and
     * attaches the service pseudo-connection {@link Globals#nullConnection}.
     */
    public IrcTalker() {
        id = seq.getAndIncrement();
        idString = genCanonicalId(id);
        setState(IrcTalkerState.NEW);
        stateTime.set(System.currentTimeMillis());
        connection = Globals.nullConnection.get();
        avgInputRate.setValue(stateTime.get());
        avgOutputRate.setValue(stateTime.get());
    }

    /**
     * Returns the base-36 representation of the unique identifier.
     * @return base-36 identifier string.
     */
    public String getIdString() {
        return idString;
    }

    /**
     * Returns the unique identifier.
     * @return unique identifier.
     */
    public long getId() {
        return id;
    }

    /**
     * Returns the nickname.
     * @return nickname.
     */
    public synchronized String getNickname() {
        return nickname;
    }

    /**
     * Sets the nickname.
     * @param name nickname.
     */
    public synchronized void setNickname(String name) {
        nickname = name;
    }

    /**
     * Sets the network identifier.
     * @param networkId network identifier.
     */
    public synchronized void setNetworkId(InetAddress networkId) {
        this.networkId = networkId;
    }

    /**
     * Returns the network identifier.
     * @return network identifier.
     */
    public synchronized InetAddress getNetworkId() {
        return networkId;
    }

    /**
     * Sets the host FQDN.
     * @param hostname host FQDN.
     */
    public synchronized void setHostname(String hostname) {
        this.hostname = hostname;
    }

    /**
     * Returns the host FQDN.
     * @return host FQDN.
     */
    public synchronized String getHostname() {
        return hostname;
    }

    /**
     * Returns the hop count.
     * @return hop count.
     */
    public synchronized int getHopcount() {
        return hopcount;
    }

    /**
     * Sets the hop count.
     * @param hopcount hop count.
     */
    public synchronized void setHopcount(int hopcount) {
        this.hopcount = hopcount;
    }

    /**
     * Whether the client is registered, derived from the current state.
     * @return true if the client is in the OPERATIONAL state.
     */
    public boolean isRegistered() {
        return getState() == IrcTalkerState.OPERATIONAL;
    }

    /**
     * Sets the client's registration status by switching the state.
     * BUG FIX: the original never updated the {@link #registered} field, so
     * {@link #toString()} always reported {@code Registered: false}.
     * @param registered registration status.
     */
    public void setRegistered(boolean registered) {
        this.registered = registered;
        if (registered) {
            setState(IrcTalkerState.OPERATIONAL);
        } else {
            setState(IrcTalkerState.REGISTERING);
        }
        stateTime.set(System.currentTimeMillis());
    }

    /**
     * Sets the client password.
     * @param password client password.
     */
    public synchronized void setPassword(String password) {
        this.password = password;
    }

    /**
     * Returns the client password.
     * @return client password.
     */
    public synchronized String getPassword() {
        return password;
    }

    /**
     * Checks the client password (not implemented).
     * @param password password offered for checking.
     * @return always false.
     */
    public synchronized boolean checkPassword(String password) {
        boolean result = false;
        return result;
    }

    /**
     * Sets the connection.
     * @param connection the connection.
     */
    public synchronized void setConnection(Connection connection) {
        this.connection = connection;
    }

    /**
     * Returns the connection.
     * @return the connection.
     */
    public synchronized Connection getConnection() {
        return connection;
    }

    /**
     * Returns the number of seconds elapsed since the client's last message
     * was received.
     * @return idle time in seconds.
     */
    public synchronized long getIdle() {
        return (System.currentTimeMillis() - lastMessageTime) / 1000;
    }

    /**
     * Records the time the client's last message was received.
     * @param time receive time in milliseconds.
     */
    public synchronized void setLastMessageTime(long time) {
        lastMessageTime = time;
    }

    /**
     * Actions performed when the connection to the client is dropped.
     */
    public void disconnect() {
        // getConnection().running.set(false);
    }

    /**
     * Textual representation of the object, formatted as:
     * <P><code>
     * "id: &lt;id&gt; nickname: &lt;nickname&gt; networkId: &lt;networkId&gt; Registered: &lt;flag&gt;"
     * </code>
     * <P>Fields are separated by spaces.
     * BUG FIX: the registration flag is now derived from the state via
     * {@link #isRegistered()}; the original printed the stale field, which
     * was never set to true.
     */
    public String toString() {
        return "id: " + String.valueOf(id) +
                " nickname: " + nickname +
                " networkId: " + networkId +
                " Registered: " + isRegistered();
    }

    /**
     * Converts a non-negative number into its 5-character base-36 canonical
     * form. Digits in ascending order: "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789".
     * BUG FIX: the original computed {@code sb.substring(sb.length() - 5)}
     * but discarded the result, returning the 'A'-padded full string instead
     * of the intended fixed-width 5-character identifier.
     * @param numb number to convert.
     * @return 5-character base-36 representation.
     */
    public static String genCanonicalId(long numb) {
        String symbs = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
        int sLen = symbs.length();
        StringBuilder sb = new StringBuilder();
        do {
            sb.append(symbs.charAt((int) (numb % sLen)));
            numb /= sLen;
        } while (numb != 0);
        sb.append("AAAAA"); // pad so at least 5 characters survive the cut
        sb.reverse();
        return sb.substring(sb.length() - 5);
    }

    /**
     * Records the time an IRC PONG message was received from the client.
     */
    public void receivePong() {
        getConnection().pongTime.set(System.currentTimeMillis());
    }

    /**
     * Sends a message to this client.
     * @param ircTalker sender of the message.
     * @param message the message.
     * @return result of the send operation.
     */
    public boolean send(IrcTalker ircTalker, String message) {
        IrcCommandReport ircCommandReport = new IrcCommandReport(message,
                IrcTalker.this, ircTalker);
        return send(ircCommandReport);
    }

    /**
     * Sends a message to this client, prefixing the sender's nickname if the
     * report does not already carry a ":" prefix.
     * @param ircCommandReport the message.
     * @return result of the send operation.
     */
    public boolean send(IrcCommandReport ircCommandReport) {
        String message = ircCommandReport.getReport();
        String nick = ircCommandReport.getSender().getNickname();
        if (message.charAt(0) != ':') {
            ircCommandReport.setReport(":" + nick + " " + message);
        }
        return IrcTalker.this.offerToOutputQueue(ircCommandReport);
    }

    /**
     * Places a message into the IRC client's input queue. The message is
     * accepted only while the queue is shorter than
     * {@link Connection#maxInputQueueSize}.
     * @param ircIncomingMessage the message.
     * @return true on success, false if the message could not be queued.
     */
    public boolean offerToInputQueue(IrcIncomingMessage ircIncomingMessage) {
        return getConnection().offerToInputQueue(ircIncomingMessage);
    }

    /**
     * Clears the IRC client's input queue.
     */
    public void dropInputQueue() {
        getConnection().dropInputQueue();
    }

    /*
     * Commented out in the original: direct access to the input queue.
     *
     * public BlockingQueue<IrcIncomingMessage> getInputQueue() {
     *     return getConnection().getInputQueue();
     * }
     */

    /**
     * Returns the number of elements in the input queue.
     * @return input queue size.
     */
    public int getInputQueueSize() {
        return getConnection().getInputQueueSize();
    }

    /**
     * Places a message into the IRC client's output queue. The message is
     * accepted only while the queue is shorter than
     * {@link Connection#maxOutputQueueSize}.
     * @param ircCommandReport the message.
     * @return true on success, false if the message could not be queued.
     */
    public boolean offerToOutputQueue(IrcCommandReport ircCommandReport) {
        return getConnection().offerToOutputQueue(ircCommandReport);
    }

    /**
     * Clears the IRC client's output queue.
     */
    public void dropOutputQueue() {
        getConnection().dropOutputQueue();
    }

    /**
     * Returns the IRC client's output queue.
     * @return output queue.
     */
    public BlockingQueue<IrcCommandReport> getOutputQueue() {
        return getConnection().getOutputQueue();
    }

    /**
     * Returns the number of elements in the output queue.
     * @return output queue size.
     */
    public int getOutputQueueSize() {
        return getConnection().getOutputQueueSize();
    }

    /**
     * Orders talkers by hash code.
     * NOTE(review): this ordering is not guaranteed to be consistent with
     * equals; confirm before relying on it in sorted collections.
     * @throws ClassCastException if the argument is not an IrcTalker.
     */
    public int compareTo(Object object) {
        if (!(object instanceof IrcTalker)) {
            throw new ClassCastException();
        }
        return Integer.compare(this.hashCode(), object.hashCode());
    }

    /*
     * Commented out in the original: permission/rate check deciding whether a
     * requestor may send a message to this client (operator, this server, or
     * average output rate below maxOutputRate).
     *
     * public boolean canReceive(IrcTalker requestor) {
     *     boolean result = false;
     *     if (requestor instanceof IrcServer &&
     *             ((IrcServer) requestor) == Globals.thisIrcServer.get()) {
     *         result = true;
     *     } else if (requestor instanceof User &&
     *             ((User) requestor).isOperator()) {
     *         result = true;
     *     } else if (avgOutputRate.getAvgInterval() < 10000 / maxOutputRate.get()) {
     *         result = false;
     *     } else {
     *         result = true;
     *     }
     *     return result;
     * }
     */

    /**
     * Sets the state under the write lock.
     * @param state new state.
     */
    public void setState(IrcTalkerState state) {
        stateWLock.lock();
        try {
            this.state = state;
        } finally {
            stateWLock.unlock();
        }
    }

    /**
     * Returns the state under the read lock.
     * @return current state.
     */
    public IrcTalkerState getState() {
        IrcTalkerState result = null;
        stateRLock.lock();
        try {
            result = state;
        } finally {
            stateRLock.unlock();
        }
        return result;
    }

    /** Moves the client into the {@link IrcTalkerState#CLOSE} state, unless it
     * is already closing, closed or broken. */
    public void close() {
        stateWLock.lock();
        try {
            if (getState() != IrcTalkerState.CLOSE &&
                    getState() != IrcTalkerState.CLOSING &&
                    getState() != IrcTalkerState.CLOSED &&
                    getState() != IrcTalkerState.BROKEN) {
                setState(IrcTalkerState.CLOSE);
                stateTime.set(System.currentTimeMillis());
                Globals.logger.get().log(Level.FINER, "ircTalker:" +
                        IrcTalker.this + " connection:" + getConnection()
                        + " ircTalker set CLOSE");
            }
        } finally {
            stateWLock.unlock();
        }
    }

    /** Moves the client into the {@link IrcTalkerState#BROKEN} state, unless it
     * is already closing, closed or broken. */
    public void setBroken() {
        stateWLock.lock();
        try {
            if (getState() != IrcTalkerState.CLOSE &&
                    getState() != IrcTalkerState.CLOSING &&
                    getState() != IrcTalkerState.CLOSED &&
                    getState() != IrcTalkerState.BROKEN) {
                setState(IrcTalkerState.BROKEN);
                stateTime.set(System.currentTimeMillis());
                Globals.logger.get().log(Level.FINER, "ircTalker:" +
                        IrcTalker.this + " connection:" + getConnection()
                        + " ircTalker set BROKEN");
            }
        } finally {
            stateWLock.unlock();
        }
    }
}
|
package fr.javatronic.blog.massive.annotation1.sub1;
import fr.javatronic.blog.processor.Annotation_001;
/**
 * Intentionally empty class carrying {@link Annotation_001}.
 * NOTE(review): judging by the package name ("massive.annotation1"), this is
 * presumably one of many generated classes used to stress-test an annotation
 * processor — confirm before editing or removing.
 */
@Annotation_001
public class Class_8566 {
}
|
package com.ccb.sorted.compare.merge;
/**
* 分而治之
* 二路归并排序, 递归二分,依次排序
*/
public class MergeSort {
private static boolean isUp = true;
/**
* 归并排序——升序
* @param arr 要排序的数组
*/
public static void sortUp(int[] arr){
isUp = true;
mergeSort(arr, 0, arr.length - 1);
}
/**
* 归并排序——降序
* @param arr 要排序的数组
*/
public static void sortDown(int[] arr){
isUp = false;
mergeSort(arr, 0, arr.length - 1);
}
/**
* 先把数组一分为二,然后按顺序合并
* @param arr 要排序的数组
* @param left 分的起始点
* @param right 分的末尾点
*/
private static void mergeSort(int[] arr, int left, int right){
if(left >= right) return;
int mid = left + (right - left) / 2;
mergeSort(arr, left, mid);
mergeSort(arr, mid + 1, right);
if(isUp){
mergeUp(arr, left , mid, right);
}else{
mergeDown(arr, left, mid, right);
}
}
/**
* 按递减的顺序合并起来
* @param arr 要排序的数组
* @param left 左边开始下标
* @param mid 左边结束下标
* @param right 右边结束下标
*/
private static void mergeDown(int[] arr, int left, int mid, int right) {
int[] sorted = new int[right - left + 1];
int cur = 0;
int leftStart = left;
int rightStart = mid + 1;
// 先从左右依次找最小值放入sorted数组中
while(leftStart <= mid && rightStart <= right){
if(arr[leftStart] > arr[rightStart]){
sorted[cur++] = arr[leftStart++];
}else{
sorted[cur++] = arr[rightStart++];
}
}
// 如果左边遍历完,把右边剩下的赋值给排序数组
if(leftStart > mid){
while(rightStart <= right){
sorted[cur++] = arr[rightStart++];
}
}
// 如果右边遍历完, 把左边剩下的赋值给排序数组
if(rightStart > right){
while(leftStart <= mid){
sorted[cur++] = arr[leftStart++];
}
}
// 将排序数组拷贝到原数组里
System.arraycopy(sorted, 0, arr, left, right - left + 1);
}
/**
* 按递增的顺序合并起来
* 仿照java的Arrays.sort的源码改进----------------------------------------------------------------------------
* @param arr 要排序的数组
* @param left 左边开始下标
* @param mid 左边结束下标
* @param right 右边结束下标
*/
private static void mergeUp(int[] arr, int left, int mid, int right) {
// 如果左边的最后一个比右边的第一个小,那就不需要继续排了
if(arr[mid] < arr[mid + 1]){
return;
}
int[] sorted = new int[right - left + 1];// 用来放排序的数组
// 将左右列表的最小数依次入排序数组中
for(int i = 0, p = left, q = mid + 1; i < sorted.length; i++) {// i为排序数组下标,p为左边列表指针,q为右边列表指针
if (q > right || p <= mid && arr[p] < arr[q])// 如果右边列表遍历完 或者 左边列表还没遍历完且左边当前值小于右边当前值
sorted[i] = arr[p++];
else
sorted[i] = arr[q++];
}
// 将排序数组拷贝到原数组里
System.arraycopy(sorted, 0, arr, left, right - left + 1);
}
}
|
package sk.filo.plantdiary.service.so;
import io.swagger.v3.oas.annotations.media.Schema;
import lombok.Getter;
import lombok.Setter;
import lombok.ToString;
import javax.validation.constraints.NotBlank;
import javax.validation.constraints.Size;
/**
 * Login request payload: a username/password pair.
 * The password is excluded from toString so it never leaks into logs.
 */
@Schema(name = "Auth")
@Getter
@Setter
@ToString(exclude = {"password"})
public class AuthSO {
    // Login name; required (non-blank), at most 25 characters.
    @NotBlank
    @Size(max = 25)
    private String username;
    // Password as submitted by the client; required (non-blank), at most 255 characters.
    @NotBlank
    @Size(max = 255)
    private String password;
}
|
/*
* Copyright 1999-2012 Alibaba Group.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package fm.liu.timo.exception;
/**
* 未知事物隔离级别异常
*
* @author xianmao.hexm
*/
public class UnknownTxIsolationException extends RuntimeException {
    private static final long serialVersionUID = -3911059999308980358L;
    /** Creates the exception with no detail message or cause. */
    public UnknownTxIsolationException() {
        super();
    }
    /**
     * Creates the exception with a detail message and a cause.
     * @param message description of the unknown isolation level
     * @param cause underlying cause
     */
    public UnknownTxIsolationException(String message, Throwable cause) {
        super(message, cause);
    }
    /**
     * Creates the exception with a detail message only.
     * @param message description of the unknown isolation level
     */
    public UnknownTxIsolationException(String message) {
        super(message);
    }
    /**
     * Creates the exception with a cause only.
     * @param cause underlying cause
     */
    public UnknownTxIsolationException(Throwable cause) {
        super(cause);
    }
}
|
package com.innogrid.gedge.coreedge.model;
import com.google.api.client.util.Key;
import lombok.Data;
import java.io.Serializable;
/**
* Created by kkm on 15. 4. 24.
*/
@Data
public class NodeUsageinfo implements Serializable {
    private static final long serialVersionUID = 1779366790605206372L;
    // "items" entries of the node-usage response, bound from JSON via @Key.
    @Key private NodeUsageItemInfo[] items;
    /** No-arg constructor required for JSON deserialization. */
    public NodeUsageinfo(){}
}
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.net;
import java.io.IOError;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.net.*;
import java.nio.ByteBuffer;
import java.nio.channels.AsynchronousCloseException;
import java.nio.channels.ServerSocketChannel;
import java.util.*;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import com.google.common.base.Function;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.concurrent.DebuggableThreadPoolExecutor;
import org.apache.cassandra.concurrent.StageManager;
import org.apache.cassandra.config.ConfigurationException;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.config.EncryptionOptions;
import org.apache.cassandra.gms.Gossiper;
import org.apache.cassandra.io.util.DataOutputBuffer;
import org.apache.cassandra.locator.ILatencySubscriber;
import org.apache.cassandra.net.io.SerializerType;
import org.apache.cassandra.net.sink.SinkManager;
import org.apache.cassandra.security.SSLFactory;
import org.apache.cassandra.security.streaming.SSLFileStreamTask;
import org.apache.cassandra.service.ReadCallback;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.streaming.FileStreamTask;
import org.apache.cassandra.streaming.StreamHeader;
import org.apache.cassandra.utils.*;
import org.cliffc.high_scale_lib.NonBlockingHashMap;
/**
 * Singleton hub for all inter-node communication: maintains outbound TCP
 * connection pools, accepts inbound connections on a server socket, routes
 * verbs to registered handlers, tracks per-request callbacks and their
 * timeouts, and exposes dropped-message/timeout statistics over JMX.
 *
 * NOTE(review): the timeout counters ({@code totalTimeouts},
 * {@code timeoutsPerHost}, ...) are mutated without synchronization from the
 * callback-expiry thread, so the JMX figures are best-effort approximations.
 */
public final class MessagingService implements MessagingServiceMBean
{
    public static final String MBEAN_NAME = "org.apache.cassandra.net:type=MessagingService";
    public static final int VERSION_07 = 1;
    public static final int version_ = 2;
    //TODO: make this parameter dynamic somehow. Not sure if config is appropriate.
    private SerializerType serializerType_ = SerializerType.BINARY;
    /** we preface every message with this number so the recipient can validate the sender is sane */
    private static final int PROTOCOL_MAGIC = 0xCA552DFA;
    /* This records all the results mapped by message Id */
    private final ExpiringMap<String, Pair<InetAddress, IMessageCallback>> callbacks;
    /* Lookup table for registering message handlers based on the verb. */
    private final Map<StorageService.Verb, IVerbHandler> verbHandlers_;
    /* Thread pool to handle messaging write activities */
    private final ExecutorService streamExecutor_;
    private final NonBlockingHashMap<InetAddress, OutboundTcpConnectionPool> connectionManagers_ = new NonBlockingHashMap<InetAddress, OutboundTcpConnectionPool>();
    private static final Logger logger_ = LoggerFactory.getLogger(MessagingService.class);
    private static final int LOG_DROPPED_INTERVAL_IN_MS = 5000;
    private SocketThread socketThread;
    // Released by listen(); threads that need the service to be accepting
    // connections block on this in waitUntilListening().
    private final SimpleCondition listenGate;
    /**
     * Verbs it's okay to drop if the request has been queued longer than RPC_TIMEOUT. These
     * all correspond to client requests or something triggered by them; we don't want to
     * drop internal messages like bootstrap or repair notifications.
     */
    public static final EnumSet<StorageService.Verb> DROPPABLE_VERBS = EnumSet.of(StorageService.Verb.BINARY,
                                                                                 StorageService.Verb.MUTATION,
                                                                                 StorageService.Verb.READ_REPAIR,
                                                                                 StorageService.Verb.READ,
                                                                                 StorageService.Verb.RANGE_SLICE,
                                                                                 StorageService.Verb.REQUEST_RESPONSE);
    // total dropped message counts for server lifetime
    private final Map<StorageService.Verb, AtomicInteger> droppedMessages = new EnumMap<StorageService.Verb, AtomicInteger>(StorageService.Verb.class);
    // dropped count when last requested for the Recent api. high concurrency isn't necessary here.
    private final Map<StorageService.Verb, Integer> lastDropped = Collections.synchronizedMap(new EnumMap<StorageService.Verb, Integer>(StorageService.Verb.class));
    private final Map<StorageService.Verb, Integer> lastDroppedInternal = new EnumMap<StorageService.Verb, Integer>(StorageService.Verb.class);
    private long totalTimeouts = 0;
    private long recentTotalTimeouts = 0;
    private final Map<String, AtomicLong> timeoutsPerHost = new HashMap<String, AtomicLong>();
    private final Map<String, AtomicLong> recentTimeoutsPerHost = new HashMap<String, AtomicLong>();
    private final List<ILatencySubscriber> subscribers = new ArrayList<ILatencySubscriber>();
    // Callbacks are kept slightly longer than the rpc timeout so a reply that
    // arrives right at the deadline can still find its callback.
    private static final long DEFAULT_CALLBACK_TIMEOUT = (long) (1.1 * DatabaseDescriptor.getRpcTimeout());
    // Lazy-initialization holder idiom: the singleton is created on first
    // access to MSHandle, without explicit locking.
    private static class MSHandle
    {
        public static final MessagingService instance = new MessagingService();
    }
    public static MessagingService instance()
    {
        return MSHandle.instance;
    }
    private MessagingService()
    {
        for (StorageService.Verb verb : DROPPABLE_VERBS)
        {
            droppedMessages.put(verb, new AtomicInteger());
            lastDropped.put(verb, 0);
            lastDroppedInternal.put(verb, 0);
        }
        listenGate = new SimpleCondition();
        verbHandlers_ = new EnumMap<StorageService.Verb, IVerbHandler>(StorageService.Verb.class);
        streamExecutor_ = new DebuggableThreadPoolExecutor("Streaming", DatabaseDescriptor.getCompactionThreadPriority());
        Runnable logDropped = new Runnable()
        {
            public void run()
            {
                logDroppedMessages();
            }
        };
        StorageService.scheduledTasks.scheduleWithFixedDelay(logDropped, LOG_DROPPED_INTERVAL_IN_MS, LOG_DROPPED_INTERVAL_IN_MS, TimeUnit.MILLISECONDS);
        // Invoked by the ExpiringMap reaper when a callback expires without a
        // reply: feed the timeout into the latency snitch and count it.
        Function<Pair<String, Pair<InetAddress, IMessageCallback>>, ?> timeoutReporter = new Function<Pair<String, Pair<InetAddress, IMessageCallback>>, Object>()
        {
            public Object apply(Pair<String, Pair<InetAddress, IMessageCallback>> pair)
            {
                Pair<InetAddress, IMessageCallback> expiredValue = pair.right;
                maybeAddLatency(expiredValue.right, expiredValue.left, (double) DatabaseDescriptor.getRpcTimeout());
                totalTimeouts++;
                String ip = expiredValue.left.getHostAddress();
                AtomicLong c = timeoutsPerHost.get(ip);
                if (c == null)
                {
                    c = new AtomicLong();
                    timeoutsPerHost.put(ip, c);
                }
                c.incrementAndGet();
                // we only create AtomicLong instances here, so that the write
                // access to the hashmap happens single-threadedly.
                if (recentTimeoutsPerHost.get(ip) == null)
                    recentTimeoutsPerHost.put(ip, new AtomicLong());
                return null;
            }
        };
        callbacks = new ExpiringMap<String, Pair<InetAddress, IMessageCallback>>(DEFAULT_CALLBACK_TIMEOUT, timeoutReporter);
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        try
        {
            mbs.registerMBean(this, new ObjectName(MBEAN_NAME));
        }
        catch (Exception e)
        {
            throw new RuntimeException(e);
        }
    }
    /**
     * Track latency information for the dynamic snitch
     * @param cb: the callback associated with this message -- this lets us know if it's a message type we're interested in
     * @param address: the host that replied to the message
     * @param latency
     */
    public void maybeAddLatency(IMessageCallback cb, InetAddress address, double latency)
    {
        if (cb.isLatencyForSnitch())
            addLatency(address, latency);
    }
    /** Broadcast a latency measurement to all registered subscribers. */
    public void addLatency(InetAddress address, double latency)
    {
        for (ILatencySubscriber subscriber : subscribers)
            subscriber.receiveTiming(address, latency);
    }
    /** called from gossiper when it notices a node is not responding. */
    public void convict(InetAddress ep)
    {
        logger_.debug("Resetting pool for " + ep);
        getConnectionPool(ep).reset();
    }
    /**
     * Listen on the specified port.
     * @param localEp InetAddress whose port to listen on.
     */
    public void listen(InetAddress localEp) throws IOException, ConfigurationException
    {
        socketThread = new SocketThread(getServerSocket(localEp), "ACCEPT-" + localEp);
        socketThread.start();
        listenGate.signalAll();
    }
    /**
     * Build the server socket for inbound connections: SSL when internode
     * encryption is configured for all traffic, plain NIO otherwise.
     */
    private ServerSocket getServerSocket(InetAddress localEp) throws IOException, ConfigurationException
    {
        final ServerSocket ss;
        if (DatabaseDescriptor.getEncryptionOptions() != null && DatabaseDescriptor.getEncryptionOptions().internode_encryption == EncryptionOptions.InternodeEncryption.all)
        {
            ss = SSLFactory.getServerSocket(DatabaseDescriptor.getEncryptionOptions(), localEp, DatabaseDescriptor.getStoragePort());
            // setReuseAddress happens in the factory.
            logger_.info("Starting Encrypted Messaging Service on port {}", DatabaseDescriptor.getStoragePort());
        }
        else
        {
            ServerSocketChannel serverChannel = ServerSocketChannel.open();
            ss = serverChannel.socket();
            ss.setReuseAddress(true);
            InetSocketAddress address = new InetSocketAddress(localEp, DatabaseDescriptor.getStoragePort());
            try
            {
                ss.bind(address);
            }
            catch (BindException e)
            {
                // Translate the two common bind failures into actionable
                // configuration errors instead of a bare BindException.
                if (e.getMessage().contains("in use"))
                    throw new ConfigurationException(address + " is in use by another process. Change listen_address:storage_port in cassandra.yaml to values that do not conflict with other services");
                else if (e.getMessage().contains("Cannot assign requested address"))
                    throw new ConfigurationException("Unable to bind to address " + address + ". Set listen_address in cassandra.yaml to an interface you can bind to, e.g., your private IP address on EC2");
                else
                    throw e;
            }
            logger_.info("Starting Messaging Service on {}", address);
        }
        return ss;
    }
    /** Block until listen() has been called and the accept thread is up. */
    public void waitUntilListening()
    {
        try
        {
            listenGate.await();
        }
        catch (InterruptedException ie)
        {
            logger_.debug("await interrupted");
        }
    }
    public OutboundTcpConnectionPool getConnectionPool(InetAddress to)
    {
        OutboundTcpConnectionPool cp = connectionManagers_.get(to);
        if (cp == null)
        {
            // putIfAbsent + re-get keeps exactly one pool per endpoint even
            // when several threads race to create it.
            connectionManagers_.putIfAbsent(to, new OutboundTcpConnectionPool(to));
            cp = connectionManagers_.get(to);
        }
        return cp;
    }
    public OutboundTcpConnection getConnection(InetAddress to, Message msg)
    {
        return getConnectionPool(to).getConnection(msg);
    }
    /**
     * Register a verb and the corresponding verb handler with the
     * Messaging Service.
     * @param verb
     * @param verbHandler handler for the specified verb
     */
    public void registerVerbHandlers(StorageService.Verb verb, IVerbHandler verbHandler)
    {
        assert !verbHandlers_.containsKey(verb);
        verbHandlers_.put(verb, verbHandler);
    }
    /**
     * This method returns the verb handler associated with the registered
     * verb. If no handler has been registered then null is returned.
     * @param type for which the verb handler is sought
     * @return a reference to IVerbHandler which is the handler for the specified verb
     */
    public IVerbHandler getVerbHandler(StorageService.Verb type)
    {
        return verbHandlers_.get(type);
    }
    private void addCallback(IMessageCallback cb, String messageId, InetAddress to)
    {
        addCallback(cb, messageId, to, DEFAULT_CALLBACK_TIMEOUT);
    }
    private void addCallback(IMessageCallback cb, String messageId, InetAddress to, long timeout)
    {
        Pair<InetAddress, IMessageCallback> previous = callbacks.put(messageId, new Pair<InetAddress, IMessageCallback>(to, cb), timeout);
        // message ids are generated by nextId() and must never collide
        assert previous == null;
    }
    private static AtomicInteger idGen = new AtomicInteger(0);
    // TODO make these integers to avoid unnecessary int -> string -> int conversions
    private static String nextId()
    {
        return Integer.toString(idGen.incrementAndGet());
    }
    /*
     * @see #sendRR(Message message, InetAddress to, IMessageCallback cb, long timeout)
     */
    public String sendRR(Message message, InetAddress to, IMessageCallback cb)
    {
        return sendRR(message, to, cb, DEFAULT_CALLBACK_TIMEOUT);
    }
    /**
     * Send a message to a given endpoint. This method specifies a callback
     * which is invoked with the actual response.
     * @param message message to be sent.
     * @param to endpoint to which the message needs to be sent
     * @param cb callback interface which is used to pass the responses or
     *           suggest that a timeout occurred to the invoker of the send().
     * @param timeout the timeout used for expiration
     * @return a reference to message id used to match with the result
     */
    public String sendRR(Message message, InetAddress to, IMessageCallback cb, long timeout)
    {
        String id = nextId();
        addCallback(cb, id, to, timeout);
        sendOneWay(message, id, to);
        return id;
    }
    public void sendOneWay(Message message, InetAddress to)
    {
        sendOneWay(message, nextId(), to);
    }
    public void sendReply(Message message, String id, InetAddress to)
    {
        sendOneWay(message, id, to);
    }
    /**
     * Send a message to a given endpoint. similar to sendRR(Message, InetAddress, IAsyncCallback)
     * @param producer source of the message, serialized for the recipient's wire version
     * @param to endpoint to which the message needs to be sent
     * @param cb callback that processes responses.
     * @return a reference to the message id use to match with the result.
     */
    public String sendRR(MessageProducer producer, InetAddress to, IAsyncCallback cb)
    {
        try
        {
            return sendRR(producer.getMessage(Gossiper.instance.getVersion(to)), to, cb);
        }
        catch (IOException ex)
        {
            // happened during message creation.
            throw new IOError(ex);
        }
    }
    /**
     * Send a message to a given endpoint. This method adheres to the fire and forget
     * style messaging.
     * @param message messages to be sent.
     * @param to endpoint to which the message needs to be sent
     */
    private void sendOneWay(Message message, String id, InetAddress to)
    {
        if (logger_.isTraceEnabled())
            logger_.trace(FBUtilities.getLocalAddress() + " sending " + message.getVerb() + " to " + id + "@" + to);
        // do local deliveries
        if ( message.getFrom().equals(to) )
        {
            receive(message, id);
            return;
        }
        // message sinks are a testing hook
        Message processedMessage = SinkManager.processClientMessage(message, id, to);
        if (processedMessage == null)
        {
            return;
        }
        // get pooled connection (really, connection queue)
        OutboundTcpConnection connection = getConnection(to, message);
        // pack message with header in a bytebuffer
        byte[] data;
        try
        {
            DataOutputBuffer buffer = new DataOutputBuffer();
            buffer.writeUTF(id);
            Message.serializer().serialize(message, buffer, message.getVersion());
            data = buffer.getData();
        }
        catch (IOException e)
        {
            throw new RuntimeException(e);
        }
        assert data.length > 0;
        ByteBuffer buffer = packIt(data , false, message.getVersion());
        // write it
        connection.write(buffer);
    }
    public IAsyncResult sendRR(Message message, InetAddress to)
    {
        IAsyncResult iar = new AsyncResult();
        sendRR(message, to, iar);
        return iar;
    }
    /**
     * Stream a file from source to destination. This is highly optimized
     * to not hold any of the contents of the file in memory.
     * @param header Header contains file to stream and other metadata.
     * @param to endpoint to which we need to stream the file.
     */
    public void stream(StreamHeader header, InetAddress to)
    {
        /* Streaming asynchronously on streamExector_ threads. */
        EncryptionOptions encryption = DatabaseDescriptor.getEncryptionOptions();
        if (encryption != null && encryption.internode_encryption == EncryptionOptions.InternodeEncryption.all)
            streamExecutor_.execute(new SSLFileStreamTask(header, to));
        else
            streamExecutor_.execute(new FileStreamTask(header, to));
    }
    public void register(ILatencySubscriber subcriber)
    {
        subscribers.add(subcriber);
    }
    /** blocks until the processing pools are empty and done. */
    public void waitFor() throws InterruptedException
    {
        while (!streamExecutor_.isTerminated())
            streamExecutor_.awaitTermination(5, TimeUnit.SECONDS);
    }
    /** Stop accepting connections and shut down the streaming/callback machinery. */
    public void shutdown()
    {
        logger_.info("Shutting down MessageService...");
        try
        {
            socketThread.close();
        }
        catch (IOException e)
        {
            throw new IOError(e);
        }
        streamExecutor_.shutdownNow();
        callbacks.shutdown();
        logger_.info("Shutdown complete (no further commands will be processed)");
    }
    /** Hand an inbound (or locally-delivered) message to its verb's stage. */
    public void receive(Message message, String id)
    {
        if (logger_.isTraceEnabled())
            logger_.trace(FBUtilities.getLocalAddress() + " received " + message.getVerb()
                          + " from " + id + "@" + message.getFrom());
        // message sinks are a testing hook; a sink may swallow the message
        message = SinkManager.processServerMessage(message, id);
        if (message == null)
            return;
        Runnable runnable = new MessageDeliveryTask(message, id);
        ExecutorService stage = StageManager.getStage(message.getMessageType());
        assert stage != null : "No stage for message type " + message.getMessageType();
        stage.execute(runnable);
    }
    public Pair<InetAddress, IMessageCallback> removeRegisteredCallback(String messageId)
    {
        return callbacks.remove(messageId);
    }
    public long getRegisteredCallbackAge(String messageId)
    {
        return callbacks.getAge(messageId);
    }
    public static void validateMagic(int magic) throws IOException
    {
        if (magic != PROTOCOL_MAGIC)
            throw new IOException("invalid protocol header");
    }
    /** Extract the n-bit field of x whose most significant bit is at position p. */
    public static int getBits(int x, int p, int n)
    {
        return x >>> (p + 1) - n & ~(-1 << n);
    }
    public ByteBuffer packIt(byte[] bytes, boolean compress, int version)
    {
        /*
             Setting up the protocol header. This is 4 bytes long
             represented as an integer. The first 2 bits indicate
             the serializer type. The 3rd bit indicates if compression
             is turned on or off. It is turned off by default. The 4th
             bit indicates if we are in streaming mode. It is turned off
             by default. The 5th-8th bits are reserved for future use.
             The next 8 bits indicate a version number. Remaining 16 bits
             are not used currently.
        */
        int header = 0;
        // Setting up the serializer bit
        header |= serializerType_.ordinal();
        // set compression bit.
        if (compress)
            header |= 4;
        // Setting up the version bit
        header |= (version << 8);
        // frame layout: | magic (4) | header (4) | length (4) | payload |
        ByteBuffer buffer = ByteBuffer.allocate(4 + 4 + 4 + bytes.length);
        buffer.putInt(PROTOCOL_MAGIC);
        buffer.putInt(header);
        buffer.putInt(bytes.length);
        buffer.put(bytes);
        buffer.flip();
        return buffer;
    }
    public ByteBuffer constructStreamHeader(StreamHeader streamHeader, boolean compress, int version)
    {
        /*
            Setting up the protocol header. This is 4 bytes long
            represented as an integer. The first 2 bits indicate
            the serializer type. The 3rd bit indicates if compression
            is turned on or off. It is turned off by default. The 4th
            bit indicates if we are in streaming mode. It is turned off
            by default. The following 4 bits are reserved for future use.
            The next 8 bits indicate a version number. Remaining 16 bits
            are not used currently.
        */
        int header = 0;
        // Setting up the serializer bit
        header |= serializerType_.ordinal();
        // set compression bit.
        if ( compress )
            header |= 4;
        // set streaming bit
        header |= 8;
        // Setting up the version bit
        header |= (version << 8);
        /* Finished the protocol header setup */
        /* Adding the StreamHeader which contains the session Id along
         * with the pendingfile info for the stream.
         * | Session Id | Pending File Size | Pending File | Bool more files |
         * | No. of Pending files | Pending Files ... |
         */
        byte[] bytes;
        try
        {
            DataOutputBuffer buffer = new DataOutputBuffer();
            StreamHeader.serializer().serialize(streamHeader, buffer, version);
            bytes = buffer.getData();
        }
        catch (IOException e)
        {
            throw new RuntimeException(e);
        }
        assert bytes.length > 0;
        ByteBuffer buffer = ByteBuffer.allocate(4 + 4 + 4 + bytes.length);
        buffer.putInt(PROTOCOL_MAGIC);
        buffer.putInt(header);
        buffer.putInt(bytes.length);
        buffer.put(bytes);
        buffer.flip();
        return buffer;
    }
    public void incrementDroppedMessages(StorageService.Verb verb)
    {
        assert DROPPABLE_VERBS.contains(verb) : "Verb " + verb + " should not legally be dropped";
        droppedMessages.get(verb).incrementAndGet();
    }
    /**
     * Periodically logs how many messages were dropped since the previous
     * log pass (lastDroppedInternal tracks the high-water mark per verb) and
     * dumps thread-pool stats when anything was dropped.
     */
    private void logDroppedMessages()
    {
        boolean logTpstats = false;
        for (Map.Entry<StorageService.Verb, AtomicInteger> entry : droppedMessages.entrySet())
        {
            AtomicInteger dropped = entry.getValue();
            StorageService.Verb verb = entry.getKey();
            int recent = dropped.get() - lastDroppedInternal.get(verb);
            if (recent > 0)
            {
                logTpstats = true;
                // "recent" is the delta since the last log pass, not the
                // lifetime total, so report it against the log interval.
                logger_.info("{} {} messages dropped in last {}ms",
                             new Object[] {recent, verb, LOG_DROPPED_INTERVAL_IN_MS});
                lastDroppedInternal.put(verb, dropped.get());
            }
        }
        if (logTpstats)
            StatusLogger.log();
    }
    /** Accept loop: spawns an IncomingTcpConnection per inbound socket. */
    private static class SocketThread extends Thread
    {
        private final ServerSocket server;
        SocketThread(ServerSocket server, String name)
        {
            super(name);
            this.server = server;
        }
        public void run()
        {
            while (true)
            {
                try
                {
                    Socket socket = server.accept();
                    new IncomingTcpConnection(socket).start();
                }
                catch (AsynchronousCloseException e)
                {
                    // this happens when another thread calls close().
                    logger_.info("MessagingService shutting down server thread.");
                    break;
                }
                catch (IOException e)
                {
                    throw new RuntimeException(e);
                }
            }
        }
        void close() throws IOException
        {
            server.close();
        }
    }
    public Map<String, Integer> getCommandPendingTasks()
    {
        Map<String, Integer> pendingTasks = new HashMap<String, Integer>();
        for (Map.Entry<InetAddress, OutboundTcpConnectionPool> entry : connectionManagers_.entrySet())
            pendingTasks.put(entry.getKey().getHostAddress(), entry.getValue().cmdCon.getPendingMessages());
        return pendingTasks;
    }
    public Map<String, Long> getCommandCompletedTasks()
    {
        Map<String, Long> completedTasks = new HashMap<String, Long>();
        for (Map.Entry<InetAddress, OutboundTcpConnectionPool> entry : connectionManagers_.entrySet())
            completedTasks.put(entry.getKey().getHostAddress(), entry.getValue().cmdCon.getCompletedMesssages());
        return completedTasks;
    }
    public Map<String, Integer> getResponsePendingTasks()
    {
        Map<String, Integer> pendingTasks = new HashMap<String, Integer>();
        for (Map.Entry<InetAddress, OutboundTcpConnectionPool> entry : connectionManagers_.entrySet())
            pendingTasks.put(entry.getKey().getHostAddress(), entry.getValue().ackCon.getPendingMessages());
        return pendingTasks;
    }
    public Map<String, Long> getResponseCompletedTasks()
    {
        Map<String, Long> completedTasks = new HashMap<String, Long>();
        for (Map.Entry<InetAddress, OutboundTcpConnectionPool> entry : connectionManagers_.entrySet())
            completedTasks.put(entry.getKey().getHostAddress(), entry.getValue().ackCon.getCompletedMesssages());
        return completedTasks;
    }
    public static long getDefaultCallbackTimeout()
    {
        return DEFAULT_CALLBACK_TIMEOUT;
    }
    public Map<String, Integer> getDroppedMessages()
    {
        Map<String, Integer> map = new HashMap<String, Integer>();
        for (Map.Entry<StorageService.Verb, AtomicInteger> entry : droppedMessages.entrySet())
            map.put(entry.getKey().toString(), entry.getValue().get());
        return map;
    }
    public Map<String, Integer> getRecentlyDroppedMessages()
    {
        Map<String, Integer> map = new HashMap<String, Integer>();
        for (Map.Entry<StorageService.Verb, AtomicInteger> entry : droppedMessages.entrySet())
        {
            StorageService.Verb verb = entry.getKey();
            Integer dropped = entry.getValue().get();
            // report the delta since the previous JMX request, then reset
            Integer recentlyDropped = dropped - lastDropped.get(verb);
            map.put(verb.toString(), recentlyDropped);
            lastDropped.put(verb, dropped);
        }
        return map;
    }
    public long getTotalTimeouts()
    {
        return totalTimeouts;
    }
    public long getRecentTotalTimouts()
    {
        long recent = totalTimeouts - recentTotalTimeouts;
        recentTotalTimeouts = totalTimeouts;
        return recent;
    }
    public Map<String, Long> getTimeoutsPerHost()
    {
        Map<String, Long> result = new HashMap<String, Long>();
        for (Map.Entry<String, AtomicLong> entry: timeoutsPerHost.entrySet())
        {
            result.put(entry.getKey(), entry.getValue().get());
        }
        return result;
    }
    public Map<String, Long> getRecentTimeoutsPerHost()
    {
        Map<String, Long> result = new HashMap<String, Long>();
        for (Map.Entry<String, AtomicLong> entry: recentTimeoutsPerHost.entrySet())
        {
            String ip = entry.getKey();
            AtomicLong recent = entry.getValue();
            Long timeout = timeoutsPerHost.get(ip).get();
            // per-host delta since the last call; getAndSet resets the baseline
            result.put(ip, timeout - recent.getAndSet(timeout));
        }
        return result;
    }
}
|
package demo_enjoy;
import demo_enjoy.dso.AuthProcessorImpl;
import org.noear.solon.annotation.Bean;
import org.noear.solon.annotation.Configuration;
import org.noear.solon.auth.AuthUtil;
/**
 * Application configuration that wires the solon-auth adapter at startup.
 *
 * @author noear 2021/6/12 created
 */
@Configuration
public class Config {
    @Bean
    public void authAdapter(){
        // Register the application's permission/role checks with the global
        // auth adapter. NOTE(review): a void @Bean method appears to be used
        // purely for its side effect during container init — confirm this is
        // the intended solon idiom.
        AuthUtil.adapter()
                .processor(new AuthProcessorImpl());
    }
}
|
package com.jakewharton.rxbinding2.support.v17.leanback.widget;
import android.app.Activity;
import android.os.Bundle;
import android.support.v17.leanback.widget.SearchBar;
import android.support.v17.leanback.widget.SearchEditText;
import android.support.v17.leanback.widget.SearchOrbView;
import com.jakewharton.rxbinding2.support.v17.leanback.R;
/** Host activity that exposes a leanback SearchBar for the Rx binding tests. */
public final class RxSearchBarTestActivity extends Activity {
  SearchBar searchBar;
  SearchEditText searchEditText;

  @Override protected void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    setContentView(R.layout.lb_search_fragment);

    // Grab the search bar and its embedded edit text so tests can drive them.
    searchBar = (SearchBar) findViewById(R.id.lb_search_bar);
    searchEditText = (SearchEditText) searchBar.findViewById(R.id.lb_search_text_editor);

    // Silence the speech orb's animation and sound effects to reduce flakiness.
    SearchOrbView orbView = (SearchOrbView) searchBar.findViewById(R.id.lb_search_bar_speech_orb);
    orbView.enableOrbColorAnimation(false);
    orbView.setSoundEffectsEnabled(false);
    orbView.clearAnimation();
  }
}
|
import java.util.Stack;
/**
 * LeetCode 232: implement a first-in-first-out queue using two LIFO stacks.
 */
public class leetcode_232 {
    class MyQueue {
        // Receives pushed elements (newest on top).
        private Stack<Integer> one;
        // Serves pop/peek in FIFO order (oldest on top) after a transfer.
        private Stack<Integer> two;

        /** Initialize your data structure here. */
        public MyQueue() {
            one = new Stack<>();
            two = new Stack<>();
        }

        /** Push element x to the back of queue. */
        public void push(int x) {
            one.push(x);
        }

        /** Removes the element from in front of queue and returns that element. */
        public int pop() {
            transferIfNeeded();
            return two.pop();
        }

        /** Get the front element without removing it. */
        public int peek() {
            transferIfNeeded();
            return two.peek();
        }

        /** Returns whether the queue is empty. */
        public boolean empty() {
            return one.isEmpty() && two.isEmpty();
        }

        /**
         * When the output stack is empty, reverse the input stack into it so
         * the oldest element ends up on top. Each element is moved at most
         * once, keeping pop/peek amortized O(1). Extracted because pop() and
         * peek() previously duplicated this loop.
         */
        private void transferIfNeeded() {
            if (two.isEmpty()) {
                while (!one.isEmpty()) {
                    two.push(one.pop());
                }
            }
        }
    }
}
|
package guru.springframework.domain;
import lombok.Data;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
/**
 * Created by jesussarco on 06/09/2020
 */
// JPA entity representing a vendor. Lombok @Data supplies getters, setters,
// equals/hashCode, and toString.
@Data
@Entity
public class Vendor {
    // Surrogate primary key generated by the database identity column.
    @Id
    @GeneratedValue(strategy = GenerationType.IDENTITY)
    private Long id;
    // Vendor display name.
    private String name;
}
|
package com.zhcs.dao;
import java.util.List;
import java.util.Map;
import com.zhcs.entity.DriverEntity;
//*****************************************************************************
/**
* <p>Title:DriverDao</p>
* <p>Description: 司机管理</p>
* <p>Copyright: Copyright (c) 2017</p>
* <p>Company: 深圳市智慧城市管家信息科技有限公司 </p>
* @author 刘晓东 - Alter
* @version v1.0 2017年2月23日
*/
//*****************************************************************************
public interface DriverDao extends BaseDao<DriverEntity> {
    // Returns driver rows (as column-name -> value maps) matching the filter
    // criteria in the given parameter map.
    List<Map<String, Object>> queryFullList(Map<String, Object> map);
    // Variant of queryFullList with a different column/join set — presumably
    // backing a separate screen; TODO confirm against the mapper definition.
    List<Map<String, Object>> queryFullList1(Map<String, Object> map);
}
|
package com.alipay.api.domain;
import com.alipay.api.AlipayObject;
import com.alipay.api.internal.mapping.ApiField;
/**
 * Industry/category information.
 *
 * @author auto create
 * @since 1.0, 2020-07-06 10:08:06
 */
public class AmpeCategoryInfo extends AlipayObject {

    private static final long serialVersionUID = 3815169998369861184L;

    /** Human-readable description of the category. */
    @ApiField("category_desc")
    private String categoryDesc;

    /** Identifier of the category. */
    @ApiField("category_id")
    private String categoryId;

    public String getCategoryDesc() {
        return categoryDesc;
    }

    public void setCategoryDesc(String categoryDesc) {
        this.categoryDesc = categoryDesc;
    }

    public String getCategoryId() {
        return categoryId;
    }

    public void setCategoryId(String categoryId) {
        this.categoryId = categoryId;
    }
}
|
// Copyright 2000-2020 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.intellij.java.psi;
import com.intellij.openapi.application.ApplicationManager;
import com.intellij.openapi.application.ex.PathManagerEx;
import com.intellij.openapi.util.Comparing;
import com.intellij.openapi.util.Ref;
import com.intellij.openapi.vfs.LocalFileSystem;
import com.intellij.openapi.vfs.VirtualFile;
import com.intellij.psi.*;
import com.intellij.psi.impl.JavaPsiFacadeEx;
import com.intellij.psi.search.GlobalSearchScope;
import com.intellij.psi.util.PsiUtil;
import com.intellij.testFramework.PsiTestUtil;
import com.intellij.util.ref.GCWatcher;
import java.io.File;
/**
* @author dsl
*/
public class TypesTest extends GenericsTestCase {
  @Override
  protected void setUp() throws Exception {
    super.setUp();
    setupGenericSampleClasses();
    // Test data lives under <testdata>/psi/types/<testName>; normalize the
    // separator so LocalFileSystem can resolve the path on any platform.
    final String testPath = PathManagerEx.getTestDataPath().replace(File.separatorChar, '/') + "/psi/types/" + getTestName(true);
    final VirtualFile[] testRoot = { null };
    // VFS refresh/lookup must happen inside a write action.
    ApplicationManager.getApplication().runWriteAction(() -> {
      testRoot[0] = LocalFileSystem.getInstance().refreshAndFindFileByPath(testPath);
    });
    // Not every test has a dedicated data directory; only add the source
    // root when one exists.
    if (testRoot[0] != null) {
      PsiTestUtil.addSourceRoot(myModule, testRoot[0]);
    }
  }
public void testSimpleStuff() {
final JavaPsiFacadeEx psiManager = getJavaFacade();
final PsiElementFactory factory = psiManager.getElementFactory();
final PsiClass classA = psiManager.findClass("A");
assertNotNull(classA);
final PsiMethod method = classA.getMethods()[0];
final PsiStatement[] methodStatements = method.getBody().getStatements();
final PsiDeclarationStatement declarationStatement = (PsiDeclarationStatement) methodStatements[0];
final PsiVariable varList = (PsiVariable) declarationStatement.getDeclaredElements()[0];
final PsiType typeListOfA = factory.createTypeFromText("test.List<java.lang.String>", null);
assertEquals(varList.getType(), typeListOfA);
final PsiType typeListOfObject = factory.createTypeFromText("test.List<java.lang.Object>", null);
assertFalse(varList.getType().equals(typeListOfObject));
final PsiReferenceExpression methodExpression
= ((PsiMethodCallExpression) ((PsiExpressionStatement) methodStatements[1]).getExpression()).getMethodExpression();
final JavaResolveResult resolveResult = methodExpression.advancedResolve(false);
assertTrue(resolveResult.getElement() instanceof PsiMethod);
final PsiMethod methodFromList = (PsiMethod) resolveResult.getElement();
final PsiType typeOfFirstParameterOfAdd = methodFromList.getParameterList().getParameters()[0].getType();
final PsiType substitutedType = resolveResult.getSubstitutor().substitute(typeOfFirstParameterOfAdd);
final PsiClassType typeA = factory.createTypeByFQClassName("java.lang.String");
assertEquals(typeA, substitutedType);
assertTrue(typeA.equalsToText("java.lang.String"));
final PsiType aListIteratorType = ((PsiExpressionStatement) methodStatements[2]).getExpression().getType();
final PsiType aIteratorType = factory.createTypeFromText("test.Iterator<java.lang.String>", null);
assertEquals(aIteratorType, aListIteratorType);
final PsiType objectIteratorType = factory.createTypeFromText("test.Iterator<java.lang.Object>", null);
assertFalse(objectIteratorType.equals(aListIteratorType));
}
public void testRawTypes() {
final JavaPsiFacadeEx psiManager = getJavaFacade();
final PsiElementFactory factory = psiManager.getElementFactory();
final PsiClass classA = psiManager.findClass("A");
assertNotNull(classA);
final PsiMethod method = classA.getMethods()[0];
final PsiStatement[] methodStatements = method.getBody().getStatements();
final PsiDeclarationStatement declarationStatement = (PsiDeclarationStatement) methodStatements[0];
final PsiVariable varList = (PsiVariable) declarationStatement.getDeclaredElements()[0];
final PsiType typeFromText = factory.createTypeFromText("test.List", null);
assertEquals(varList.getType(), typeFromText);
final PsiReferenceExpression methodExpression = ((PsiMethodCallExpression) ((PsiExpressionStatement) methodStatements[1]).getExpression()).getMethodExpression();
final JavaResolveResult resolveResult = methodExpression.advancedResolve(false);
assertTrue(resolveResult.getElement() instanceof PsiMethod);
final PsiMethod methodFromList = (PsiMethod) resolveResult.getElement();
final PsiType typeOfFirstParameterOfAdd = methodFromList.getParameterList().getParameters()[0].getType();
final PsiType substitutedType = resolveResult.getSubstitutor().substitute(typeOfFirstParameterOfAdd);
assertEquals(PsiType.getJavaLangObject(getPsiManager(), method.getResolveScope()), substitutedType);
final PsiType methodCallType = ((PsiExpressionStatement) methodStatements[2]).getExpression().getType();
final PsiType rawIteratorType = factory.createTypeFromText("test.Iterator", null);
assertEquals(rawIteratorType, methodCallType);
}
/**
 * Checks substitution through an inheritor: for {@code test.IntList}
 * (presumably declared as a subtype fixing the element type to Integer —
 * the test data is not visible here), the parameter of {@code add}
 * substitutes to {@code java.lang.Integer} and the iterator call returns
 * {@code test.Iterator<java.lang.Integer>}.
 */
public void testSubstWithInheritor() {
final JavaPsiFacadeEx psiManager = getJavaFacade();
final PsiElementFactory factory = psiManager.getElementFactory();
final PsiClass classA = psiManager.findClass("A");
assertNotNull(classA);
final PsiMethod method = classA.getMethods()[0];
final PsiStatement[] methodStatements = method.getBody().getStatements();
final PsiDeclarationStatement declarationStatement = (PsiDeclarationStatement) methodStatements[0];
final PsiVariable varList = (PsiVariable) declarationStatement.getDeclaredElements()[0];
final PsiType typeFromText = factory.createTypeFromText("test.IntList", null);
assertEquals(varList.getType(), typeFromText);
final PsiReferenceExpression methodExpression
= ((PsiMethodCallExpression) ((PsiExpressionStatement) methodStatements[1]).getExpression()).getMethodExpression();
final JavaResolveResult resolveResult = methodExpression.advancedResolve(false);
assertTrue(resolveResult.getElement() instanceof PsiMethod);
final PsiMethod methodFromList = (PsiMethod) resolveResult.getElement();
final PsiType typeOfFirstParameterOfAdd = methodFromList.getParameterList().getParameters()[0].getType();
// The inherited type parameter must substitute to Integer, not Object.
final PsiType substitutedType = resolveResult.getSubstitutor().substitute(typeOfFirstParameterOfAdd);
final PsiType javaLangInteger = factory.createTypeFromText("java.lang.Integer", null);
assertEquals(javaLangInteger, substitutedType);
final PsiType intListIteratorReturnType = ((PsiExpressionStatement) methodStatements[2]).getExpression().getType();
final PsiType integerIteratorType = factory.createTypeFromText("test.Iterator<java.lang.Integer>", null);
assertEquals(integerIteratorType, intListIteratorReturnType);
// Sanity check: differently parameterized iterators must not compare equal.
final PsiType objectIteratorType = factory.createTypeFromText("test.Iterator<java.lang.Object>", null);
assertFalse(objectIteratorType.equals(integerIteratorType));
}
/**
 * Checks that a method call on a raw-typed qualifier still resolves: the
 * qualifier type has no parameters, equals the raw {@code test.List}, and
 * the called method resolves to {@code test.List#add}.
 */
public void testSimpleRawTypeInMethodArg() {
final JavaPsiFacadeEx psiManager = getJavaFacade();
final PsiElementFactory factory = psiManager.getElementFactory();
final PsiClass classA = psiManager.findClass("A");
assertNotNull(classA);
final PsiMethod method = classA.getMethods()[0];
final PsiStatement[] methodStatements = method.getBody().getStatements();
final PsiVariable variable = (PsiVariable) ((PsiDeclarationStatement) methodStatements[0]).getDeclaredElements()[0];
final PsiClassType type = (PsiClassType) variable.getType();
// The raw class type must still resolve to its class.
final PsiClassType.ClassResolveResult resolveClassTypeResult = type.resolveGenerics();
assertNotNull(resolveClassTypeResult.getElement());
final PsiReferenceExpression methodExpression
= ((PsiMethodCallExpression) ((PsiExpressionStatement) methodStatements[2]).getExpression()).getMethodExpression();
final PsiExpression qualifierExpression = methodExpression.getQualifierExpression();
// The qualifier is raw: no type arguments present.
final PsiClassType qualifierType = (PsiClassType) qualifierExpression.getType();
assertFalse(qualifierType.hasParameters());
final PsiType typeFromText = factory.createTypeFromText("test.List", null);
assertEquals(qualifierType, typeFromText);
final PsiElement psiElement = ((PsiReferenceExpression) qualifierExpression).resolve();
assertTrue(psiElement instanceof PsiVariable);
final JavaResolveResult resolveResult = methodExpression.advancedResolve(false);
assertTrue(resolveResult.getElement() instanceof PsiMethod);
final PsiMethod methodFromList = (PsiMethod) resolveResult.getElement();
assertEquals("add", methodFromList.getName());
assertEquals("test.List", methodFromList.getContainingClass().getQualifiedName());
}
/**
 * Checks that a call whose argument involves a raw type still resolves to
 * {@code test.List#putAll}.
 */
public void testRawTypeInMethodArg() {
final PsiClass aClass = getJavaFacade().findClass("A");
assertNotNull(aClass);
// Third statement of A's first method is the putAll(...) call under test.
final PsiStatement[] body = aClass.getMethods()[0].getBody().getStatements();
final PsiExpressionStatement callStatement = (PsiExpressionStatement) body[2];
final PsiMethodCallExpression call = (PsiMethodCallExpression) callStatement.getExpression();
final JavaResolveResult result = call.getMethodExpression().advancedResolve(false);
final PsiElement resolved = result.getElement();
assertTrue(resolved instanceof PsiMethod);
final PsiMethod putAll = (PsiMethod) resolved;
assertEquals("putAll", putAll.getName());
assertEquals("test.List", putAll.getContainingClass().getQualifiedName());
}
/**
 * Checks bounded type parameters: the declared variable and its initializer
 * both have the raw {@code test.Iterator} type and are mutually assignable.
 */
public void testBoundedParams() {
final JavaPsiFacadeEx facade = getJavaFacade();
final PsiElementFactory factory = facade.getElementFactory();
final PsiClass aClass = facade.findClass("A");
assertNotNull(aClass);
final PsiStatement[] body = aClass.getMethods()[0].getBody().getStatements();
final PsiVariable declaredVar = (PsiVariable) ((PsiDeclarationStatement) body[0]).getDeclaredElements()[0];
final PsiType rawIterator = factory.createTypeFromText("test.Iterator", null);
final PsiType declaredType = declaredVar.getType();
assertEquals(declaredType, rawIterator);
final PsiType initializerType = declaredVar.getInitializer().getType();
assertEquals(initializerType, rawIterator);
assertTrue(declaredType.isAssignableFrom(initializerType));
}
/**
 * Checks extension of a raw type: the declared variable and its initializer
 * both carry the raw {@code test.Iterator} type and are assignable.
 * NOTE(review): the body is identical to {@code testBoundedParams}; each test
 * presumably runs against different test data selected by the test name —
 * confirm against the data directory.
 */
public void testRawTypeExtension() {
final JavaPsiFacadeEx facade = getJavaFacade();
final PsiElementFactory elementFactory = facade.getElementFactory();
final PsiClass classUnderTest = facade.findClass("A");
assertNotNull(classUnderTest);
final PsiStatement[] statements = classUnderTest.getMethods()[0].getBody().getStatements();
final PsiDeclarationStatement decl = (PsiDeclarationStatement) statements[0];
final PsiVariable declared = (PsiVariable) decl.getDeclaredElements()[0];
final PsiType expectedRawIterator = elementFactory.createTypeFromText("test.Iterator", null);
final PsiType actualDeclaredType = declared.getType();
assertEquals(actualDeclaredType, expectedRawIterator);
final PsiType actualInitializerType = declared.getInitializer().getType();
assertEquals(actualInitializerType, expectedRawIterator);
assertTrue(actualDeclaredType.isAssignableFrom(actualInitializerType));
}
/**
 * Checks that inside a generic class the type of an expression referring to
 * the class's own type parameter is that parameter's type {@code T}.
 */
public void testTypesInGenericClass() {
final JavaPsiFacadeEx psiManager = getJavaFacade();
final PsiElementFactory factory = psiManager.getElementFactory();
final PsiClass classA = psiManager.findClass("A");
assertNotNull(classA);
final PsiTypeParameter parameterT = classA.getTypeParameters()[0];
assertEquals("T", parameterT.getName());
final PsiMethod method = classA.findMethodsByName("method", false)[0];
final PsiType type = ((PsiExpressionStatement) method.getBody().getStatements()[0]).getExpression().getType();
// A type created from the type parameter must equal the expression's type.
final PsiClassType typeT = factory.createType(parameterT);
assertEquals("T", typeT.getPresentableText());
assertEquals(typeT, type);
}
/**
 * Checks assignability through inheritance: {@code test.List<String>} must be
 * assignable to {@code test.Collection<String>}.
 *
 * Fixes: looked-up classes are now null-checked (consistent with the sibling
 * tests), and the {@code assertEquals} calls pass the expected value first so
 * failure messages are reported correctly.
 */
public void testAssignableSubInheritor() {
final JavaPsiFacadeEx psiManager = getJavaFacade();
final PsiElementFactory factory = psiManager.getElementFactory();
final PsiClass classCollection = psiManager.findClass("test.Collection");
assertNotNull(classCollection);
final PsiClass classList = psiManager.findClass("test.List");
assertNotNull(classList);
final PsiType collectionType = factory.createType(classCollection, PsiSubstitutor.EMPTY);
final PsiType listType = factory.createType(classList, PsiSubstitutor.EMPTY);
assertEquals("test.Collection<E>", collectionType.getCanonicalText());
assertEquals("test.List<T>", listType.getCanonicalText());
final PsiType typeListOfString = factory.createTypeFromText("test.List<java.lang.String>", null);
final PsiType typeCollectionOfString = factory.createTypeFromText("test.Collection<java.lang.String>", null);
assertTrue(typeCollectionOfString.isAssignableFrom(typeListOfString));
}
/**
 * Checks type inference through a multi-level inheritance chain: the call
 * {@code l.get(0)} must type as {@code test.List<java.lang.Integer>}, which
 * in turn must be assignable to {@code test.Collection<java.lang.Integer>}.
 */
public void testComplexInheritance() {
final JavaPsiFacadeEx psiManager = getJavaFacade();
final PsiElementFactory factory = psiManager.getElementFactory();
final PsiClass classA = psiManager.findClass("A");
assertNotNull(classA);
final PsiMethod method = classA.findMethodsByName("method", false)[0];
final PsiExpression expression = ((PsiExpressionStatement) method.getBody().getStatements()[1]).getExpression();
// Guard against picking the wrong statement from the test data.
assertEquals("l.get(0)", expression.getText());
final PsiType type = expression.getType();
final PsiType listOfInteger = factory.createTypeFromText("test.List<java.lang.Integer>", null);
assertEquals(listOfInteger, type);
final PsiType collectionOfInteger = factory.createTypeFromText("test.Collection<java.lang.Integer>", null);
assertTrue(collectionOfInteger.isAssignableFrom(type));
}
/**
 * Checks that both {@code l.get(0)} and {@code b.get(0)} type as
 * {@code test.List<java.lang.Integer>} when the receivers inherit from a
 * List-of-List hierarchy.
 */
public void testListListInheritance() {
final JavaPsiFacadeEx psiManager = getJavaFacade();
final PsiElementFactory factory = psiManager.getElementFactory();
final PsiClass classA = psiManager.findClass("A");
assertNotNull(classA);
final PsiMethod method = classA.findMethodsByName("method", false)[0];
final PsiExpression expression1 = ((PsiExpressionStatement) method.getBody().getStatements()[1]).getExpression();
// Guard against picking the wrong statement from the test data.
assertEquals("l.get(0)", expression1.getText());
final PsiType type1 = expression1.getType();
final PsiType typeListOfInteger = factory.createTypeFromText("test.List<java.lang.Integer>", null);
assertEquals(typeListOfInteger, type1);
assertTrue(typeListOfInteger.isAssignableFrom(type1));
final PsiExpression expression2 = ((PsiExpressionStatement) method.getBody().getStatements()[3]).getExpression();
assertEquals("b.get(0)", expression2.getText());
final PsiType type2 = expression2.getType();
assertEquals(typeListOfInteger, type2);
}
/**
 * Checks that whitespace inside a nested type-parameter list does not break
 * parsing: the declared type must equal
 * {@code test.List<test.List<java.lang.Integer>>}.
 */
public void testSpaceInTypeParameterList() {
final JavaPsiFacadeEx facade = getJavaFacade();
final PsiClass aClass = facade.findClass("A");
assertNotNull(aClass);
final PsiMethod method = aClass.findMethodsByName("method", false)[0];
final PsiDeclarationStatement decl = (PsiDeclarationStatement) method.getBody().getStatements()[0];
final PsiVariable declaredVar = (PsiVariable) decl.getDeclaredElements()[0];
final PsiType expected = facade.getElementFactory().createTypeFromText("test.List<test.List<java.lang.Integer>>", null);
assertEquals(expected, declaredVar.getType());
}
/**
 * Checks inference of a method-level type parameter for
 * {@code CollectionUtil.sort}: the first call infers {@code java.lang.Integer},
 * the second leaves the parameter unsubstituted (null).
 */
public void testMethodTypeParameter() {
final JavaPsiFacadeEx psiManager = getJavaFacade();
final PsiElementFactory factory = psiManager.getElementFactory();
final PsiClass classA = psiManager.findClass("A");
assertNotNull(classA);
final PsiMethod method = classA.findMethodsByName("method", false)[0];
final PsiStatement[] statements = method.getBody().getStatements();
final PsiMethodCallExpression methodCallExpression = (PsiMethodCallExpression) ((PsiExpressionStatement) statements[1]).getExpression();
isCollectionUtilSort(methodCallExpression, factory.createTypeFromText("java.lang.Integer", null));
final PsiMethodCallExpression methodCallExpression1 = (PsiMethodCallExpression) ((PsiExpressionStatement) statements[3]).getExpression();
// null: no substitution is expected for this call site.
isCollectionUtilSort(methodCallExpression1, null);
}
/**
 * Asserts that the given call resolves to {@code test.CollectionUtil#sort},
 * that its method type parameter substitutes to {@code typeParameterValue}
 * (may be null, meaning "unsubstituted"), and that the method is applicable
 * to the call's arguments.
 *
 * @param methodCallExpression the call expression under test.
 * @param typeParameterValue   expected substitution of sort's type parameter.
 */
private static void isCollectionUtilSort(final PsiMethodCallExpression methodCallExpression,
final PsiType typeParameterValue) {
final PsiReferenceExpression methodExpression = methodCallExpression.getMethodExpression();
final JavaResolveResult methodResolve = methodExpression.advancedResolve(false);
assertTrue(methodResolve.getElement() instanceof PsiMethod);
final PsiMethod methodSort = (PsiMethod) methodResolve.getElement();
assertEquals("sort", methodSort.getName());
assertEquals("test.CollectionUtil", methodSort.getContainingClass().getQualifiedName());
final PsiTypeParameter methodSortTypeParameter = methodSort.getTypeParameters()[0];
final PsiType sortParameterActualType = methodResolve.getSubstitutor().substitute(methodSortTypeParameter);
// Comparing.equal is null-tolerant, so a null expectation is handled too.
assertTrue(Comparing.equal(sortParameterActualType, typeParameterValue));
assertTrue(
PsiUtil.isApplicable(methodSort, methodResolve.getSubstitutor(), methodCallExpression.getArgumentList()));
}
/**
 * Checks erasure of array-returning members on a raw receiver: a method
 * returning an array of the type parameter erases to
 * {@code java.lang.Object[]}, and one returning a parameterized List erases
 * to the raw {@code test.List}.
 */
public void testRawArrayTypes() {
final JavaPsiFacadeEx psiManager = getJavaFacade();
final PsiElementFactory factory = psiManager.getElementFactory();
final PsiClass classA = psiManager.findClass("A");
assertNotNull(classA);
final PsiMethod method = classA.findMethodsByName("method", false)[0];
final PsiStatement[] statements = method.getBody().getStatements();
final PsiDeclarationStatement declarationStatement = (PsiDeclarationStatement) statements[0];
final PsiClassType typeOfL = (PsiClassType) ((PsiVariable) declarationStatement.getDeclaredElements()[0]).getType();
final PsiType typeRawList = factory.createTypeFromText("test.List", null);
assertTrue(Comparing.equal(typeOfL, typeRawList));
// Substitutor of the raw receiver, used below to erase member types.
final PsiSubstitutor typeOfLSubstitutor = typeOfL.resolveGenerics().getSubstitutor();
final PsiMethodCallExpression exprGetArray = (PsiMethodCallExpression) ((PsiExpressionStatement) statements[1]).getExpression();
final PsiType typeOfGetArrayCall = exprGetArray.getType();
final PsiType objectArrayType = factory.createTypeFromText("java.lang.Object[]", null);
assertTrue(Comparing.equal(typeOfGetArrayCall, objectArrayType));
final PsiMethod methodGetArray = (PsiMethod) exprGetArray.getMethodExpression().resolve();
// Substituting the declared return type must give the same erased array type.
final PsiType subtitutedGetArrayReturnType = typeOfLSubstitutor.substitute(methodGetArray.getReturnType());
assertTrue(Comparing.equal(subtitutedGetArrayReturnType, objectArrayType));
final PsiMethodCallExpression exprGetListOfArray = (PsiMethodCallExpression) ((PsiExpressionStatement) statements[2]).getExpression();
final PsiMethod methodGetListOfArray = (PsiMethod) exprGetListOfArray.getMethodExpression().resolve();
final PsiType returnType = methodGetListOfArray.getReturnType();
final PsiType substitutedReturnType = typeOfLSubstitutor.substitute(returnType);
assertTrue(Comparing.equal(substitutedReturnType, typeRawList));
final PsiType typeOfGetListOfArrayCall = exprGetListOfArray.getType();
assertTrue(Comparing.equal(typeOfGetListOfArrayCall, typeRawList));
}
/**
 * Parses {@code X<? extends Y, ? super Z<A,B>, ?>} and checks each wildcard:
 * the extends-wildcard bounds, the super-wildcard bounds, and the unbounded
 * wildcard (no bound; extends-bound is Object, super-bound is the null type).
 */
public void testWildcardTypeParsing() {
final GlobalSearchScope scope = GlobalSearchScope.moduleWithDependenciesAndLibrariesScope(myModule);
final PsiClassType javaLangObject = PsiType.getJavaLangObject(myPsiManager, scope);
PsiElement element = ((PsiDeclarationStatement)myJavaFacade.getElementFactory().createStatementFromText("X<? extends Y, ? super Z<A,B>, ?> x;", null)).getDeclaredElements()[0];
PsiJavaCodeReferenceElement referenceElement = ((PsiVariable) element).getTypeElement().getInnermostComponentReferenceElement();
PsiType[] typeArguments = referenceElement.getTypeParameters();
assertEquals(3, typeArguments.length);
assertTrue(typeArguments[0] instanceof PsiWildcardType);
assertTrue(typeArguments[1] instanceof PsiWildcardType);
assertTrue(typeArguments[2] instanceof PsiWildcardType);
PsiWildcardType extendsWildcard = (PsiWildcardType)typeArguments[0];
PsiWildcardType superWildcard = (PsiWildcardType)typeArguments[1];
PsiWildcardType unboundedWildcard = (PsiWildcardType)typeArguments[2];
// extends wildcard: bound and extends-bound are Y; super-bound is the null type.
assertTrue(extendsWildcard.isExtends());
assertFalse(extendsWildcard.isSuper());
assertEquals("Y", extendsWildcard.getBound().getCanonicalText());
assertEquals("Y", extendsWildcard.getExtendsBound().getCanonicalText());
assertEquals(extendsWildcard.getSuperBound(), PsiType.NULL);
// super wildcard: bound and super-bound are Z<A,B>; extends-bound is Object.
assertFalse(superWildcard.isExtends());
assertTrue(superWildcard.isSuper());
assertEquals("Z<A,B>", superWildcard.getBound().getCanonicalText());
assertEquals(superWildcard.getExtendsBound(), javaLangObject);
assertEquals("Z<A,B>", superWildcard.getSuperBound().getCanonicalText());
// unbounded wildcard: no bound at all.
assertFalse(unboundedWildcard.isExtends());
assertFalse(unboundedWildcard.isSuper());
assertNull(unboundedWildcard.getBound());
assertEquals(unboundedWildcard.getExtendsBound(), javaLangObject);
assertEquals(unboundedWildcard.getSuperBound(), PsiType.NULL);
}
/**
 * Checks assignability against a wildcard-parameterized type:
 * {@code test.List<? extends usages.Base>} must accept
 * {@code test.List<usages.Intermediate>}.
 */
public void testWildcardTypesAssignable() {
PsiClassType listOfExtendsBase = (PsiClassType)myJavaFacade.getElementFactory().createTypeFromText("test.List<? extends usages.Base>", null);
PsiClassType.ClassResolveResult classResolveResult = listOfExtendsBase.resolveGenerics();
PsiClass listClass = classResolveResult.getElement();
assertNotNull(listClass);
// The type parameter must substitute to the "? extends usages.Base" wildcard.
PsiTypeParameter listTypeParameter = PsiUtil.typeParametersIterator(listClass).next();
PsiType listParameterTypeValue = classResolveResult.getSubstitutor().substitute(listTypeParameter);
assertTrue(listParameterTypeValue instanceof PsiWildcardType);
assertTrue(((PsiWildcardType)listParameterTypeValue).isExtends());
assertEquals("usages.Base", ((PsiWildcardType)listParameterTypeValue).getBound().getCanonicalText());
PsiClassType listOfIntermediate = (PsiClassType)myJavaFacade.getElementFactory().createTypeFromText("test.List<usages.Intermediate>", null);
assertNotNull(listOfIntermediate.resolve());
assertTrue(listOfExtendsBase.isAssignableFrom(listOfIntermediate));
}
/**
 * Checks varargs ({@code PsiEllipsisType}): a vararg parameter type is an
 * ellipsis type, mutually assignable with the equivalent array type, and
 * {@code "int ..."} parses to an ellipsis type as well.
 */
public void testEllipsisType() {
final PsiElementFactory factory = myJavaFacade.getElementFactory();
final PsiMethod method = factory.createMethodFromText("void foo (int ... args) {}", null);
final PsiType varargType = method.getParameterList().getParameters()[0].getType();
assertTrue(varargType instanceof PsiEllipsisType);
final PsiType equivalentArray = ((PsiEllipsisType) varargType).getComponentType().createArrayType();
assertTrue(varargType.isAssignableFrom(equivalentArray));
assertTrue(equivalentArray.isAssignableFrom(varargType));
final PsiType parsed = factory.createTypeFromText("int ...", null);
assertTrue(parsed instanceof PsiEllipsisType);
}
/**
 * Checks binary numeric promotion of boxed operands: an Integer/Double
 * conditional promotes to {@code double}; a shift with a boxed left operand
 * unboxes to {@code int}.
 */
public void testBinaryNumericPromotion() {
final PsiElementFactory factory = myJavaFacade.getElementFactory();
final PsiExpression ternary = factory.createExpressionFromText("b ? new Integer (0) : new Double(0.0)", null);
assertEquals(PsiType.DOUBLE, ternary.getType());
final PsiExpression shiftExpr = factory.createExpressionFromText("Integer.valueOf(0) << 2", null);
assertEquals(PsiType.INT, shiftExpr.getType());
}
/**
 * Checks unary numeric promotion: unary plus on boxed Integer/Byte yields
 * {@code int}, while postfix {@code ++} keeps the operand's boxed Byte type.
 */
public void testUnaryExpressionType() {
final PsiElementFactory factory = myJavaFacade.getElementFactory();
final PsiExpression unaryPlusInteger = factory.createExpressionFromText("+Integer.valueOf(1)", null);
assertEquals(PsiType.INT, unaryPlusInteger.getType());
final PsiExpression unaryPlusByte = factory.createExpressionFromText("+Byte.valueOf(1)", null);
assertEquals(PsiType.INT, unaryPlusByte.getType());
// The declaration is only needed as a resolve context for "b".
final PsiStatement context = factory.createStatementFromText("Byte b = 1;", null);
final PsiExpression postfixIncrement = factory.createExpressionFromText("b++", context);
assertEquals(PsiType.BYTE.getBoxedType(context), postfixIncrement.getType());
}
/**
 * Checks that a cached PsiType is invalidated when the variable's type
 * element is replaced, and that querying validity after the type element has
 * been garbage-collected does not throw. The statement order here (replace,
 * check, collect, check again) is essential to the test.
 */
public void testVariableTypeInvalidation() {
PsiElementFactory factory = myJavaFacade.getElementFactory();
PsiStatement statement = factory.createStatementFromText("String s;", null);
PsiLocalVariable var = (PsiLocalVariable)((PsiDeclarationStatement)statement).getDeclaredElements()[0];
PsiType type = var.getType();
assertTrue(type.isValid());
// Hold the old type element only through a Ref so GCWatcher can clear it.
Ref<PsiTypeElement> ref = Ref.create(var.getTypeElement());
ref.get().replace(factory.createTypeElement(PsiType.INT));
assertFalse(type.isValid());
GCWatcher.fromClearedRef(ref).ensureCollected();
assertFalse(type.isValid()); // shouldn't throw
}
}
|
package is2011.reproductor.vista;
import java.awt.*;
import java.awt.event.*;
import java.util.*;
import is2011.app.controlador.IAppController;
import is2011.biblioteca.contenedores.CancionContainer;
import is2011.biblioteca.search.*;
import is2011.reproductor.modelo.ListaReproduccion.ModoReproduccionEnum;
import is2011.reproductor.modelo.listeners.*;
import javax.swing.*;
import javax.swing.border.TitledBorder;
import javax.swing.table.DefaultTableModel;
import javax.swing.table.JTableHeader;
import javax.swing.table.TableColumnModel;
/**
 * View that implements the playlist listener and is in charge of showing the
 * playlist on screen: the currently playing song and the list of songs with
 * their fields.
 */
@SuppressWarnings("serial")
public class VistaListaReproduccion extends JPanel implements
ListaReproduccionListener {
// ********************************************************************** //
// ************* ATRIBUTOS Y CONSTANTES ************* //
// ********************************************************************** //
/** Rutas de las imagene*/
private static final String SEARCH_PNG = "/Recursos/search.png";
/** Referencia al controlador de la aplicacion*/
private IAppController controlador;
/** Atributo que contendra las canciones*/
private JTable tabla;
/** Panel con scroll que contiene a la tabla */
private JScrollPane panelScroll;
/** Layout de JPanel principal de la vista */
private BorderLayout border;
/** Panel que contiene los elementos necesarios para realizar la busqueda */
private JPanel panelBusqueda;
/** Boton que genera la accion de buscar */
private JButton buscar;
/** Area de texto donde insertar los valores a buscar */
private JRoundTextField textoBusqueda;
/** Campo sobre el que quieres realizar la busqueda */
private Choice tipoBusqueda;
/** Modelo de la tabla*/
private DefaultTableModel modelo;
/**Label que contiene el valor de aleatorio*/
private JLabel modoReproduccion;
/** El menu popUp*/
private JPopupMenu popup;
/** Columna en la que muestra si se esta reproduciendo la cancion*/
private static final int NUM_COLUMNA_REPRODUCIENDO = 0;
/** Columna en la que se almacena titulo*/
private static final int NUM_COLUMNA_TITULO =1;
/** Columna en la que se almacena genero*/
private static final int NUM_COLUMNA_GENERO = 2;
/** Columna en la que se almacena artista*/
private static final int NUM_COLUMNA_ARTISTA = 3;
/** Columna en la que se almacena album*/
private static final int NUM_COLUMNA_ALBUM = 4;
/** Columna en la que se almacena Trak Nº*/
private static final int NUM_COLUMNA_TRACKNO = 5;
/** Columna en la que se almacena duracion*/
private static final int NUM_COLUMNA_DURACION = 6;
/** Numero de campos*/
private static final int NUM_CAMPOS = 7;
/** Posición del choice para el titulo */
private final int titulo = 0;
/** Posición del choice para el genero */
private final int genero = 1;
/** Posición del choice para el artista */
private final int artista = 2;
/** Posición del choice para el album */
private final int album = 3;
// ********************************************************************** //
// ************* CONSTRUCTORES ************* //
// ********************************************************************** //
/**
* Prepara a la vista de reproduccion para mostrar las canciones.
*/
public VistaListaReproduccion() {
panelScroll = new JScrollPane();
border = new BorderLayout();
this.setLayout(border);
panelBusqueda = new JPanel();
buscar = new JButton();
buscar.setBorder(BorderFactory.createEmptyBorder());
buscar.setIcon(new ImageIcon(getClass().getResource(SEARCH_PNG)));
// Creamos el campo del texto de búsqueda
textoBusqueda = new JRoundTextField("Buscar...",0);
// añadimos el listener para el evento de pulsar teclas
textoBusqueda.addKeyListener(new KeyListener(){
/**
* No es necesario implementar este método
*/
@Override
public void keyPressed(KeyEvent arg0) {
//No es necesario implementarlo
}
/**
* Cada vez que se libera una tecla se realiza la búsqueda
*/
@Override
public void keyReleased(KeyEvent arg0) {
switch (tipoBusqueda.getSelectedIndex()) {
case titulo : controlador.buscaListaReproduccionAvanzada(
new BuscarTitulo(textoBusqueda.getText()));
break;
case genero : controlador.buscaListaReproduccionAvanzada(
new BuscarGenero(textoBusqueda.getText()));
break;
case artista : controlador.buscaListaReproduccionAvanzada(
new BuscarArtista(textoBusqueda.getText()));
break;
case album : controlador.buscaListaReproduccionAvanzada(
new BuscarAlbum(textoBusqueda.getText()));
break;
}
}
/**
* No es necesario implementar este método
*/
@Override
public void keyTyped(KeyEvent arg0) {
//No es necesario implementarlo
}
});
tipoBusqueda = new Choice();
tipoBusqueda.add("Título");
tipoBusqueda.add("Género");
tipoBusqueda.add("Artista");
tipoBusqueda.add("Álbum");
panelBusqueda.add(textoBusqueda);
panelBusqueda.add(tipoBusqueda);
//panelBusqueda.add(buscar);
//panelBusqueda.add(buscarAvanzada);
modelo = new DefaultTableModel()
{@Override
public boolean isCellEditable (int fila, int columna) {
return false;
}
};
//Añadimos las columnas del modelo
modelo.addColumn("Actual");
modelo.addColumn("Título");
modelo.addColumn("Género");
modelo.addColumn("Artista");
modelo.addColumn("Álbum");
modelo.addColumn("Pista");
modelo.addColumn("Duración");
//Creamos la tabla
tabla = new JTable(modelo);
tabla.setShowHorizontalLines(true);
//Configuramos el tamaño
TableColumnModel cm = tabla.getColumnModel();
cm.getColumn(NUM_COLUMNA_REPRODUCIENDO).setPreferredWidth(75);
cm.getColumn(NUM_COLUMNA_TITULO).setPreferredWidth(170);
cm.getColumn(NUM_COLUMNA_GENERO).setPreferredWidth(81);
cm.getColumn(NUM_COLUMNA_ARTISTA).setPreferredWidth(150);
cm.getColumn(NUM_COLUMNA_ALBUM).setPreferredWidth(150);
cm.getColumn(NUM_COLUMNA_TRACKNO).setPreferredWidth(75);
cm.getColumn(NUM_COLUMNA_DURACION).setPreferredWidth(75);
//Le añadimos el scroll
panelScroll.setViewportView(tabla);
JTableHeader header = tabla.getTableHeader() ;
header.addMouseListener(
new MouseAdapter()
{
public void mouseClicked(MouseEvent e)
{
JTableHeader h = (JTableHeader)e.getSource() ;
int nColumn = h.columnAtPoint(e.getPoint());
switch (nColumn) {
case NUM_COLUMNA_ALBUM:
controlador.ordenarPorAlbum();
break;
case NUM_COLUMNA_ARTISTA:
controlador.ordenarPorArtista();
break;
case NUM_COLUMNA_DURACION:
controlador.ordenarPorDuracion();
break;
case NUM_COLUMNA_GENERO:
controlador.ordenarPorGenero();
break;
case NUM_COLUMNA_TITULO:
controlador.ordenarPorTitulo();
break;
}
}
}
) ;
//La cabecera de la tabla.
this.modoReproduccion = new JLabel("Modo de reproducción NORMAL");
setVisible(true);
tabla.setVisible(true);
this.setBorder(new TitledBorder("Está sonando"));
//Menu desplegable
popup = new JPopupMenu();
JMenuItem guardar = new JMenuItem("Guardar");
popup.add(guardar);
JMenuItem borrar = new JMenuItem("Borrar");
popup.add(borrar);
JMenu ordenar = new JMenu("Ordenar");
JMenuItem ordenarAlbum = new JMenuItem("Álbum");
JMenuItem ordenarArtista = new JMenuItem("Artista");
JMenuItem ordenarGenero = new JMenuItem("Género");
JMenuItem ordenarDuracion = new JMenuItem("Duración");
JMenuItem ordenarTitulo = new JMenuItem("Título");
ordenar.add(ordenarAlbum);
ordenar.add(ordenarArtista);
ordenar.add(ordenarGenero);
ordenar.add(ordenarDuracion);
ordenar.add(ordenarTitulo);
popup.add(ordenar);
guardar.addActionListener(new ActionListener(){
@Override
public void actionPerformed(ActionEvent e) {
controlador.guardarListaReproduccion();
}
});
borrar.addActionListener(new ActionListener() {
@Override
public void actionPerformed(ActionEvent event) {
int[] rows = tabla.getSelectedRows();
//Hay que notificar de la cancion mayor a la menor para
//poder borrar en bloques.
ArrayList<Integer> rowsOrdenadas = new ArrayList<Integer>();
for (int row: rows){
rowsOrdenadas.add(row);
}
Collections.sort(rowsOrdenadas);
for (int i = rowsOrdenadas.size()-1 ; i >= 0; i--) {
controlador.borrarCancion(rowsOrdenadas.get(i));
}
}
});
ordenarAlbum.addActionListener(new ActionListener(){
@Override
public void actionPerformed(ActionEvent e) {
controlador.ordenarPorAlbum();
}
});
ordenarArtista.addActionListener(new ActionListener(){
@Override
public void actionPerformed(ActionEvent e) {
controlador.ordenarPorArtista();
}
});
ordenarGenero.addActionListener(new ActionListener(){
@Override
public void actionPerformed(ActionEvent e) {
controlador.ordenarPorGenero();
}
});
ordenarDuracion.addActionListener(new ActionListener(){
@Override
public void actionPerformed(ActionEvent e) {
controlador.ordenarPorDuracion();
}
});
ordenarTitulo.addActionListener(new ActionListener(){
@Override
public void actionPerformed(ActionEvent e) {
controlador.ordenarPorTitulo();
}
});
//Añadimos el oyente del raton
tabla.addMouseListener(new MouseAdapter() {
@Override
public void mouseClicked(MouseEvent e) {
//Si es boton derecho
if ( SwingUtilities.isRightMouseButton(e)){
//int row = Math.round(e.getY() / tabla.getRowHeight());
//tabla.setRowSelectionInterval(row, row);
popup.show(e.getComponent(), e.getX(), e.getY());
}
else if(e.getClickCount() == 2) {
int cancionDeseada = e.getY()/tabla.getRowHeight();
controlador.play(cancionDeseada);
}
}
});
this.add(panelBusqueda, BorderLayout.NORTH);
this.add(panelScroll, BorderLayout.CENTER);
}
// ********************************************************************** //
// ************* METODOS PRIVADOS ************* //
// ********************************************************************** //
/**
* Recibe un numero de segundos y lo transforma a un string de HH:MM:SS
* @param segundos el numero de segundos.
* @return El estring con formato HH:MM:SS
*/
private String toHora(int segundos) {
int horas;
int minutos;
horas = segundos/ 3600;
segundos -= horas*3600;
minutos = segundos / 60;
segundos -= minutos*60;
return "" + ((horas > 0)? horas+":" : "") + ((minutos>9)? minutos : "0"
+minutos)+ ":" + ((segundos > 9)? segundos : "0"+segundos);
}
// ********************************************************************** //
// ************* METODOS PUBLICOS ************* //
// ********************************************************************** //
@Override
/**
* quita la cancion e de la tabla
*/
public void borrarCancion(BorrarCancionEvent e) {
modelo.removeRow(e.getPosicion());
}
@Override
/**
* añade una cancion a la tabla
*/
public void nuevaCancion(NuevaCancionEvent e) {
int pos = e.getPosicion();
Object [] rowData = new Object[NUM_CAMPOS];
rowData[NUM_COLUMNA_REPRODUCIENDO] = "";
rowData[NUM_COLUMNA_TITULO] = e.getTitulo();
rowData[NUM_COLUMNA_GENERO] = e.getGenero();
rowData[NUM_COLUMNA_ARTISTA] = e.getArtista();
rowData[NUM_COLUMNA_ALBUM] = e.getAlbum();
rowData[NUM_COLUMNA_TRACKNO] = e.getPista();
rowData[NUM_COLUMNA_DURACION] = toHora(e.getDuracion());
modelo.insertRow(pos, rowData);
}
/**
* Reinicia todos los datos del modelo.
*/
@Override
public void reinicia() {
modelo.setNumRows(0);
}
@Override
/**
* informa en la tabla de que cancion está reproduciendose
*/
public void setActual(int actualNuevo, int actualViejo) {
if(modelo.getRowCount() >= (actualViejo) && actualViejo > 0 ) {
modelo.setValueAt("", actualViejo-1, NUM_COLUMNA_REPRODUCIENDO);
}
if(modelo.getRowCount() >= (actualNuevo) && actualNuevo >0) {
modelo.setValueAt(" -> ", actualNuevo-1,
NUM_COLUMNA_REPRODUCIENDO);
}
}
@Override
/**
* informa del tipo de reproduccion
*/
public void cambioTipoReproduccion(ModoReproduccionEnum modo) {
this.modoReproduccion.setText("Modo de reproducción " + modo);
}
public JLabel getInfoReproduccion() {
return this.modoReproduccion;
}
// ********************************************************************** //
// ************* GETTERS / SETTERS ************* //
// ********************************************************************** //
/**
* Establece el controlador.
* @param contorlador El controlador
*/
public void setControlador(IAppController controlador) {
this.controlador = controlador;
}
/**
* Devuelve la cancion seleccionada, empezando desde el 0.
* -1 si no hay nada seleccionado.
* @return
*/
public int getCancionSeleccionada() {
return tabla.getSelectedRow();
}
@Override
/**
* añade una lista de reproduccion a la tabla
*/
public void nuevaListaReproduccion
(ArrayList<CancionContainer> listaCanciones) {
int pos = 0;
for(CancionContainer cancion : listaCanciones) {
Object [] rowData = new Object[NUM_CAMPOS];
rowData[NUM_COLUMNA_REPRODUCIENDO] = "";
rowData[NUM_COLUMNA_TITULO] = cancion.getTitulo();
rowData[NUM_COLUMNA_GENERO] = cancion.getGenero();
rowData[NUM_COLUMNA_ARTISTA] = cancion.getArtista();
rowData[NUM_COLUMNA_ALBUM] = cancion.getAlbum();
rowData[NUM_COLUMNA_TRACKNO] = cancion.getPista();
rowData[NUM_COLUMNA_DURACION] = toHora(cancion.getDuracion());
modelo.insertRow(pos, rowData);
pos++;
}
}
/**
 * Shows every song of the current playlist in the table, clearing any rows
 * that were displayed before so old and new contents are not mixed.
 */
public void mostrarTodas() {
    ArrayList<CancionContainer> canciones =
            controlador.getCancionesListaReproduccion();
    // Remove existing rows (bottom-up, so indices stay valid) before
    // repopulating the table.
    for (int fila = tabla.getRowCount() - 1; fila >= 0; fila--) {
        modelo.removeRow(fila);
    }
    int posicion = 0;
    for (CancionContainer cancion : canciones) {
        nuevaCancion(new NuevaCancionEvent(cancion.getTitulo(), cancion.getAlbum(),
                cancion.getPista(), cancion.getArtista(), cancion.getGenero(),
                cancion.getDuracion(), posicion++));
    }
}
/**
 * "Deactivates" the song at the given row by resetting its duration cell
 * to "00:00".
 *
 * <p>NOTE(review): only the duration column is touched here — presumably this
 * visually clears a stopped/finished track; confirm intent against callers.
 *
 * @param indice zero-based row index; out-of-range values are silently ignored
 */
@Override
public void desactivaCancion(int indice) {
    // Guard against indices outside the current table size.
    if(indice < tabla.getRowCount() && indice >= 0){
        modelo.setValueAt("00:00", indice,NUM_COLUMNA_DURACION);
    }
}
}
|
// This is a generated file. Not intended for manual editing.
package com.vyperplugin.psi.impl;
import com.intellij.extapi.psi.ASTWrapperPsiElement;
import com.intellij.lang.ASTNode;
import com.intellij.psi.PsiElementVisitor;
import com.vyperplugin.psi.VyperExpression;
import com.vyperplugin.psi.VyperVisitor;
import org.jetbrains.annotations.NotNull;
/**
 * Base PSI implementation for Vyper expression elements.
 *
 * <p>Part of a generated parser file (see the "This is a generated file"
 * header) — regenerate rather than hand-edit.
 */
public abstract class VyperExpressionImpl extends ASTWrapperPsiElement implements VyperExpression {

    public VyperExpressionImpl(@NotNull ASTNode node) {
        super(node);
    }

    // Typed visitor entry point for Vyper-aware visitors.
    public void accept(@NotNull VyperVisitor visitor) {
        visitor.visitExpression(this);
    }

    // Routes generic PSI visitors to the typed overload when possible.
    public void accept(@NotNull PsiElementVisitor visitor) {
        if (visitor instanceof VyperVisitor) accept((VyperVisitor)visitor);
        else super.accept(visitor);
    }
}
|
package com.study.utils.bus;
import org.apache.commons.io.FileUtils;
import org.springframework.http.HttpHeaders;
import org.springframework.http.HttpStatus;
import org.springframework.http.MediaType;
import org.springframework.http.ResponseEntity;
import javax.servlet.http.HttpServletResponse;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.PrintWriter;
import java.net.URLEncoder;
import java.util.Properties;
/**
 * File upload/download helper utilities.
 *
 * <p>The upload directory is read once from {@code properties/file.properties}
 * (key {@code path}) on the classpath and cached in {@link #PATH}.
 *
 * @author Déjà vu
 */
public class AppFileUtils {

    /**
     * Root folder for uploaded files, as configured in
     * {@code properties/file.properties} (key {@code path}).
     * Stays {@code null} if the properties file cannot be read.
     */
    public static String PATH;

    static {
        // try-with-resources so the classpath stream is always closed
        // (the original implementation leaked it), plus an explicit null
        // check so a missing resource fails with a clear message instead
        // of an NPE inside Properties.load().
        try (InputStream stream = AppFileUtils.class.getClassLoader()
                .getResourceAsStream("properties/file.properties")) {
            if (stream == null) {
                throw new IOException("properties/file.properties not found on classpath");
            }
            Properties properties = new Properties();
            properties.load(stream);
            PATH = properties.getProperty("path");
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Downloads a file as an attachment.
     *
     * @param resp    servlet response, used only to report "file not found"
     * @param path    file name relative to {@link #PATH}
     * @param oldName original (display) file name for the Content-Disposition header
     * @return the response entity carrying the file bytes, or {@code null}
     *         when the file is missing or cannot be read
     */
    public static ResponseEntity<Object> downloadFile(HttpServletResponse resp, String path, String oldName) {
        // Resolve the absolute location under the configured upload folder.
        File file = new File(AppFileUtils.PATH, path);
        if (!file.exists()) {
            writeNotFound(resp);
            return null;
        }
        try {
            // URL-encode the display name so non-ASCII file names survive
            // the Content-Disposition header.
            String encodedName = URLEncoder.encode(oldName, "UTF-8");
            return buildFileEntity(file, encodedName);
        } catch (IOException e) {
            e.printStackTrace();
            return null;
        }
    }

    /**
     * Serves a file inline (e.g. an image) without forcing a download.
     *
     * @param resp servlet response, used only to report "file not found"
     * @param path file name relative to {@link #PATH}
     * @return the response entity carrying the file bytes, or {@code null}
     *         when the file is missing or cannot be read
     */
    public static ResponseEntity<Object> downloadFile(HttpServletResponse resp, String path) {
        File file = new File(AppFileUtils.PATH, path);
        if (!file.exists()) {
            writeNotFound(resp);
            return null;
        }
        try {
            // No attachment name: content is served without a
            // Content-Disposition header, so browsers may render it inline.
            return buildFileEntity(file, null);
        } catch (IOException e) {
            e.printStackTrace();
            return null;
        }
    }

    /**
     * Reads {@code file} into memory and wraps it in a ResponseEntity.
     *
     * @param attachmentName encoded attachment file name, or {@code null}
     *                       to omit the Content-Disposition header
     */
    private static ResponseEntity<Object> buildFileEntity(File file, String attachmentName) throws IOException {
        byte[] bytes = FileUtils.readFileToByteArray(file);
        HttpHeaders header = new HttpHeaders();
        // Generic binary content type: the caller decides how to use it.
        header.setContentType(MediaType.APPLICATION_OCTET_STREAM);
        if (attachmentName != null) {
            header.setContentDispositionFormData("attachment", attachmentName);
        }
        // NOTE(review): 201 CREATED mirrors the original behavior; 200 OK
        // would be the conventional status for a download.
        return new ResponseEntity<Object>(bytes, header, HttpStatus.CREATED);
    }

    /**
     * Writes the "file not found" message to the response body.
     */
    private static void writeNotFound(HttpServletResponse resp) {
        try (PrintWriter out = resp.getWriter()) {
            out.write("文件不存在");
            out.flush();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Renames a file by stripping {@code suffix} from its name.
     *
     * <p>Fix: the original ignored {@link File#renameTo(File)}'s boolean
     * result and reported success even when the rename failed.
     *
     * @param carimg current file name relative to {@link #PATH}
     * @param suffix substring to remove from the name
     * @return the new file name on success, or {@code null} when the file
     *         does not exist or the rename failed
     */
    public static String updateFileName(String carimg, String suffix) {
        try {
            File file = new File(PATH, carimg);
            if (file.exists()) {
                String newName = carimg.replace(suffix, "");
                // renameTo returns false on failure (e.g. target exists);
                // only report the new name when the rename actually happened.
                if (file.renameTo(new File(PATH, newName))) {
                    return newName;
                }
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
        return null;
    }

    /**
     * Deletes the file at {@code PATH + path}; does nothing if it is absent.
     *
     * @param path file path fragment appended directly to {@link #PATH}
     */
    public static void deleteFileUsePath(String path) {
        // Kept as plain concatenation to match the original resolution rules;
        // TODO(review): confirm callers and align with new File(PATH, path)
        // as used by downloadFile.
        String realPath = PATH + path;
        File file = new File(realPath);
        // delete() returns false on failure; surface that instead of
        // silently ignoring it.
        if (file.exists() && !file.delete()) {
            System.err.println("Failed to delete file: " + file.getAbsolutePath());
        }
    }

    /**
     * Deletes the file at {@code PATH + path}, reporting "file not found"
     * to the response when it does not exist.
     *
     * @param resp servlet response, used only to report "file not found"
     * @param path file path fragment appended directly to {@link #PATH}
     */
    public static void deleteFileUsePath(HttpServletResponse resp, String path) {
        String realPath = PATH + path;
        File file = new File(realPath);
        if (file.exists()) {
            if (!file.delete()) {
                System.err.println("Failed to delete file: " + file.getAbsolutePath());
            }
        } else {
            writeNotFound(resp);
        }
    }
}
|
/*
* Copyright (c) 2008-2020, Hazelcast, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.hazelcast.client.impl.protocol.codec;
import com.hazelcast.client.impl.protocol.ClientMessage;
import com.hazelcast.client.impl.protocol.Generated;
import com.hazelcast.client.impl.protocol.codec.builtin.*;
import com.hazelcast.client.impl.protocol.codec.custom.*;
import javax.annotation.Nullable;
import static com.hazelcast.client.impl.protocol.ClientMessage.*;
import static com.hazelcast.client.impl.protocol.codec.builtin.FixedSizeTypesCodec.*;
/*
* This file is auto-generated by the Hazelcast Client Protocol Code Generator.
* To change this file, edit the templates or the protocol
* definitions on the https://github.com/hazelcast/hazelcast-client-protocol
* and regenerate it.
*/
/**
 * Takes a thread dump of the member it's called on.
 */
@Generated("ca7ae20159260673f9df18e0d3a10823")
public final class MCGetThreadDumpCodec {
    //hex: 0x200700
    public static final int REQUEST_MESSAGE_TYPE = 2098944;
    //hex: 0x200701
    public static final int RESPONSE_MESSAGE_TYPE = 2098945;
    // Fixed-size request layout: the dumpDeadLocks flag sits immediately
    // after the partition id field of the initial frame.
    private static final int REQUEST_DUMP_DEAD_LOCKS_FIELD_OFFSET = PARTITION_ID_FIELD_OFFSET + INT_SIZE_IN_BYTES;
    private static final int REQUEST_INITIAL_FRAME_SIZE = REQUEST_DUMP_DEAD_LOCKS_FIELD_OFFSET + BOOLEAN_SIZE_IN_BYTES;
    private static final int RESPONSE_INITIAL_FRAME_SIZE = RESPONSE_BACKUP_ACKS_FIELD_OFFSET + BYTE_SIZE_IN_BYTES;

    // Static codec; not instantiable.
    private MCGetThreadDumpCodec() {
    }

    @edu.umd.cs.findbugs.annotations.SuppressFBWarnings({"URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD"})
    public static class RequestParameters {

        /**
         * Whether only dead-locked threads or all threads should be dumped.
         */
        public boolean dumpDeadLocks;
    }

    // Builds a non-retryable request frame carrying only the dumpDeadLocks flag.
    public static ClientMessage encodeRequest(boolean dumpDeadLocks) {
        ClientMessage clientMessage = ClientMessage.createForEncode();
        clientMessage.setRetryable(false);
        clientMessage.setOperationName("MC.GetThreadDump");
        ClientMessage.Frame initialFrame = new ClientMessage.Frame(new byte[REQUEST_INITIAL_FRAME_SIZE], UNFRAGMENTED_MESSAGE);
        encodeInt(initialFrame.content, TYPE_FIELD_OFFSET, REQUEST_MESSAGE_TYPE);
        // -1 partition id: the request is not partition-addressed.
        encodeInt(initialFrame.content, PARTITION_ID_FIELD_OFFSET, -1);
        encodeBoolean(initialFrame.content, REQUEST_DUMP_DEAD_LOCKS_FIELD_OFFSET, dumpDeadLocks);
        clientMessage.add(initialFrame);
        return clientMessage;
    }

    // Reads the dumpDeadLocks flag back out of the initial frame.
    public static MCGetThreadDumpCodec.RequestParameters decodeRequest(ClientMessage clientMessage) {
        ClientMessage.ForwardFrameIterator iterator = clientMessage.frameIterator();
        RequestParameters request = new RequestParameters();
        ClientMessage.Frame initialFrame = iterator.next();
        request.dumpDeadLocks = decodeBoolean(initialFrame.content, REQUEST_DUMP_DEAD_LOCKS_FIELD_OFFSET);
        return request;
    }

    @edu.umd.cs.findbugs.annotations.SuppressFBWarnings({"URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD"})
    public static class ResponseParameters {

        /**
         * Thread dump of the member's JVM.
         */
        public java.lang.String threadDump;
    }

    // Encodes the thread dump string after an empty fixed-size initial frame.
    public static ClientMessage encodeResponse(java.lang.String threadDump) {
        ClientMessage clientMessage = ClientMessage.createForEncode();
        ClientMessage.Frame initialFrame = new ClientMessage.Frame(new byte[RESPONSE_INITIAL_FRAME_SIZE], UNFRAGMENTED_MESSAGE);
        encodeInt(initialFrame.content, TYPE_FIELD_OFFSET, RESPONSE_MESSAGE_TYPE);
        clientMessage.add(initialFrame);
        StringCodec.encode(clientMessage, threadDump);
        return clientMessage;
    }

    // Skips the initial frame, then decodes the thread dump string.
    public static MCGetThreadDumpCodec.ResponseParameters decodeResponse(ClientMessage clientMessage) {
        ClientMessage.ForwardFrameIterator iterator = clientMessage.frameIterator();
        ResponseParameters response = new ResponseParameters();
        //empty initial frame
        iterator.next();
        response.threadDump = StringCodec.decode(iterator);
        return response;
    }
}
|
package cn.edu.gdut.zaoying.Option.series.pie.markPoint.label.normal.textStyle;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Field marker annotation for a string-valued font-weight option
 * (judging by the package, used under
 * {@code series.pie.markPoint.label.normal.textStyle} — confirm against callers).
 * Retained at runtime so option serializers can read it reflectively.
 */
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.FIELD)
public @interface FontWeightString {

    /** The font-weight value; defaults to the empty string. */
    String value() default "";
}
|
// Targeted by JavaCPP version 1.5.4: DO NOT EDIT THIS FILE
package org.bytedeco.arrow_dataset;
import java.nio.*;
import org.bytedeco.javacpp.*;
import org.bytedeco.javacpp.annotation.*;
import static org.bytedeco.javacpp.presets.javacpp.*;
import org.bytedeco.arrow.*;
import static org.bytedeco.arrow.global.arrow.*;
import org.bytedeco.parquet.*;
import static org.bytedeco.arrow.global.parquet.*;
import static org.bytedeco.arrow.global.arrow_dataset.*;
/** Represents an expression tree */
// NOTE: JavaCPP-generated native binding (see the "DO NOT EDIT THIS FILE"
// header); all behavior lives in the underlying arrow::dataset C++ library.
@Namespace("arrow::dataset") @NoOffset @Properties(inherit = org.bytedeco.arrow.presets.arrow_dataset.class)
public class Expression extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Expression(Pointer p) { super(p); }

    /** Returns true iff the expressions are identical; does not check for equivalence.
     * For example, (A and B) is not equal to (B and A) nor is (A and not A) equal to
     * (false). */
    public native @Cast("bool") boolean Equals(@Const @ByRef Expression other);

    /** Overload for the common case of checking for equality to a specific scalar. */

    /** If true, this Expression is a ScalarExpression wrapping a null scalar. */
    public native @Cast("bool") boolean IsNull();

    /** Validate this expression for execution against a schema. This will check that all
     * reference fields are present (fields not in the schema will be replaced with null)
     * and all subexpressions are executable. Returns the type to which this expression
     * will evaluate. */
    ///
    ///
    ///
    ///
    ///
    ///
    ///
    ///
    ///
    public native @ByVal DataTypeResult Validate(@Const @ByRef Schema schema);

    /** \brief Simplify to an equivalent Expression given assumed constraints on input.
     * This can be used to do less filtering work using predicate push down.
     *
     * Both expressions must pass validation against a schema before Assume may be used.
     *
     * Two expressions can be considered equivalent for a given subset of possible inputs
     * if they yield identical results. Formally, if given.Evaluate(input).Equals(input)
     * then Assume guarantees that:
     * expr.Assume(given).Evaluate(input).Equals(expr.Evaluate(input))
     *
     * For example if we are given that all inputs will
     * satisfy ("a"_ == 1) then the expression ("a"_ > 0 and "b"_ > 0) is equivalent to
     * ("b"_ > 0). It is impossible that the comparison ("a"_ > 0) will evaluate false
     * given ("a"_ == 1), so both expressions will yield identical results. Thus we can
     * write:
     * ("a"_ > 0 and "b"_ > 0).Assume("a"_ == 1).Equals("b"_ > 0)
     *
     * filter.Assume(partition) is trivial if filter and partition are disjoint or if
     * partition is a subset of filter. FIXME(bkietz) write this better
     * - If the two are disjoint, then (false) may be substituted for filter.
     * - If partition is a subset of filter then (true) may be substituted for filter.
     *
     * filter.Assume(partition) is straightforward if both filter and partition are simple
     * comparisons.
     * - filter may be a superset of partition, in which case the filter is
     * satisfied by all inputs:
     * ("a"_ > 0).Assume("a"_ == 1).Equals(true)
     * - filter may be disjoint with partition, in which case there are no inputs which
     * satisfy filter:
     * ("a"_ < 0).Assume("a"_ == 1).Equals(false)
     * - If neither of these is the case, partition provides no information which can
     * simplify filter:
     * ("a"_ == 1).Assume("a"_ > 0).Equals("a"_ == 1)
     * ("a"_ == 1).Assume("b"_ == 1).Equals("a"_ == 1)
     *
     * If filter is compound, Assume can be distributed across the boolean operator. To
     * prove this is valid, we again demonstrate that the simplified expression will yield
     * identical results. For conjunction of filters lhs and rhs:
     * (lhs.Assume(p) and rhs.Assume(p)).Evaluate(input)
     * == Intersection(lhs.Assume(p).Evaluate(input), rhs.Assume(p).Evaluate(input))
     * == Intersection(lhs.Evaluate(input), rhs.Evaluate(input))
     * == (lhs and rhs).Evaluate(input)
     * - The proof for disjunction is symmetric; just replace Intersection with Union. Thus
     * we can write:
     * (lhs and rhs).Assume(p).Equals(lhs.Assume(p) and rhs.Assume(p))
     * (lhs or rhs).Assume(p).Equals(lhs.Assume(p) or rhs.Assume(p))
     * - For negation:
     * (not e.Assume(p)).Evaluate(input)
     * == Difference(input, e.Assume(p).Evaluate(input))
     * == Difference(input, e.Evaluate(input))
     * == (not e).Evaluate(input)
     * - Thus we can write:
     * (not e).Assume(p).Equals(not e.Assume(p))
     *
     * If the partition expression is a conjunction then each of its subexpressions is
     * true for all input and can be used independently:
     * filter.Assume(lhs).Assume(rhs).Evaluate(input)
     * == filter.Assume(lhs).Evaluate(input)
     * == filter.Evaluate(input)
     * - Thus we can write:
     * filter.Assume(lhs and rhs).Equals(filter.Assume(lhs).Assume(rhs))
     *
     * FIXME(bkietz) disjunction proof
     * filter.Assume(lhs or rhs).Equals(filter.Assume(lhs) and filter.Assume(rhs))
     * - This may not result in a simpler expression so it is only used when
     * filter.Assume(lhs).Equals(filter.Assume(rhs))
     *
     * If the partition expression is a negation then we can use the above relations by
     * replacing comparisons with their complements and using the properties:
     * (not (a and b)).Equals(not a or not b)
     * (not (a or b)).Equals(not a and not b) */
    public native @SharedPtr @ByVal Expression Assume(@Const @ByRef Expression given);

    /** Indicates if the expression is satisfiable.
     *
     * This is a shortcut to check if the expression is neither null nor false. */
    ///
    public native @Cast("bool") boolean IsSatisfiable();

    /** Indicates if the expression is satisfiable given an other expression.
     *
     * This behaves like IsSatisfiable, but it simplifies the current expression
     * with the given {@code other} information. */
    public native @Cast("bool") boolean IsSatisfiableWith(@Const @ByRef Expression other);

    /** returns a debug string representing this expression */
    public native @StdString String ToString();

    /** serialize/deserialize an Expression. */
    public native @ByVal BufferResult Serialize();
    public static native @ByVal ExpressionResult Deserialize(@Const @ByRef ArrowBuffer arg0);

    /** \brief Return the expression's type identifier */
    public native ExpressionType.type type();

    /** Copy this expression into a shared pointer. */
    public native @SharedPtr @ByVal Expression Copy();

    // Convenience builders that wrap this expression in derived expression types.
    public native @ByVal InExpression In(@SharedPtr @Cast({"", "std::shared_ptr<arrow::Array>"}) Array set);

    public native @ByVal IsValidExpression IsValid();

    public native @ByVal CastExpression CastTo(@SharedPtr @Cast({"", "std::shared_ptr<arrow::DataType>"}) DataType type,
                @ByVal(nullValue = "arrow::compute::CastOptions()") CastOptions options);
    public native @ByVal CastExpression CastTo(@SharedPtr @Cast({"", "std::shared_ptr<arrow::DataType>"}) DataType type);

    public native @ByVal CastExpression CastLike(@Const @ByRef Expression expr,
                @ByVal(nullValue = "arrow::compute::CastOptions()") CastOptions options);
    public native @ByVal CastExpression CastLike(@Const @ByRef Expression expr);
}
|
package net.bytebuddy.implementation;
import net.bytebuddy.description.field.FieldDescription;
import net.bytebuddy.description.method.MethodDescription;
import net.bytebuddy.description.type.TypeDescription;
import net.bytebuddy.description.type.TypeList;
import net.bytebuddy.dynamic.scaffold.InstrumentedType;
import net.bytebuddy.implementation.bytecode.ByteCodeAppender;
import net.bytebuddy.implementation.bytecode.StackManipulation;
import net.bytebuddy.implementation.bytecode.assign.Assigner;
import net.bytebuddy.implementation.bytecode.collection.ArrayFactory;
import net.bytebuddy.implementation.bytecode.constant.MethodConstant;
import net.bytebuddy.implementation.bytecode.member.FieldAccess;
import net.bytebuddy.implementation.bytecode.member.MethodInvocation;
import net.bytebuddy.implementation.bytecode.member.MethodReturn;
import net.bytebuddy.implementation.bytecode.member.MethodVariableAccess;
import org.objectweb.asm.MethodVisitor;
import org.objectweb.asm.Opcodes;
import java.lang.reflect.InvocationHandler;
import java.util.ArrayList;
import java.util.List;
import static net.bytebuddy.matcher.ElementMatchers.named;
/**
 * An adapter for adapting an {@link java.lang.reflect.InvocationHandler}. The adapter allows the invocation handler
 * to also intercept method calls to non-interface methods.
 */
public abstract class InvocationHandlerAdapter implements Implementation {

    /**
     * Indicates that a value should not be cached.
     */
    private static final boolean NO_CACHING = false;

    /**
     * Indicates that a {@link java.lang.reflect.Method} should be cached.
     */
    protected static final boolean CACHING = true;

    /**
     * The prefix for field that are created for storing the instrumented value.
     */
    private static final String PREFIX = "invocationHandler";

    /**
     * A type description of the {@link InvocationHandler}.
     */
    private static final TypeDescription.Generic INVOCATION_HANDLER_TYPE = new TypeDescription.Generic.OfNonGenericType.ForLoadedType(InvocationHandler.class);

    /**
     * The name of the field for storing an invocation handler.
     */
    protected final String fieldName;

    /**
     * The assigner that is used for assigning the return invocation handler's return value to the
     * intercepted method's return value.
     */
    protected final Assigner assigner;

    /**
     * Determines if the {@link java.lang.reflect.Method} instances that are handed to the intercepted methods are
     * cached in {@code static} fields.
     */
    protected final boolean cacheMethods;

    /**
     * Creates a new invocation handler for a given field.
     *
     * @param fieldName    The name of the field.
     * @param cacheMethods Determines if the {@link java.lang.reflect.Method} instances that are handed to the
     *                     intercepted methods are cached in {@code static} fields.
     * @param assigner     The assigner to apply when defining this implementation.
     */
    protected InvocationHandlerAdapter(String fieldName, boolean cacheMethods, Assigner assigner) {
        this.fieldName = fieldName;
        this.cacheMethods = cacheMethods;
        this.assigner = assigner;
    }

    /**
     * Creates an implementation for any instance of an {@link java.lang.reflect.InvocationHandler} that delegates
     * all method interceptions to the given instance which will be stored in a {@code static} field.
     *
     * @param invocationHandler The invocation handler to which all method calls are delegated.
     * @return An implementation that delegates all method interceptions to the given invocation handler.
     */
    public static InvocationHandlerAdapter of(InvocationHandler invocationHandler) {
        return of(invocationHandler, String.format("%s$%d", PREFIX, Math.abs(invocationHandler.hashCode() % Integer.MAX_VALUE)));
    }

    /**
     * Creates an implementation for any instance of an {@link java.lang.reflect.InvocationHandler} that delegates
     * all method interceptions to the given instance which will be stored in a {@code static} field.
     *
     * @param invocationHandler The invocation handler to which all method calls are delegated.
     * @param fieldName         The name of the field.
     * @return An implementation that delegates all method interceptions to the given invocation handler.
     */
    public static InvocationHandlerAdapter of(InvocationHandler invocationHandler, String fieldName) {
        return new ForStaticDelegation(fieldName, NO_CACHING, Assigner.DEFAULT, invocationHandler);
    }

    /**
     * Creates an implementation for any {@link java.lang.reflect.InvocationHandler} that delegates
     * all method interceptions to a {@code public} instance field with the given name. This field has to be
     * set before any invocations are intercepted. Otherwise, a {@link java.lang.NullPointerException} will be
     * thrown.
     *
     * @param fieldName The name of the field.
     * @return An implementation that delegates all method interceptions to an instance field of the given name.
     */
    public static InvocationHandlerAdapter toInstanceField(String fieldName) {
        return new ForInstanceDelegation(fieldName, NO_CACHING, Assigner.DEFAULT);
    }

    /**
     * Returns a list of stack manipulations that loads all arguments of an instrumented method.
     *
     * @param instrumentedMethod The method that is instrumented.
     * @return A list of stack manipulation that loads all arguments of an instrumented method.
     */
    private List<StackManipulation> argumentValuesOf(MethodDescription instrumentedMethod) {
        TypeList.Generic parameterTypes = instrumentedMethod.getParameters().asTypeList();
        List<StackManipulation> instruction = new ArrayList<StackManipulation>(parameterTypes.size());
        // Slot 0 holds `this`; arguments start at offset 1.
        int currentIndex = 1;
        for (TypeDescription.Generic parameterType : parameterTypes) {
            instruction.add(new StackManipulation.Compound(
                    MethodVariableAccess.of(parameterType).loadOffset(currentIndex),
                    assigner.assign(parameterType, TypeDescription.Generic.OBJECT, Assigner.Typing.STATIC)));
            // Long and double occupy two local-variable slots.
            currentIndex += parameterType.getStackSize().getSize();
        }
        return instruction;
    }

    /**
     * By default, any {@link java.lang.reflect.Method} instance that is handed over to an
     * {@link java.lang.reflect.InvocationHandler} is created on each invocation of the method.
     * {@link java.lang.reflect.Method} look-ups are normally cached by its defining {@link java.lang.Class} what
     * makes a repeated look-up of a method little expensive. However, because {@link java.lang.reflect.Method}
     * instances are mutable by their {@link java.lang.reflect.AccessibleObject} contact, any looked-up instance
     * needs to be copied by its defining {@link java.lang.Class} before exposing it. This can cause performance
     * deficits when a method is for example called repeatedly in a loop. By enabling the method cache, this
     * performance penalty can be avoided by caching a single {@link java.lang.reflect.Method} instance for
     * any intercepted method as a {@code static} field in the instrumented type.
     *
     * @return A similar invocation handler adapter which caches any {@link java.lang.reflect.Method} instance
     * in form of a {@code static} field.
     */
    public abstract AssignerConfigurable withMethodCache();

    /**
     * Applies an implementation that delegates to a invocation handler.
     *
     * @param methodVisitor         The method visitor for writing the byte code to.
     * @param implementationContext The implementation context for the current implementation.
     * @param instrumentedMethod    The method that is instrumented.
     * @param instrumentedType      The type that is instrumented.
     * @param preparingManipulation A stack manipulation that applies any preparation to the operand stack.
     * @return The size of the applied assignment.
     */
    protected ByteCodeAppender.Size apply(MethodVisitor methodVisitor,
                                          Context implementationContext,
                                          MethodDescription instrumentedMethod,
                                          TypeDescription instrumentedType,
                                          StackManipulation preparingManipulation) {
        StackManipulation.Size stackSize = new StackManipulation.Compound(
                preparingManipulation,
                FieldAccess.forField(instrumentedType.getDeclaredFields()
                        .filter((named(fieldName))).getOnly()).getter(),
                MethodVariableAccess.REFERENCE.loadOffset(0),
                cacheMethods
                        ? MethodConstant.forMethod(instrumentedMethod.asDefined()).cached()
                        : MethodConstant.forMethod(instrumentedMethod.asDefined()),
                ArrayFactory.forType(TypeDescription.Generic.OBJECT).withValues(argumentValuesOf(instrumentedMethod)),
                MethodInvocation.invoke(INVOCATION_HANDLER_TYPE.getDeclaredMethods().getOnly()),
                assigner.assign(TypeDescription.Generic.OBJECT, instrumentedMethod.getReturnType(), Assigner.Typing.DYNAMIC),
                MethodReturn.returning(instrumentedMethod.getReturnType().asErasure())
        ).apply(methodVisitor, implementationContext);
        return new ByteCodeAppender.Size(stackSize.getMaximalSize(), instrumentedMethod.getStackSize());
    }

    @Override
    public boolean equals(Object other) {
        if (this == other) return true;
        if (other == null || getClass() != other.getClass()) return false;
        InvocationHandlerAdapter that = (InvocationHandlerAdapter) other;
        return cacheMethods == that.cacheMethods
                && assigner.equals(that.assigner)
                && fieldName.equals(that.fieldName);
    }

    @Override
    public int hashCode() {
        int result = fieldName.hashCode();
        result = 31 * result + assigner.hashCode();
        result = 31 * result + (cacheMethods ? 1 : 0);
        return result;
    }

    /**
     * Allows for the configuration of an {@link net.bytebuddy.implementation.bytecode.assign.Assigner}
     * of an {@link net.bytebuddy.implementation.InvocationHandlerAdapter}.
     */
    protected interface AssignerConfigurable extends Implementation {

        /**
         * Configures an assigner to use with this invocation handler adapter.
         *
         * @param assigner The assigner to apply when defining this implementation.
         * @return This instrumentation with the given {@code assigner} configured.
         */
        Implementation withAssigner(Assigner assigner);
    }

    /**
     * An implementation of an {@link net.bytebuddy.implementation.InvocationHandlerAdapter} that delegates method
     * invocations to an adapter that is stored in a static field.
     */
    protected static class ForStaticDelegation extends InvocationHandlerAdapter implements AssignerConfigurable {

        /**
         * The invocation handler to which method interceptions are to be delegated.
         */
        protected final InvocationHandler invocationHandler;

        /**
         * Creates a new invocation handler adapter for delegating invocations to an invocation handler that is stored
         * in a static field.
         *
         * @param fieldName         The name of the field.
         * @param cacheMethods      Determines if the {@link java.lang.reflect.Method} instances that are handed to the
         *                          intercepted methods are cached in {@code static} fields.
         * @param assigner          The assigner to apply when defining this implementation.
         * @param invocationHandler The invocation handler to which all method calls are delegated.
         */
        protected ForStaticDelegation(String fieldName, boolean cacheMethods, Assigner assigner, InvocationHandler invocationHandler) {
            super(fieldName, cacheMethods, assigner);
            this.invocationHandler = invocationHandler;
        }

        @Override
        public AssignerConfigurable withMethodCache() {
            return new ForStaticDelegation(fieldName, CACHING, assigner, invocationHandler);
        }

        @Override
        public Implementation withAssigner(Assigner assigner) {
            return new ForStaticDelegation(fieldName, cacheMethods, assigner, invocationHandler);
        }

        @Override
        public InstrumentedType prepare(InstrumentedType instrumentedType) {
            return instrumentedType
                    .withField(new FieldDescription.Token(fieldName, Opcodes.ACC_SYNTHETIC | Opcodes.ACC_STATIC | Opcodes.ACC_PUBLIC, INVOCATION_HANDLER_TYPE))
                    .withInitializer(new LoadedTypeInitializer.ForStaticField(fieldName, invocationHandler));
        }

        @Override
        public ByteCodeAppender appender(Target implementationTarget) {
            return new Appender(implementationTarget.getInstrumentedType());
        }

        @Override
        public boolean equals(Object other) {
            return this == other || !(other == null || getClass() != other.getClass())
                    && super.equals(other)
                    && invocationHandler.equals(((ForStaticDelegation) other).invocationHandler);
        }

        @Override
        public int hashCode() {
            return 31 * super.hashCode() + invocationHandler.hashCode();
        }

        @Override
        public String toString() {
            return "InvocationHandlerAdapter.ForStaticDelegation{" +
                    "fieldName=" + fieldName +
                    ", cacheMethods=" + cacheMethods +
                    ", invocationHandler=" + invocationHandler +
                    '}';
        }

        /**
         * An appender for implementing the {@link net.bytebuddy.implementation.InvocationHandlerAdapter.ForStaticDelegation}.
         */
        protected class Appender implements ByteCodeAppender {

            /**
             * The instrumented type for which the methods are being intercepted.
             */
            private final TypeDescription instrumentedType;

            /**
             * Creates a new appender.
             *
             * @param instrumentedType The type that is instrumented.
             */
            protected Appender(TypeDescription instrumentedType) {
                this.instrumentedType = instrumentedType;
            }

            @Override
            public Size apply(MethodVisitor methodVisitor, Context implementationContext, MethodDescription instrumentedMethod) {
                return ForStaticDelegation.this.apply(methodVisitor,
                        implementationContext,
                        instrumentedMethod,
                        instrumentedType,
                        StackManipulation.Trivial.INSTANCE);
            }

            /**
             * Returns the outer class.
             *
             * @return The outer class of this instance.
             */
            private InvocationHandlerAdapter getInvocationHandlerAdapter() {
                return ForStaticDelegation.this;
            }

            @Override
            public boolean equals(Object other) {
                return this == other || !(other == null || getClass() != other.getClass())
                        && instrumentedType.equals(((Appender) other).instrumentedType)
                        && ForStaticDelegation.this.equals(((Appender) other).getInvocationHandlerAdapter());
            }

            @Override
            public int hashCode() {
                return 31 * ForStaticDelegation.this.hashCode() + instrumentedType.hashCode();
            }

            @Override
            public String toString() {
                // Fix: a ", " separator was missing before "instrumentedType",
                // producing run-together debug output.
                return "InvocationHandlerAdapter.ForStaticDelegation.Appender{" +
                        "invocationHandlerAdapter=" + ForStaticDelegation.this +
                        ", instrumentedType=" + instrumentedType +
                        '}';
            }
        }
    }

    /**
     * An implementation of an {@link net.bytebuddy.implementation.InvocationHandlerAdapter} that delegates method
     * invocations to an adapter that is stored in an instance field.
     */
    protected static class ForInstanceDelegation extends InvocationHandlerAdapter implements AssignerConfigurable {

        /**
         * Creates a new invocation handler adapter for delegating invocations to an invocation handler that is stored
         * in an instance field.
         *
         * @param fieldName    The name of the field.
         * @param cacheMethods Determines if the {@link java.lang.reflect.Method} instances that are handed to the
         *                     intercepted methods are cached in {@code static} fields.
         * @param assigner     The assigner to apply when defining this implementation.
         */
        protected ForInstanceDelegation(String fieldName, boolean cacheMethods, Assigner assigner) {
            super(fieldName, cacheMethods, assigner);
        }

        @Override
        public AssignerConfigurable withMethodCache() {
            return new ForInstanceDelegation(fieldName, CACHING, assigner);
        }

        @Override
        public Implementation withAssigner(Assigner assigner) {
            return new ForInstanceDelegation(fieldName, cacheMethods, assigner);
        }

        @Override
        public InstrumentedType prepare(InstrumentedType instrumentedType) {
            return instrumentedType.withField(new FieldDescription.Token(fieldName, Opcodes.ACC_SYNTHETIC | Opcodes.ACC_PUBLIC, INVOCATION_HANDLER_TYPE));
        }

        @Override
        public ByteCodeAppender appender(Target implementationTarget) {
            return new Appender(implementationTarget.getInstrumentedType());
        }

        @Override
        public String toString() {
            // Fix: a ", " separator was missing before "cacheMethods",
            // producing run-together debug output.
            return "InvocationHandlerAdapter.ForInstanceDelegation{" +
                    "fieldName=" + fieldName +
                    ", cacheMethods=" + cacheMethods +
                    '}';
        }

        /**
         * An appender for implementing the {@link net.bytebuddy.implementation.InvocationHandlerAdapter.ForInstanceDelegation}.
         */
        protected class Appender implements ByteCodeAppender {

            /**
             * The type that is subject of the instrumentation.
             */
            private final TypeDescription instrumentedType;

            /**
             * Creates a new appender.
             *
             * @param instrumentedType The type that is instrumented.
             */
            protected Appender(TypeDescription instrumentedType) {
                this.instrumentedType = instrumentedType;
            }

            @Override
            public Size apply(MethodVisitor methodVisitor, Context implementationContext, MethodDescription instrumentedMethod) {
                return ForInstanceDelegation.this.apply(methodVisitor,
                        implementationContext,
                        instrumentedMethod,
                        instrumentedType,
                        MethodVariableAccess.of(instrumentedType).loadOffset(0));
            }

            @Override
            public boolean equals(Object other) {
                return this == other || !(other == null || getClass() != other.getClass())
                        && instrumentedType.equals(((Appender) other).instrumentedType)
                        && ForInstanceDelegation.this.equals(((Appender) other).getInvocationHandlerAdapter());
            }

            /**
             * Returns the outer class.
             *
             * @return The outer class.
             */
            private InvocationHandlerAdapter getInvocationHandlerAdapter() {
                return ForInstanceDelegation.this;
            }

            @Override
            public int hashCode() {
                return 31 * ForInstanceDelegation.this.hashCode() + instrumentedType.hashCode();
            }

            @Override
            public String toString() {
                // Fix: a ", " separator was missing before "instrumentedType",
                // producing run-together debug output.
                return "InvocationHandlerAdapter.ForInstanceDelegation.Appender{" +
                        "invocationHandlerAdapter=" + ForInstanceDelegation.this +
                        ", instrumentedType=" + instrumentedType +
                        '}';
            }
        }
    }
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.catalina.webresources;
public class TestDirResourceSetReadOnly extends AbstractTestDirResourceSet {

    /**
     * Builds the shared directory-resource-set test fixture configured as read-only.
     */
    public TestDirResourceSetReadOnly() {
        super(true);
    }
}
|
/*
* Copyright 2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.headstartech.sermo;
import org.springframework.statemachine.action.Action;
/**
 * Dialog executor interface.
 *
 * A Dialog session is completed either if reaching an end state or if an {@link Action} throws an exception.
 *
 * @author Per Johansson
 */
public interface DialogExecutor<S, E extends DialogEvent> {
    /**
     * Applies an event for a session.
     *
     * If an exception is thrown by an {@code Action}, it's re-thrown out of this method.
     *
     * @param sessionId the id of the dialog session the event belongs to
     * @param event     the event to apply to the session's state machine
     * @return the {@link DialogEventResult} describing the outcome of applying the event
     * @throws DialogPersisterException if there is an error loading/persisting the dialog state
     * @throws RuntimeException thrown by {@link Action}s.
     */
    DialogEventResult applyEvent(String sessionId, E event);
    /**
     * Adds the dialog listener.
     *
     * @param listener the listener
     */
    void addListener(DialogListener<E> listener);
    /**
     * Removes the dialog listener.
     *
     * @param listener the listener
     */
    void removeListener(DialogListener<E> listener);
}
|
package moe.banana.mmio.misc;
import android.databinding.DataBindingUtil;
import android.databinding.ViewDataBinding;
import android.support.v7.widget.RecyclerView;
import android.view.View;
import moe.banana.mmio.BR;
/**
 * A {@link android.support.v7.widget.RecyclerView.ViewHolder} for Data Binding Library
 * Help parses binding object and set a holder variable on target binding.
 */
public class BindingViewHolder<VM extends ViewDataBinding> extends RecyclerView.ViewHolder {

    /**
     * Wraps an already-inflated, data-binding-enabled item view in a new holder.
     *
     * @param view the inflated item view
     * @return a holder whose {@link #binding} is attached to {@code view}
     */
    public static <VM extends ViewDataBinding> BindingViewHolder<VM> create(View view) {
        return new BindingViewHolder<>(view);
    }

    /** The binding resolved from the item view; exposes the generated binding API. */
    public VM binding;

    private BindingViewHolder(View itemView) {
        super(itemView);
        // Resolve the generated binding for the view and publish this holder to it
        // under the BR.holder variable so binding expressions can reference it.
        VM resolved = DataBindingUtil.bind(itemView);
        resolved.setVariable(BR.holder, this);
        binding = resolved;
    }
}
|
package de.tum.in.www1.artemis.programmingexercise;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.doReturn;
import java.util.List;
import org.eclipse.jgit.lib.ObjectId;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.EnumSource;
import org.mockito.ArgumentMatchers;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.security.test.context.support.WithMockUser;
import de.tum.in.www1.artemis.AbstractSpringIntegrationBambooBitbucketJiraTest;
import de.tum.in.www1.artemis.config.Constants;
import de.tum.in.www1.artemis.domain.enumeration.ProgrammingLanguage;
import de.tum.in.www1.artemis.util.ModelFactory;
import de.tum.in.www1.artemis.util.ProgrammingExerciseResultTestService;
class ProgrammingExerciseResultBambooIntegrationTest extends AbstractSpringIntegrationBambooBitbucketJiraTest {

    @Autowired
    private ProgrammingExerciseResultTestService programmingExerciseResultTestService;

    @BeforeEach
    void setup() {
        programmingExerciseResultTestService.setup();
        // Every commit-hash lookup during result processing returns this fixed hash.
        String dummyHash = "9b3a9bd71a0d80e5bbc42204c319ed3d1d4f0d6d";
        doReturn(ObjectId.fromString(dummyHash)).when(gitService).getLastCommitHash(ArgumentMatchers.any());
    }

    @AfterEach
    void tearDown() {
        programmingExerciseResultTestService.tearDown();
    }

    @Test
    @WithMockUser(value = "instructor1", roles = "INSTRUCTOR")
    void shouldUpdateFeedbackInSemiAutomaticResult() throws Exception {
        var notification = ModelFactory.generateBambooBuildResult("assignment", List.of("test1"), List.of());
        var loginName = "student1";
        // Bamboo plan keys are upper-cased <projectKey>-<login>.
        var planKey = (programmingExerciseResultTestService.getProgrammingExercise().getProjectKey() + "-" + loginName).toUpperCase();
        notification.getPlan().setKey(planKey);
        programmingExerciseResultTestService.shouldUpdateFeedbackInSemiAutomaticResult(notification, loginName);
    }

    // Consistency: JUnit 5 test methods need not be public; package-private visibility
    // is used throughout, matching setup(), tearDown() and the first test above.
    @Test
    @WithMockUser(value = "student1", roles = "USER")
    void shouldUpdateTestCasesAndResultScoreFromSolutionParticipationResult() {
        var notification = ModelFactory.generateBambooBuildResult(Constants.ASSIGNMENT_REPO_NAME, List.of("test1", "test2", "test4"), List.of());
        programmingExerciseResultTestService.shouldUpdateTestCasesAndResultScoreFromSolutionParticipationResult(notification, false);
    }

    @Test
    @WithMockUser(value = "student1", roles = "USER")
    void shouldUpdateTestCasesAndResultScoreFromSolutionParticipationResultWithFailedTest() {
        var notification = ModelFactory.generateBambooBuildResult(Constants.ASSIGNMENT_REPO_NAME, List.of("test1", "test2", "test4"), List.of("test3"));
        programmingExerciseResultTestService.shouldUpdateTestCasesAndResultScoreFromSolutionParticipationResult(notification, true);
    }

    @ParameterizedTest(name = "{displayName} [{index}] {argumentsWithNames}")
    @EnumSource(value = ProgrammingLanguage.class, names = { "JAVA", "SWIFT" })
    @WithMockUser(value = "student1", roles = "USER")
    void shouldStoreFeedbackForResultWithStaticCodeAnalysisReport(ProgrammingLanguage programmingLanguage) {
        programmingExerciseResultTestService.setupForProgrammingLanguage(programmingLanguage);
        var notification = ModelFactory.generateBambooBuildResultWithStaticCodeAnalysisReport(Constants.ASSIGNMENT_REPO_NAME, List.of("test1"), List.of(), programmingLanguage);
        var scaReports = notification.getBuild().getJobs().get(0).getStaticCodeAnalysisReports();
        if (programmingLanguage == ProgrammingLanguage.SWIFT) {
            // SwiftLint has only one category at the moment
            assertThat(scaReports.size()).isEqualTo(1);
            assertThat(scaReports.get(0).getIssues().get(0).getCategory()).isEqualTo("swiftLint");
        }
        else if (programmingLanguage == ProgrammingLanguage.JAVA) {
            assertThat(scaReports.size()).isEqualTo(4);
            scaReports.get(0).getIssues().forEach(issue -> assertThat(issue.getCategory()).isNotNull());
        }
        programmingExerciseResultTestService.shouldStoreFeedbackForResultWithStaticCodeAnalysisReport(notification, programmingLanguage);
    }

    @Test
    @WithMockUser(value = "student1", roles = "USER")
    void shouldStoreBuildLogsForSubmission() {
        var resultNotification = ModelFactory.generateBambooBuildResultWithLogs(Constants.ASSIGNMENT_REPO_NAME, List.of("test1"), List.of());
        programmingExerciseResultTestService.shouldStoreBuildLogsForSubmission(resultNotification);
    }

    @Test
    @WithMockUser(value = "student1", roles = "USER")
    void shouldSaveBuildLogsForSuccessfulBuildInBuildLogRepository() {
        var resultNotification = ModelFactory.generateBambooBuildResultWithLogs(Constants.ASSIGNMENT_REPO_NAME, List.of("test1"), List.of());
        programmingExerciseResultTestService.shouldSaveBuildLogsInBuildLogRepository(resultNotification);
    }

    @Test
    @WithMockUser(value = "student1", roles = "USER")
    void shouldSaveBuildLogsForFailedBuildInBuildLogRepository() {
        var resultNotification = ModelFactory.generateBambooBuildResultWithLogs(Constants.ASSIGNMENT_REPO_NAME, List.of("test1"), List.of("test2"));
        programmingExerciseResultTestService.shouldSaveBuildLogsInBuildLogRepository(resultNotification);
    }

    @Test
    @WithMockUser(value = "student1", roles = "USER")
    void shouldGenerateNewManualResultIfManualAssessmentExists() {
        var notification = ModelFactory.generateBambooBuildResult(Constants.ASSIGNMENT_REPO_NAME, List.of("test1", "test2", "test4"), List.of());
        programmingExerciseResultTestService.shouldGenerateNewManualResultIfManualAssessmentExists(notification);
    }
}
|
/*
* Copyright (c) 2015-2020, www.dibo.ltd (service@dibo.ltd).
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
* <p>
* https://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package com.diboot.iam.vo;
import com.alibaba.fastjson.annotation.JSONField;
import com.diboot.core.binding.annotation.BindDict;
import com.diboot.core.binding.annotation.BindEntityList;
import com.diboot.core.binding.annotation.BindField;
import com.diboot.core.util.V;
import com.diboot.iam.config.Cons;
import com.diboot.iam.entity.IamFrontendPermission;
import lombok.Getter;
import lombok.Setter;
import lombok.experimental.Accessors;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;
/**
* 前端菜单 VO定义
* @author yangzhao
* @version 2.0.0
* @date 2020-02-27
* Copyright © diboot.com
*/
@Getter
@Setter
@Accessors(chain = true)
public class IamFrontendPermissionVO extends IamFrontendPermission {
    private static final long serialVersionUID = 6643651522844488124L;

    // Data dictionary type bound to the display_type column
    public static final String DICT_FRONTEND_PERMISSION_TYPE = "FRONTEND_PERMISSION_TYPE";

    // Field binding: this.parent_id=id (resolves the parent's display name)
    @BindField(entity = IamFrontendPermission.class, field = "displayName", condition = "this.parent_id=id")
    private String parentDisplayName;

    // Dictionary binding: FRONTEND_PERMISSION_TYPE
    @BindDict(type = DICT_FRONTEND_PERMISSION_TYPE, field = "displayType")
    private String displayTypeLabel;

    // Child permission entities (this.id=parent_id); excluded from JSON serialization
    @JSONField(serialize = false)
    @BindEntityList(entity = IamFrontendPermission.class, condition = "this.id=parent_id")
    private List<IamFrontendPermission> childrenList;

    /**
     * Returns the children that are menus.
     *
     * @return the MENU-typed children, or an empty list when there are none
     */
    public List<IamFrontendPermission> getChildren(){
        return filterChildrenByDisplayType(Cons.FRONTEND_PERMISSION_DISPLAY_TYPE.MENU.name());
    }

    /**
     * Returns the children that are buttons/permissions.
     *
     * @return the PERMISSION-typed children, or an empty list when there are none
     */
    public List<IamFrontendPermission> getPermissionList(){
        return filterChildrenByDisplayType(Cons.FRONTEND_PERMISSION_DISPLAY_TYPE.PERMISSION.name());
    }

    /**
     * Shared filter behind the two typed child accessors; removes the duplicated
     * empty-check and stream pipeline.
     *
     * @param displayType the display type to keep (never null — always an enum name)
     * @return children whose display type equals {@code displayType}, or an empty list
     */
    private List<IamFrontendPermission> filterChildrenByDisplayType(String displayType) {
        if (V.isEmpty(childrenList)){
            return Collections.emptyList();
        }
        return childrenList.stream()
                .filter(item -> displayType.equals(item.getDisplayType()))
                .collect(Collectors.toList());
    }
}
|
/* Copyright 2018 Telstra Open Source
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.openkilda.wfm.topology.ping;
import org.openkilda.persistence.PersistenceManager;
import org.openkilda.persistence.spi.PersistenceProvider;
import org.openkilda.wfm.LaunchEnvironment;
import org.openkilda.wfm.share.flow.resources.FlowResourcesConfig;
import org.openkilda.wfm.topology.AbstractTopology;
import org.openkilda.wfm.topology.ping.bolt.Blacklist;
import org.openkilda.wfm.topology.ping.bolt.ComponentId;
import org.openkilda.wfm.topology.ping.bolt.FailReporter;
import org.openkilda.wfm.topology.ping.bolt.FlowFetcher;
import org.openkilda.wfm.topology.ping.bolt.FlowStatusEncoder;
import org.openkilda.wfm.topology.ping.bolt.GroupCollector;
import org.openkilda.wfm.topology.ping.bolt.InputRouter;
import org.openkilda.wfm.topology.ping.bolt.MonotonicTick;
import org.openkilda.wfm.topology.ping.bolt.NorthboundEncoder;
import org.openkilda.wfm.topology.ping.bolt.OnDemandResultManager;
import org.openkilda.wfm.topology.ping.bolt.OtsdbEncoder;
import org.openkilda.wfm.topology.ping.bolt.PeriodicPingShaping;
import org.openkilda.wfm.topology.ping.bolt.PeriodicResultManager;
import org.openkilda.wfm.topology.ping.bolt.PingProducer;
import org.openkilda.wfm.topology.ping.bolt.PingRouter;
import org.openkilda.wfm.topology.ping.bolt.ResultDispatcher;
import org.openkilda.wfm.topology.ping.bolt.SpeakerEncoder;
import org.openkilda.wfm.topology.ping.bolt.StatsProducer;
import org.openkilda.wfm.topology.ping.bolt.TickDeduplicator;
import org.openkilda.wfm.topology.ping.bolt.TickId;
import org.openkilda.wfm.topology.ping.bolt.TimeoutManager;
import org.apache.storm.generated.StormTopology;
import org.apache.storm.kafka.bolt.KafkaBolt;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.tuple.Fields;
import java.util.concurrent.TimeUnit;
/**
 * Storm topology implementing flow pings (periodic pings driven by a tick clock
 * plus on-demand pings requested over kafka).
 *
 * <p>Each private method below declares one bolt (and, for the encoders, its kafka
 * output bolt) and wires its input groupings; {@link #createTopology()} lists the
 * stages in pipeline order.
 */
public class PingTopology extends AbstractTopology<PingTopologyConfig> {
    protected PingTopology(LaunchEnvironment env) {
        super(env, "ping-topology", PingTopologyConfig.class);
    }
    /**
     * Implement flow pings.
     *
     * <p>Topology sequence diagram plus design document:
     * https://github.com/telstra/open-kilda/tree/master/docs/design/flow-ping/flow-ping.md
     */
    @Override
    public StormTopology createTopology() {
        TopologyBuilder topology = new TopologyBuilder();
        monotonicTick(topology);
        tickDeduplicator(topology);
        input(topology);
        inputRouter(topology);
        flowFetcher(topology);
        periodicPingShaping(topology);
        pingProducer(topology);
        pingRouter(topology);
        blacklist(topology);
        timeoutManager(topology);
        resultDispatcher(topology);
        periodicResultManager(topology);
        onDemandResultManager(topology);
        groupCollector(topology);
        statsProducer(topology);
        failReporter(topology);
        flowStatusEncoder(topology);
        otsdbEncoder(topology);
        speakerEncoder(topology);
        northboundEncoder(topology);
        return topology.createTopology();
    }
    // Single clock source; the PERIODIC_PING tick interval drives all periodic work.
    private void monotonicTick(TopologyBuilder topology) {
        MonotonicTick bolt = new MonotonicTick(
                new MonotonicTick.ClockConfig()
                        .addTickInterval(TickId.PERIODIC_PING, topologyConfig.getPingInterval()));
        declareBolt(topology, bolt, MonotonicTick.BOLT_ID);
    }
    private void tickDeduplicator(TopologyBuilder topology) {
        declareBolt(topology, new TickDeduplicator(1, TimeUnit.SECONDS), TickDeduplicator.BOLT_ID)
                .globalGrouping(MonotonicTick.BOLT_ID);
    }
    // Kafka spout that feeds all external requests/responses into the topology.
    private void input(TopologyBuilder topology) {
        declareKafkaSpout(topology, topologyConfig.getKafkaPingTopic(), ComponentId.INPUT.toString());
    }
    private void inputRouter(TopologyBuilder topology) {
        InputRouter bolt = new InputRouter();
        declareBolt(topology, bolt, InputRouter.BOLT_ID)
                .shuffleGrouping(ComponentId.INPUT.toString());
    }
    private void flowFetcher(TopologyBuilder topology) {
        PersistenceManager persistenceManager =
                PersistenceProvider.getInstance().getPersistenceManager(configurationProvider);
        FlowResourcesConfig flowResourcesConfig = configurationProvider.getConfiguration(FlowResourcesConfig.class);
        FlowFetcher bolt = new FlowFetcher(persistenceManager, flowResourcesConfig,
                topologyConfig.getPeriodicPingCacheExpirationInterval());
        declareBolt(topology, bolt, FlowFetcher.BOLT_ID)
                // NOTE(tdurakov): global grouping is responsible for proper handling parallelism of 2
                .globalGrouping(TickDeduplicator.BOLT_ID, TickDeduplicator.STREAM_PING_ID)
                .shuffleGrouping(InputRouter.BOLT_ID, InputRouter.STREAM_ON_DEMAND_REQUEST_ID)
                .allGrouping(InputRouter.BOLT_ID, InputRouter.STREAM_PERIODIC_PING_UPDATE_REQUEST_ID);
    }
    private void periodicPingShaping(TopologyBuilder topology) {
        PeriodicPingShaping bolt = new PeriodicPingShaping(topologyConfig.getPingInterval());
        declareBolt(topology, bolt, PeriodicPingShaping.BOLT_ID)
                .allGrouping(TickDeduplicator.BOLT_ID)
                .shuffleGrouping(FlowFetcher.BOLT_ID);
    }
    // Grouped by flow id so each flow's pings are produced by a single executor.
    private void pingProducer(TopologyBuilder topology) {
        PingProducer bolt = new PingProducer();
        declareBolt(topology, bolt, PingProducer.BOLT_ID)
                .fieldsGrouping(PeriodicPingShaping.BOLT_ID, new Fields(PeriodicPingShaping.FIELD_ID_FLOW_ID));
    }
    private void pingRouter(TopologyBuilder topology) {
        PingRouter bolt = new PingRouter();
        declareBolt(topology, bolt, PingRouter.BOLT_ID)
                .shuffleGrouping(PingProducer.BOLT_ID)
                .shuffleGrouping(Blacklist.BOLT_ID)
                .shuffleGrouping(InputRouter.BOLT_ID, InputRouter.STREAM_SPEAKER_PING_RESPONSE_ID)
                .shuffleGrouping(
                        PeriodicResultManager.BOLT_ID, PeriodicResultManager.STREAM_BLACKLIST_ID);
    }
    // Both blacklist streams use the same field grouping so a given ping match
    // always lands on the same executor.
    private void blacklist(TopologyBuilder topology) {
        Blacklist bolt = new Blacklist();
        Fields grouping = new Fields(PingRouter.FIELD_ID_PING_MATCH);
        declareBolt(topology, bolt, Blacklist.BOLT_ID)
                .fieldsGrouping(PingRouter.BOLT_ID, PingRouter.STREAM_BLACKLIST_FILTER_ID, grouping)
                .fieldsGrouping(PingRouter.BOLT_ID, PingRouter.STREAM_BLACKLIST_UPDATE_ID, grouping);
    }
    private void timeoutManager(TopologyBuilder topology) {
        TimeoutManager bolt = new TimeoutManager(topologyConfig.getTimeout());
        final Fields pingIdGrouping = new Fields(PingRouter.FIELD_ID_PING_ID);
        declareBolt(topology, bolt, TimeoutManager.BOLT_ID)
                .allGrouping(TickDeduplicator.BOLT_ID)
                .fieldsGrouping(PingRouter.BOLT_ID, PingRouter.STREAM_REQUEST_ID, pingIdGrouping)
                .fieldsGrouping(PingRouter.BOLT_ID, PingRouter.STREAM_RESPONSE_ID, pingIdGrouping);
    }
    private void resultDispatcher(TopologyBuilder topology) {
        ResultDispatcher bolt = new ResultDispatcher();
        declareBolt(topology, bolt, ResultDispatcher.BOLT_ID)
                .shuffleGrouping(TimeoutManager.BOLT_ID, TimeoutManager.STREAM_RESPONSE_ID);
    }
    private void periodicResultManager(TopologyBuilder topology) {
        PeriodicResultManager bolt = new PeriodicResultManager();
        declareBolt(topology, bolt, PeriodicResultManager.BOLT_ID)
                .shuffleGrouping(ResultDispatcher.BOLT_ID, ResultDispatcher.STREAM_PERIODIC_ID);
    }
    private void onDemandResultManager(TopologyBuilder topology) {
        OnDemandResultManager bolt = new OnDemandResultManager();
        declareBolt(topology, bolt, OnDemandResultManager.BOLT_ID)
                .shuffleGrouping(ResultDispatcher.BOLT_ID, ResultDispatcher.STREAM_MANUAL_ID)
                .shuffleGrouping(GroupCollector.BOLT_ID, GroupCollector.STREAM_ON_DEMAND_ID);
    }
    private void groupCollector(TopologyBuilder topology) {
        GroupCollector bolt = new GroupCollector(topologyConfig.getTimeout());
        declareBolt(topology, bolt, GroupCollector.BOLT_ID)
                .allGrouping(TickDeduplicator.BOLT_ID)
                .fieldsGrouping(
                        OnDemandResultManager.BOLT_ID, OnDemandResultManager.STREAM_GROUP_ID,
                        new Fields(OnDemandResultManager.FIELD_ID_GROUP_ID));
    }
    private void statsProducer(TopologyBuilder topology) {
        StatsProducer bolt = new StatsProducer(topologyConfig.getMetricPrefix());
        declareBolt(topology, bolt, StatsProducer.BOLT_ID)
                .shuffleGrouping(PeriodicResultManager.BOLT_ID, PeriodicResultManager.STREAM_STATS_ID);
    }
    private void failReporter(TopologyBuilder topology) {
        FailReporter bolt = new FailReporter(
                topologyConfig.getFailDelay(), topologyConfig.getFailReset());
        Fields groupBy = new Fields(PeriodicResultManager.FIELD_ID_FLOW_ID);
        declareBolt(topology, bolt, FailReporter.BOLT_ID)
                .allGrouping(TickDeduplicator.BOLT_ID)
                .allGrouping(FlowFetcher.BOLT_ID, FlowFetcher.STREAM_EXPIRE_CACHE_ID)
                .fieldsGrouping(PeriodicResultManager.BOLT_ID, PeriodicResultManager.STREAM_FAIL_ID, groupBy);
    }
    private void flowStatusEncoder(TopologyBuilder topology) {
        FlowStatusEncoder bolt = new FlowStatusEncoder();
        declareBolt(topology, bolt, FlowStatusEncoder.BOLT_ID)
                .shuffleGrouping(FailReporter.BOLT_ID);
        KafkaBolt output = buildKafkaBolt(topologyConfig.getKafkaFlowStatusTopic());
        declareBolt(topology, output, ComponentId.FLOW_STATUS_OUTPUT.toString())
                .shuffleGrouping(FlowStatusEncoder.BOLT_ID);
    }
    private void otsdbEncoder(TopologyBuilder topology) {
        OtsdbEncoder bolt = new OtsdbEncoder();
        declareBolt(topology, bolt, OtsdbEncoder.BOLT_ID)
                .shuffleGrouping(StatsProducer.BOLT_ID);
        // NOTE(review): this is the only encoder using createKafkaBolt; the others use
        // buildKafkaBolt — confirm whether the difference is intentional.
        KafkaBolt output = createKafkaBolt(topologyConfig.getKafkaOtsdbTopic());
        declareBolt(topology, output, ComponentId.OTSDB_OUTPUT.toString())
                .shuffleGrouping(OtsdbEncoder.BOLT_ID);
    }
    private void speakerEncoder(TopologyBuilder topology) {
        SpeakerEncoder bolt = new SpeakerEncoder();
        declareBolt(topology, bolt, SpeakerEncoder.BOLT_ID)
                .shuffleGrouping(TimeoutManager.BOLT_ID, TimeoutManager.STREAM_REQUEST_ID);
        KafkaBolt output = buildKafkaBolt(topologyConfig.getKafkaSpeakerFlowPingTopic());
        declareBolt(topology, output, ComponentId.SPEAKER_OUTPUT.toString())
                .shuffleGrouping(SpeakerEncoder.BOLT_ID);
    }
    private void northboundEncoder(TopologyBuilder topology) {
        NorthboundEncoder bolt = new NorthboundEncoder();
        declareBolt(topology, bolt, NorthboundEncoder.BOLT_ID)
                .shuffleGrouping(FlowFetcher.BOLT_ID, FlowFetcher.STREAM_ON_DEMAND_RESPONSE_ID)
                .shuffleGrouping(OnDemandResultManager.BOLT_ID);
        KafkaBolt output = buildKafkaBolt(topologyConfig.getKafkaNorthboundTopic());
        declareBolt(topology, output, ComponentId.NORTHBOUND_OUTPUT.toString())
                .shuffleGrouping(NorthboundEncoder.BOLT_ID);
    }
    /**
     * Topology entry point.
     */
    public static void main(String[] args) {
        try {
            LaunchEnvironment env = new LaunchEnvironment(args);
            (new PingTopology(env)).setup();
        } catch (Exception e) {
            System.exit(handleLaunchException(e));
        }
    }
}
|
package com.jsoniter.benchmark.with_10_int_fields;
import com.jsoniter.benchmark.All;
import org.apache.thrift.TException;
import org.apache.thrift.TSerializer;
import org.apache.thrift.protocol.TCompactProtocol;
import org.apache.thrift.protocol.TProtocol;
import org.apache.thrift.transport.TIOStreamTransport;
import org.junit.Test;
import org.openjdk.jmh.Main;
import org.openjdk.jmh.annotations.*;
import org.openjdk.jmh.infra.BenchmarkParams;
import org.openjdk.jmh.infra.Blackhole;
import org.openjdk.jmh.runner.RunnerException;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
/*
Benchmark Mode Cnt Score Error Units
SerThrift.ser thrpt 5 2128221.323 ± 117657.295 ops/s
*/
@State(Scope.Thread)
public class SerThrift {

    /** Payload populated once per trial in {@link #benchSetup}. */
    private ThriftTestObject testObject;
    /** In-memory sink the compact protocol writes into; reset before each run. */
    private ByteArrayOutputStream byteArrayOutputStream;
    private TProtocol protocol;

    /**
     * Builds the ten-int-field payload and a reusable compact-protocol writer
     * backed by an in-memory stream.
     */
    @Setup(Level.Trial)
    public void benchSetup(BenchmarkParams params) throws TException {
        byteArrayOutputStream = new ByteArrayOutputStream();
        TCompactProtocol.Factory factory = new TCompactProtocol.Factory();
        protocol = factory.getProtocol(new TIOStreamTransport(byteArrayOutputStream));
        testObject = new ThriftTestObject();
        testObject.setField1(31415926);
        testObject.setField2(61415923);
        testObject.setField3(31415269);
        testObject.setField4(53141926);
        testObject.setField5(13145926);
        testObject.setField6(43115926);
        testObject.setField7(31419265);
        testObject.setField8(23141596);
        testObject.setField9(43161592);
        testObject.setField10(112);
    }

    /** Sanity check (run via JUnit, not JMH): serializes once and prints the byte count. */
    @Test
    public void test() throws TException {
        TSerializer serializer = new TSerializer(new TCompactProtocol.Factory());
        byte[] output = serializer.serialize(testObject);
        System.out.println(output.length);
    }

    /**
     * Benchmark body: resets the buffer, writes the payload through the shared
     * protocol, and hands the stream to the blackhole so the work cannot be elided.
     */
    @Benchmark
    public void ser(Blackhole bh) throws TException {
        byteArrayOutputStream.reset();
        testObject.write(protocol);
        bh.consume(byteArrayOutputStream);
    }

    /** Runs just this benchmark through the JMH launcher. */
    public static void main(String[] args) throws IOException, RunnerException {
        All.loadJMH();
        Main.main(new String[]{
                "with_10_int_fields.SerThrift",
                "-i", "5",
                "-wi", "5",
                "-f", "1",
        });
    }
}
|
/**
* Copyright (c) 2014, impossibl.com
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of impossibl.com nor the names of its contributors may
* be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.impossibl.postgres.jdbc.xa;
import com.impossibl.postgres.api.jdbc.PGConnection;
import com.impossibl.postgres.jdbc.PGPooledConnectionDelegator;
import com.impossibl.postgres.jdbc.PGSQLSimpleException;
import java.sql.SQLException;
import java.sql.Savepoint;
/**
 * Connection handle for PGXAConnection
 * @author <a href="mailto:jesper.pedersen@redhat.com">Jesper Pedersen</a>
 */
public class PGXAConnectionDelegator extends PGPooledConnectionDelegator {

  /** The XA connection that owns this handle and tracks the transaction state. */
  private PGXAConnection owner;

  /**
   * Constructor
   * @param owner The owner
   * @param delegator The delegator
   */
  public PGXAConnectionDelegator(PGXAConnection owner, PGConnection delegator) {
    super(owner, delegator);
    this.owner = owner;
  }

  /**
   * Rejects a local transaction-control call: builds the SQLSTATE 55000 error,
   * reports it to the owner and throws it. Extracted because the same sequence
   * was duplicated in commit/rollback/rollback(Savepoint)/setAutoCommit.
   *
   * @param operation the operation name used in the error message
   * @throws SQLException always
   */
  private void failNotAllowed(String operation) throws SQLException {
    SQLException se = new PGSQLSimpleException(operation + " not allowed", "55000");
    owner.fireConnectionError(se);
    throw se;
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public void commit() throws SQLException {
    if (owner.getState() != PGXAConnection.STATE_IDLE) {
      failNotAllowed("commit");
    }
    super.commit();
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public void rollback() throws SQLException {
    if (owner.getState() != PGXAConnection.STATE_IDLE) {
      failNotAllowed("rollback");
    }
    super.rollback();
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public void rollback(Savepoint savepoint) throws SQLException {
    if (owner.getState() != PGXAConnection.STATE_IDLE) {
      failNotAllowed("rollback(Savepoint)");
    }
    super.rollback(savepoint);
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public void setAutoCommit(boolean autoCommit) throws SQLException {
    // Disabling auto-commit is always allowed; only enabling it is rejected while enlisted.
    if (owner.getState() != PGXAConnection.STATE_IDLE && autoCommit) {
      failNotAllowed("setAutoCommit(true)");
    }
    super.setAutoCommit(autoCommit);
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public int hashCode() {
    // NOTE(review): equals() below compares by owner while hashCode() delegates to
    // super — confirm the superclass hash is owner-based, otherwise the
    // equals/hashCode contract is violated for equal-but-distinct delegators.
    return super.hashCode();
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public boolean equals(Object o) {
    if (this == o)
      return true;
    // instanceof is false for null, so no separate null check is needed
    if (!(o instanceof PGXAConnectionDelegator))
      return false;
    PGXAConnectionDelegator other = (PGXAConnectionDelegator)o;
    return owner.equals(other.owner);
  }
}
|
package com.lukedeighton.typedadapter.adapter;
import java.util.HashMap;
import java.util.List;
/**
 * Handles View Type Information for each Row Item
 */
class ViewTypeManager {

    /** Sentinel: derive the total view type count from the distinct item classes seen. */
    public static final int DYNAMIC_COUNT = -1;

    /** Maps each item class to its assigned view type index, in first-seen order. */
    private HashMap<Class<?>, Integer> mViewTypeMap;
    /** How many distinct view types have been registered so far. */
    private int mUsedViewTypeCount;
    /** The fixed total reported to the adapter; never changes after construction. */
    private final int mViewTypeCount;

    public ViewTypeManager(List<?> objs) {
        this(objs, DYNAMIC_COUNT);
    }

    public ViewTypeManager(List<?> objs, int viewTypeCount) {
        mViewTypeMap = new HashMap<Class<?>, Integer>();
        int registered = updateViewTypeInfoInternal(objs);
        if (viewTypeCount >= 1) {
            checkViewTypeCount(viewTypeCount);
            mViewTypeCount = viewTypeCount;
        } else {
            mViewTypeCount = registered;
        }
    }

    // getViewTypeCount() is only consulted once by the list view, so the total can
    // never grow after construction; to change it, create a new adapter or see:
    // http://stackoverflow.com/questions/13000708/dynamically-update-view-types-of-listview
    private void checkViewTypeCount(int viewTypeCount) {
        if (mUsedViewTypeCount > viewTypeCount) {
            throw new IllegalStateException("Cannot dynamically add more ViewTypes: "
                    + viewTypeCount + " current ViewTypeCount, requires " + mUsedViewTypeCount);
        }
    }

    /** Registers any unseen item classes and returns the updated registered count. */
    private int updateViewTypeInfoInternal(List<?> objs) {
        int nextType = mUsedViewTypeCount;
        for (Object item : objs) {
            Class<?> itemClass = item.getClass();
            if (!mViewTypeMap.containsKey(itemClass)) {
                mViewTypeMap.put(itemClass, nextType++);
            }
        }
        mUsedViewTypeCount = nextType;
        return nextType;
    }

    public void updateViewTypeInfo(List<?> objs) {
        updateViewTypeInfoInternal(objs);
        checkViewTypeCount(mViewTypeCount);
    }

    public int getItemViewType(Object obj) {
        // With fewer than two registered types every item shares type 0.
        if (mUsedViewTypeCount < 2) {
            return 0;
        }
        return mViewTypeMap.get(obj.getClass());
    }

    public int getViewTypeCount() {
        return mViewTypeCount;
    }
}
|
package com.jimmy.Util;
import java.util.*;
import java.util.regex.*;
/**
 * Regular-expression helper utilities for matching and replacing strings.
 * (The original header comment was GBK-encoded and garbled; translated to English.)
 * @author
 * @version
 */
/**
 * Regular-expression helpers for matching, replacing and splitting strings,
 * modelled loosely on PHP's ereg family. (Documentation translated to English
 * from the original garbled comments.)
 */
public class RegExUtil {
    /**
     * Tests whether the pattern matches anywhere in the string.
     *
     * @param pattern regular-expression pattern
     * @param str     string to search
     * @return true when the pattern is found at least once
     * @throws PatternSyntaxException when the pattern is invalid
     * @since 1.0
     */
    public static final boolean ereg(String pattern, String str) throws PatternSyntaxException
    {
        // Pattern.compile throws PatternSyntaxException itself; the original
        // catch-and-rethrow block was a no-op and has been removed.
        Matcher m = Pattern.compile(pattern).matcher(str);
        return m.find();
    }
    /**
     * Replaces every match of the pattern with the replacement string.
     *
     * @param pattern regular-expression pattern
     * @param newstr  replacement for each match (may use $n group references)
     * @param str     original string
     * @return the string with all matches replaced
     * @throws PatternSyntaxException when the pattern is invalid
     * @since 1.0
     */
    public static final String ereg_replace(String pattern, String newstr, String str) throws PatternSyntaxException
    {
        Matcher m = Pattern.compile(pattern).matcher(str);
        return m.replaceAll(newstr);
    }
    /**
     * Collects every match of the pattern into a Vector, stripping the
     * template tag delimiters "[#" and "#]" from each match.
     *
     * @param pattern regular-expression pattern
     * @param str     original string (template text)
     * @return vector of tag names found in the template
     * @throws PatternSyntaxException when the pattern is invalid
     * @since 1.0
     */
    public static final Vector splitTags2Vector(String pattern, String str) throws PatternSyntaxException
    {
        // Raw Vector kept for backward compatibility with existing callers.
        Vector vector = new Vector();
        Matcher m = Pattern.compile(pattern).matcher(str);
        while (m.find())
        {
            vector.add(ereg_replace("(\\[\\#)|(\\#\\])", "", m.group()));
        }
        return vector;
    }
    /**
     * Collects every match of the pattern into a String array, stripping the
     * template tag delimiters "[#" and "#]" from each match.
     *
     * @param pattern regular-expression pattern
     * @param str     original string (template text)
     * @return array of tag names found in the template
     * @since 1.0
     */
    public static final String[] splitTags(String pattern, String str)
    {
        // BUG FIX: the original sized the array with m.groupCount(), which is
        // the number of capturing groups in the PATTERN, not the number of
        // matches in the input -- causing ArrayIndexOutOfBoundsException (or
        // a wrongly sized result) for most patterns. Matches are gathered in
        // a list and converted at the end instead.
        List<String> matches = new ArrayList<String>();
        Matcher m = Pattern.compile(pattern).matcher(str);
        while (m.find())
        {
            matches.add(ereg_replace("(\\[\\#)|(\\#\\])", "", m.group()));
        }
        return matches.toArray(new String[matches.size()]);
    }
    /**
     * Collects every match of the pattern into a Vector, unmodified.
     *
     * @param pattern regular-expression pattern
     * @param str     original string
     * @return vector of matched substrings, in order of occurrence
     * @throws PatternSyntaxException when the pattern is invalid
     * @since 1.0
     */
    public static final Vector regMatchAll2Vector(String pattern, String str) throws PatternSyntaxException
    {
        Vector vector = new Vector();
        Matcher m = Pattern.compile(pattern).matcher(str);
        while (m.find())
        {
            vector.add(m.group());
        }
        return vector;
    }
    /**
     * Collects every match of the pattern into a String array, unmodified.
     *
     * @param pattern regular-expression pattern
     * @param str     original string
     * @return array of matched substrings, in order of occurrence
     * @throws PatternSyntaxException when the pattern is invalid
     * @since 1.0
     */
    public static final String[] regMatchAll2Array(String pattern, String str) throws PatternSyntaxException
    {
        // BUG FIX: same m.groupCount() sizing bug as splitTags (see above).
        List<String> matches = new ArrayList<String>();
        Matcher m = Pattern.compile(pattern).matcher(str);
        while (m.find())
        {
            matches.add(m.group());
        }
        return matches.toArray(new String[matches.size()]);
    }
    /**
     * Escapes '\' and '$' so the result can be used safely as a replacement
     * string in Matcher.replaceAll / appendReplacement. (Escaping via
     * replaceAll itself is not possible -- e.g. "$".replaceAll("\\$", "\\\\$")
     * throws at runtime -- hence the manual character-by-character approach.)
     *
     * @param original string to escape
     * @return the escaped string
     * @since 1.0
     */
    public static String escapeDollarBackslash(String original) {
        StringBuffer buffer = new StringBuffer(original.length());
        for (int i = 0; i < original.length(); i++) {
            char c = original.charAt(i);
            if (c == '\\' || c == '$') {
                buffer.append("\\").append(c);
            } else {
                buffer.append(c);
            }
        }
        return buffer.toString();
    }
    /**
     * Returns the LAST match of the pattern within the string.
     *
     * @param pattern regular-expression pattern
     * @param str     original string
     * @return the last matched substring, or null when there is no match or
     *         the pattern is invalid (preserved original behavior: pattern
     *         errors are swallowed and yield null)
     * @since 1.0
     */
    public static final String fetchStr(String pattern, String str) {
        String lastMatch = null;
        try {
            Matcher m = Pattern.compile(pattern).matcher(str);
            while (m.find()) {
                lastMatch = m.group();
            }
        } catch (PatternSyntaxException e) {
            // Preserved behavior: an invalid pattern yields null rather than
            // propagating the exception.
        }
        return lastMatch;
    }
}
|
package be.aga.dominionSimulator.cards;
import be.aga.dominionSimulator.DomCard;
import be.aga.dominionSimulator.enums.DomCardName;
import be.aga.dominionSimulator.enums.DomCardType;
import java.util.ArrayList;
public class Trusty_SteedCard extends DomCard {

    /** Number of Silvers gained by the "Silvers" options. */
    private static final int SILVER_COUNT = 4;

    public Trusty_SteedCard () {
        super( DomCardName.Trusty_Steed);
    }

    /**
     * Plays Trusty Steed. Humans pick one of six option pairs; the bot uses a
     * simple heuristic: take +$2 if spare actions remain, otherwise +2
     * Actions, and always draw 2 cards afterwards.
     */
    public void play() {
        if (owner.isHumanOrPossessedByHuman()) {
            handleHuman();
            return;
        }
        if (owner.getActionsAndVillagersLeft()>1) {
            owner.addAvailableCoins(2);
        } else {
            owner.addActions(2);
        }
        owner.drawCards(2);
    }

    /** Asks the human player to choose one of the six benefit pairs and applies it. */
    private void handleHuman() {
        ArrayList<String> theOptions = new ArrayList<String>();
        theOptions.add("+2 Actions/+2 Cards");
        theOptions.add("+2 Actions/+$2");
        theOptions.add("+2 Actions/+4 Silvers");
        theOptions.add("+2 Cards/+4 Silvers");
        theOptions.add("+2 Cards/+$2");
        // BUG FIX: this label previously read "+$2/+$4 Silvers"; the effect
        // applied for choice 5 is +$2 plus gaining 4 Silvers.
        theOptions.add("+$2/+4 Silvers");
        int theChoice = owner.getEngine().getGameFrame().askToSelectOption("Select for Trusty Steed", theOptions, "Mandatory!");
        switch (theChoice) {
            case 0:
                owner.addActions(2);
                owner.drawCards(2);
                break;
            case 1:
                owner.addActions(2);
                owner.addAvailableCoins(2);
                break;
            case 2:
                owner.addActions(2);
                gainSilversAndDiscardDeck();
                break;
            case 3:
                // Preserved order: gain Silvers (deck into discard) BEFORE drawing.
                gainSilversAndDiscardDeck();
                owner.drawCards(2);
                break;
            case 4:
                owner.addAvailableCoins(2);
                owner.drawCards(2);
                break;
            case 5:
                owner.addAvailableCoins(2);
                gainSilversAndDiscardDeck();
                break;
            default:
                break;
        }
    }

    /** Gains 4 Silvers and then puts the deck into the discard pile. */
    private void gainSilversAndDiscardDeck() {
        for (int i = 0; i < SILVER_COUNT; i++) {
            owner.gain(DomCardName.Silver);
        }
        owner.putDeckInDiscard();
    }

    /**
     * Treats this card as a Treasure when the owner has built the Capitalism
     * project; otherwise defers to the base card types.
     */
    @Override
    public boolean hasCardType(DomCardType aType) {
        if (aType==DomCardType.Treasure && owner != null && owner.hasBuiltProject(DomCardName.Capitalism))
            return true;
        return super.hasCardType(aType);
    }
}
|
package net.minecraft.scoreboard;
import java.util.Comparator;
public class Score {
    /**
     * Orders scores ascending by points; ties fall back to a case-insensitive
     * comparison of the player names with the operands swapped, matching the
     * original ordering.
     */
    public static final Comparator<Score> SCORE_COMPARATOR = new Comparator<Score>() {
        public int compare(Score first, Score second) {
            int byPoints = Integer.compare(first.getScorePoints(), second.getScorePoints());
            if (byPoints != 0) {
                return byPoints;
            }
            return second.getPlayerName().compareToIgnoreCase(first.getPlayerName());
        }
    };

    private final Scoreboard scoreboard;
    private final ScoreObjective objective;
    private final String scorePlayerName;
    private int scorePoints;
    private boolean locked;
    // Forces the first setScorePoints call to notify the scoreboard even when
    // the stored value does not change.
    private boolean forceUpdate;

    public Score(Scoreboard scoreboard, ScoreObjective objective, String playerName) {
        this.scoreboard = scoreboard;
        this.objective = objective;
        this.scorePlayerName = playerName;
        this.forceUpdate = true;
    }

    /**
     * Adds the given amount to this score.
     *
     * @throws IllegalStateException if the objective's criteria are read-only
     */
    public void increaseScore(int amount) {
        if (this.objective.getCriteria().isReadOnly()) {
            throw new IllegalStateException("Cannot modify read-only score");
        }
        this.setScorePoints(this.getScorePoints() + amount);
    }

    /**
     * Subtracts the given amount from this score.
     *
     * @throws IllegalStateException if the objective's criteria are read-only
     */
    public void decreaseScore(int amount) {
        if (this.objective.getCriteria().isReadOnly()) {
            throw new IllegalStateException("Cannot modify read-only score");
        }
        this.setScorePoints(this.getScorePoints() - amount);
    }

    /**
     * Adds a single point to this score.
     *
     * @throws IllegalStateException if the objective's criteria are read-only
     */
    public void incrementScore() {
        if (this.objective.getCriteria().isReadOnly()) {
            throw new IllegalStateException("Cannot modify read-only score");
        }
        this.increaseScore(1);
    }

    public int getScorePoints() {
        return this.scorePoints;
    }

    /**
     * Stores the new point total and notifies the scoreboard when the value
     * changed -- or unconditionally on the very first call (forceUpdate).
     */
    public void setScorePoints(int points) {
        int previous = this.scorePoints;
        this.scorePoints = points;
        if (previous != points || this.forceUpdate) {
            this.forceUpdate = false;
            this.getScoreScoreboard().onScoreUpdated(this);
        }
    }

    public ScoreObjective getObjective() {
        return this.objective;
    }

    public String getPlayerName() {
        return this.scorePlayerName;
    }

    public Scoreboard getScoreScoreboard() {
        return this.scoreboard;
    }

    public boolean isLocked() {
        return this.locked;
    }

    public void setLocked(boolean locked) {
        this.locked = locked;
    }
}
|
/*
* Copyright: (c) Mayo Foundation for Medical Education and
* Research (MFMER). All rights reserved. MAYO, MAYO CLINIC, and the
* triple-shield Mayo logo are trademarks and service marks of MFMER.
*
* Distributed under the OSI-approved BSD 3-Clause License.
* See http://ncip.github.com/lexevs-remote/LICENSE.txt for details.
*/
package org.LexGrid.LexBIG.caCore.applicationservice.resource;
import java.lang.reflect.Method;
import java.util.Map;
import java.util.UUID;
import org.LexGrid.LexBIG.caCore.utils.LexEVSCaCoreUtils;
import org.LexGrid.annotations.LgClientSideSafe;
import org.springframework.util.Assert;
import org.springframework.util.ClassUtils;
public class RemoteResourceManager {

    // Server-side resources handed out to remote clients, keyed by UUID.
    // TimedMap evicts entries after a timeout (see unWrapShell's error path).
    private Map<String,Object> resourceMap = new TimedMap<String,Object>();

    // When true, non-client-side-safe LexBIG results are replaced by
    // RemoteShell proxies instead of being returned directly.
    private boolean enableRemoteShell;

    /**
     * Returns the server-side resource registered under the given UUID, or
     * null when it is unknown or has timed out of the map.
     */
    public Object getResource(String uuid){
        return this.resourceMap.get(uuid);
    }

    /**
     * Replaces a LexBIG result object with a {@link RemoteShell} proxy when
     * remote shells are enabled and the object is not marked client-side
     * safe. The real object is parked in the resource map under a fresh UUID
     * so the shell can resolve it later via {@link #unWrapShell(Object)}.
     *
     * @param result the (possibly null) result to wrap
     * @return the shell, or the original object when wrapping does not apply
     */
    public Object replaceWithShell(Object result) {
        // BUG FIX: check result != null first -- the original dereferenced
        // result.getClass() and threw a NullPointerException whenever the
        // shell was enabled and a query legitimately returned null.
        if(enableRemoteShell
                && result != null
                && !(result instanceof RemoteShell)
                && LexEVSCaCoreUtils.isLexBigClass(result.getClass())
                && !result.getClass().isAnnotationPresent(LgClientSideSafe.class)
                && !doMethodsContainClientSideSafeAnnotation(result.getClass())){
            Class<?>[] classes = ClassUtils.getAllInterfaces(result);
            // A shell proxies interfaces only; objects without any interface
            // are returned as-is.
            if(classes.length > 0){
                String resourceUuid = UUID.randomUUID().toString();
                resourceMap.put(resourceUuid, result);
                result = new RemoteShell(classes, result.getClass(), resourceUuid);
            }
        }
        return result;
    }

    /**
     * Resolves a {@link RemoteShell} back to its server-side resource; any
     * other object is passed through unchanged.
     *
     * @throws RuntimeException when the referenced resource has timed out
     */
    public Object unWrapShell(Object obj) {
        if(obj instanceof RemoteShell){
            RemoteShell shell = (RemoteShell)obj;
            Object resource = this.getResource(shell.getResourceUuid());
            if(resource == null){
                throw new RuntimeException("Remote Resource has timed out on the Server -- please re-execute your query.");
            }
            return resource;
        }
        return obj;
    }

    /**
     * Returns true when any public method of the class carries an
     * {@link LgClientSideSafe} annotation with force() set.
     */
    private boolean doMethodsContainClientSideSafeAnnotation(Class<?> clazz){
        for(Method method : clazz.getMethods()){
            if(method.isAnnotationPresent(LgClientSideSafe.class)){
                LgClientSideSafe css = method.getAnnotation(LgClientSideSafe.class);
                if(css.force()){
                    return true;
                }
            }
        }
        return false;
    }

    public void setResourceMap(Map<String,Object> resourceMap) {
        this.resourceMap = resourceMap;
    }

    public Map<String,Object> getResourceMap() {
        return resourceMap;
    }

    public void setEnableRemoteShell(boolean enableRemoteShell) {
        this.enableRemoteShell = enableRemoteShell;
    }

    public boolean isEnableRemoteShell() {
        return enableRemoteShell;
    }
}
|
package com.welearn.mapper;
import org.springframework.cache.annotation.CacheEvict;
import org.springframework.cache.annotation.CachePut;
import org.springframework.cache.annotation.Cacheable;
import com.welearn.entity.po.common.Area;
import com.welearn.entity.qo.common.AreaQueryCondition;
import org.apache.ibatis.annotations.Mapper;
/**
 * Area Mapper Interface : ryme_common : area
 * (Javadoc translated to English from the original Chinese.)
 * @author Setsuna Jin Generate By CodeSmith 7.0 At 2019/5/6 9:48:57
 * @see com.welearn.entity.po.common.Area
 */
@Mapper
public interface AreaMapper extends BaseMapper<Area, AreaQueryCondition> {
    // --------------------------------------------------------------------------------------------
    /**
     * Query a single row by primary key.
     * @param id entity Id
     * @return the entity, or null when no row matches
     */
    @Override
    // @Cacheable(value = "areaMapper", key = "'area:'+#id", unless = "#result == null")
    Area selectByPK(String id);
    /**
     * Update a row by primary key (all columns).
     * @param entity the entity to persist
     * @return number of affected rows
     */
    @Override
    // @CachePut(value = "areaMapper", key = "'area:'+#entity.id", unless = "#entity.id == null")
    int updateByPK(Area entity);
    /**
     * Update a row by primary key (only the non-null columns).
     * @param entity the entity to persist
     * @return number of affected rows
     */
    @Override
    // @CachePut(value = "areaMapper", key = "'area:'+#entity.id", unless = "#entity.id == null")
    int updateByPKSelective(Area entity);
    /**
     * Mark a row as enabled.
     * @param id primary key
     * @return number of affected rows
     */
    @Override
    // @CacheEvict(value = "areaMapper", key = "'area:'+#id")
    int enable(String id);
    /**
     * Mark a row as disabled.
     * @param id primary key
     * @return number of affected rows
     */
    @Override
    // @CacheEvict(value = "areaMapper", key = "'area:'+#id")
    int disable(String id);
    /**
     * Delete a row by primary key.
     * @param id entityId
     * @return number of affected rows
     */
    @Override
    // @CacheEvict(value = "areaMapper", key = "'area:'+#id")
    int deleteByPK(String id);
    /**
     * Delete all rows.
     * @return number of affected rows
     */
    @Override
    // @CacheEvict(value = "areaMapper", allEntries = true)
    int deleteAll();
    // --------------------------------------------------------------------------------------------
}
|
package org.karthikkumar.dbcm.conn;
import javax.sql.DataSource;
/**
 * DataSource based ConnectorFactory.
 *
 * @author Karthik Kumar Viswanathan &lt;karthikkumar@gmail.com&gt;
 *
 */
public interface DataSourceConnectorFactory extends ConnectorFactory {
    /**
     * @return the underlying JDBC {@link DataSource} this factory builds its
     *         connectors from
     */
    public DataSource getDataSource();
}
|
package cz.tomasdvorak.eet.client.utils;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.ArgumentCaptor;
import org.mockito.Mockito;
import org.slf4j.Logger;
import java.io.IOException;
import java.io.InputStream;
import java.security.KeyStore;
import java.security.KeyStoreException;
import java.security.NoSuchAlgorithmException;
import java.security.cert.CertificateException;
import java.security.cert.CertificateFactory;
import java.security.cert.X509Certificate;
import java.util.Date;
import java.util.concurrent.TimeUnit;
public class CertExpirationCheckerTest {
    // Fixtures: a PKCS#12 keystore (alias "1", password "eet") and a DER
    // encoded certificate bundled on the test classpath. The fixed "now"
    // dates below are chosen relative to the fixtures' validity windows.

    /** Expects a warning: "now" is 18 days before keystore-cert expiry, threshold 20 days. */
    @Test
    public void fromKeystoreExpiresSoon() throws Exception {
        Logger mock = Mockito.mock(Logger.class);
        ArgumentCaptor<String> captor = ArgumentCaptor.forClass(String.class);
        final KeyStore keystore = getKeyStore("/keys/CZ1212121218.p12");
        Date now = DateUtils.parse("2019-09-12T11:02:44+02:00");
        CertExpirationChecker.of(keystore, "1")
                .withCompareAgainstDate(now)
                .whenExpiresIn(20, TimeUnit.DAYS)
                .printWarningTo(mock);
        // The warning is matched verbatim, including subject/issuer DNs.
        final String expected = "\n" +
                "#### WARNING ####\n" +
                "Following certificate expires on 2019-09-30T11:02:44+02:00!\n" +
                "{subject='OID.2.5.4.13=fyzicka osoba, CN=CZ1212121218, DC=CZ', issuer='CN=EET CA 1 Playground, O=Česká Republika – Generální finanční ředitelství, DC=CZ', SerialNumber=1446418224, validFrom=2016-09-30T11:02:44+02:00, validTo=2019-09-30T11:02:44+02:00}\n" +
                "Please update your certificate as soon as possible. More info on https://github.com/todvora/eet-client#certificate-expiration\n" +
                "##################";
        Mockito.verify(mock, Mockito.times(1)).warn(captor.capture());
        Assert.assertEquals(expected, captor.getValue());
    }

    /** Expects no warning: expiry is 18 days away but the threshold is only 15 days. */
    @Test
    public void fromKeystoreNotExpired() throws Exception {
        Logger mock = Mockito.mock(Logger.class);
        final KeyStore keystore = getKeyStore("/keys/CZ1212121218.p12");
        Date now = DateUtils.parse("2019-09-12T11:02:44+02:00");
        CertExpirationChecker.of(keystore, "1")
                .withCompareAgainstDate(now)
                .whenExpiresIn(15, TimeUnit.DAYS)
                .printWarningTo(mock);
        Mockito.verify(mock, Mockito.never()).warn(Mockito.anyString());
    }

    /** Loads the bundled PKCS#12 keystore fixture (password "eet"). */
    private KeyStore getKeyStore(String name) throws KeyStoreException, IOException, NoSuchAlgorithmException, CertificateException {
        final KeyStore keystore = KeyStore.getInstance("pkcs12");
        final InputStream inputStream = getClass().getResourceAsStream(name);
        keystore.load(inputStream, "eet".toCharArray());
        inputStream.close();
        return keystore;
    }

    /** Expects a warning: "now" is 7 days before certificate expiry, threshold 20 days. */
    @Test
    public void fromCertificateExpiresSoon() throws Exception {
        Logger mock = Mockito.mock(Logger.class);
        ArgumentCaptor<String> captor = ArgumentCaptor.forClass(String.class);
        final X509Certificate serverCertificate = getCertificate("/certificates/2qca16_rsa.der");
        Date now = DateUtils.parse("2026-02-01T13:17:11+01:00");
        CertExpirationChecker.of(serverCertificate)
                .withCompareAgainstDate(now)
                .whenExpiresIn(20, TimeUnit.DAYS)
                .printWarningTo(mock);
        final String expected = "\n#### WARNING ####\n" +
                "Following certificate expires on 2026-02-08T13:17:11+01:00!\n" +
                "{subject='SERIALNUMBER=NTRCZ-26439395, O=\"První certifikační autorita, a.s.\", CN=I.CA Qualified 2 CA/RSA 02/2016, C=CZ', issuer='SERIALNUMBER=NTRCZ-26439395, CN=I.CA Root CA/RSA, O=\"První certifikační autorita, a.s.\", C=CZ', SerialNumber=100001006, validFrom=2016-02-11T13:17:11+01:00, validTo=2026-02-08T13:17:11+01:00}\n" +
                "Please update your certificate as soon as possible. More info on https://github.com/todvora/eet-client#certificate-expiration\n" +
                "##################";
        Mockito.verify(mock, Mockito.times(1)).warn(captor.capture());
        Assert.assertEquals(expected, captor.getValue());
    }

    /** Expects no warning: "now" is years before the certificate's 2026 expiry. */
    @Test
    public void fromCertificateNotExpired() throws Exception {
        Logger mock = Mockito.mock(Logger.class);
        final X509Certificate serverCertificate = getCertificate("/certificates/2qca16_rsa.der");
        Date now = DateUtils.parse("2019-08-15T07:00:00+02:00");
        CertExpirationChecker.of(serverCertificate)
                .withCompareAgainstDate(now)
                .whenExpiresIn(15, TimeUnit.DAYS)
                .printWarningTo(mock);
        Mockito.verify(mock, Mockito.never()).warn(Mockito.anyString());
    }

    /** Loads the bundled DER-encoded X.509 certificate fixture. */
    private X509Certificate getCertificate(final String path) throws CertificateException {
        final InputStream is = getClass().getResourceAsStream(path);
        final CertificateFactory certificateFactory = CertificateFactory.getInstance("X.509");
        return (X509Certificate) certificateFactory.generateCertificate(is);
    }
}
|
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) AutoRest Code Generator.
package com.azure.resourcemanager.storage.generated;
import com.azure.core.util.Context;
/** Samples for ObjectReplicationPoliciesOperation Get. */
public final class ObjectReplicationPoliciesOperationGetSamples {
    /*
     * x-ms-original-file: specification/storage/resource-manager/Microsoft.Storage/stable/2021-09-01/examples/StorageAccountGetObjectReplicationPolicy.json
     */
    /**
     * Sample code: StorageAccountGetObjectReplicationPolicies.
     *
     * @param azure The entry point for accessing resource management APIs in Azure.
     */
    public static void storageAccountGetObjectReplicationPolicies(
        com.azure.resourcemanager.AzureResourceManager azure) {
        // Fetches one object-replication policy by id for storage account
        // "sto2527" in resource group "res6977", going through the inner
        // service client rather than the fluent resource model.
        azure
            .storageAccounts()
            .manager()
            .serviceClient()
            .getObjectReplicationPoliciesOperations()
            .getWithResponse("res6977", "sto2527", "{objectReplicationPolicy-Id}", Context.NONE);
    }
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.shardingsphere.core.rewrite.token;
import com.google.common.base.Optional;
import org.apache.shardingsphere.core.optimize.encrypt.segment.condition.EncryptCondition;
import org.apache.shardingsphere.core.optimize.sharding.segment.condition.ShardingCondition;
import org.apache.shardingsphere.core.optimize.sharding.segment.select.groupby.GroupBy;
import org.apache.shardingsphere.core.optimize.sharding.segment.select.item.SelectItem;
import org.apache.shardingsphere.core.optimize.sharding.segment.select.item.SelectItems;
import org.apache.shardingsphere.core.optimize.sharding.segment.select.orderby.OrderBy;
import org.apache.shardingsphere.core.optimize.sharding.segment.select.orderby.OrderByItem;
import org.apache.shardingsphere.core.optimize.sharding.segment.select.pagination.Pagination;
import org.apache.shardingsphere.core.optimize.sharding.statement.dml.ShardingSelectOptimizedStatement;
import org.apache.shardingsphere.core.parse.sql.segment.dml.item.AggregationDistinctSelectItemSegment;
import org.apache.shardingsphere.core.parse.sql.segment.dml.item.SelectItemSegment;
import org.apache.shardingsphere.core.parse.sql.segment.dml.item.SelectItemsSegment;
import org.apache.shardingsphere.core.parse.sql.statement.dml.SelectStatement;
import org.apache.shardingsphere.core.rewrite.token.pojo.SQLToken;
import org.apache.shardingsphere.core.rewrite.token.pojo.SelectItemPrefixToken;
import org.apache.shardingsphere.core.rule.EncryptRule;
import org.apache.shardingsphere.core.rule.ShardingRule;
import org.hamcrest.CoreMatchers;
import org.junit.Before;
import org.junit.Test;
import java.util.Collections;
import java.util.List;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertThat;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public final class SQLTokenGenerateEngineTest {
    // One generate engine per rewrite flavor; all run against the same
    // optimized SELECT statement prepared in setUp().
    private SQLTokenGenerateEngine shardingTokenGenerateEngine = new ShardingTokenGenerateEngine();
    private SQLTokenGenerateEngine baseTokenGenerateEngine = new BaseTokenGenerateEngine();
    private SQLTokenGenerateEngine encryptTokenGenerateEngine = new EncryptTokenGenerateEngine();
    private ShardingSelectOptimizedStatement optimizedStatement;
    /**
     * Builds a minimal sharding SELECT statement whose only SQL segment is a
     * mocked select-items segment containing one aggregation-distinct item.
     */
    @Before
    public void setUp() {
        SelectStatement selectStatement = new SelectStatement();
        selectStatement.getAllSQLSegments().add(createSelectItemsSegment());
        optimizedStatement = new ShardingSelectOptimizedStatement(selectStatement, Collections.<ShardingCondition>emptyList(), Collections.<EncryptCondition>emptyList(),
                new GroupBy(Collections.<OrderByItem>emptyList(), 1), new OrderBy(Collections.<OrderByItem>emptyList(), false),
                new SelectItems(Collections.<SelectItem>emptyList(), false, 0), new Pagination(null, null, Collections.emptyList()));
    }
    // Mocks a select-items segment that exposes a single
    // "COUNT(DISTINCT id)" item aliased "c", spanning indexes 1-2.
    private SelectItemsSegment createSelectItemsSegment() {
        SelectItemsSegment selectItemsSegment = mock(SelectItemsSegment.class);
        when(selectItemsSegment.getStartIndex()).thenReturn(1);
        when(selectItemsSegment.getSelectItems()).thenReturn(Collections.<SelectItemSegment>emptyList());
        AggregationDistinctSelectItemSegment distinctSelectItemSegment = mock(AggregationDistinctSelectItemSegment.class);
        when(distinctSelectItemSegment.getDistinctExpression()).thenReturn("COUNT(DISTINCT id)");
        when(distinctSelectItemSegment.getAlias()).thenReturn(Optional.of("c"));
        when(distinctSelectItemSegment.getStartIndex()).thenReturn(1);
        when(distinctSelectItemSegment.getStopIndex()).thenReturn(2);
        when(selectItemsSegment.findSelectItemSegments(AggregationDistinctSelectItemSegment.class)).thenReturn(Collections.singletonList(distinctSelectItemSegment));
        return selectItemsSegment;
    }
    // The base engine produces no tokens for this statement.
    @SuppressWarnings("unchecked")
    @Test
    public void assertGenerateSQLTokensWithBaseTokenGenerateEngine() {
        List<SQLToken> actual = baseTokenGenerateEngine.generateSQLTokens(optimizedStatement, null, mock(ShardingRule.class), true, false);
        assertThat(actual.size(), is(0));
    }
    // Multi-route sharding (isSingleRoute=false) emits tokens, the first
    // being a select-item prefix token.
    @SuppressWarnings("unchecked")
    @Test
    public void assertGetSQLTokenGeneratorsWithShardingTokenGenerateEngineWithoutSingleRoute() {
        List<SQLToken> actual = shardingTokenGenerateEngine.generateSQLTokens(optimizedStatement, null, mock(ShardingRule.class), false, false);
        assertThat(actual.size(), is(2));
        assertThat(actual.get(0), CoreMatchers.<SQLToken>instanceOf(SelectItemPrefixToken.class));
    }
    // Single-route sharding needs no rewriting tokens.
    @SuppressWarnings("unchecked")
    @Test
    public void assertGetSQLTokenGeneratorsWithShardingTokenGenerateEngineWithSingleRoute() {
        List<SQLToken> actual = shardingTokenGenerateEngine.generateSQLTokens(optimizedStatement, null, mock(ShardingRule.class), true, false);
        assertThat(actual.size(), is(0));
    }
    // The encrypt engine produces no tokens for this statement either.
    @SuppressWarnings("unchecked")
    @Test
    public void assertGenerateSQLTokensWithEncryptTokenGenerateEngine() {
        List<SQLToken> actual = encryptTokenGenerateEngine.generateSQLTokens(optimizedStatement, null, mock(EncryptRule.class), true, false);
        assertThat(actual.size(), is(0));
    }
}
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mojo;
import org.apache.avro.generic.GenericData.StringType;
import java.io.File;
import java.io.IOException;
import org.apache.avro.Schema;
import org.apache.avro.compiler.specific.SpecificCompiler;
/**
* Generate Java classes from Avro schema files (.avsc)
*
* @goal schema
* @phase generate-sources
*/
public class SchemaMojo extends AbstractAvroMojo {
/**
* A set of Ant-like inclusion patterns used to select files from the source
* directory for processing. By default, the pattern
* <code>**/*.avsc</code> is used to select grammar files.
*
* @parameter
*/
private String[] includes = new String[] { "**/*.avsc" };
/**
* A set of Ant-like inclusion patterns used to select files from the source
* directory for processing. By default, the pattern
* <code>**/*.avsc</code> is used to select grammar files.
*
* @parameter
*/
private String[] testIncludes = new String[] { "**/*.avsc" };
@Override
protected void doCompile(String filename, File sourceDirectory, File outputDirectory) throws IOException {
File src = new File(sourceDirectory, filename);
Schema.Parser parser = new Schema.Parser();
Schema schema = parser.parse(src);
SpecificCompiler compiler = new SpecificCompiler(schema);
compiler.setStringType(StringType.valueOf(stringType));
compiler.compileToDestination(src, outputDirectory);
}
@Override
protected String[] getIncludes() {
return includes;
}
@Override
protected String[] getTestIncludes() {
return testIncludes;
}
}
|
package pl.digitalzombielab.dayview;
import org.junit.Test;
import static org.junit.Assert.*;
/**
* Example local unit test, which will execute on the development machine (host).
*
* @see <a href="http://d.android.com/tools/testing">Testing documentation</a>
*/
public class ExampleUnitTest {
    /** Sanity check that local JVM unit tests execute on the host machine. */
    @Test
    public void addition_isCorrect() throws Exception {
        int expected = 4;
        int actual = 2 + 2;
        assertEquals(expected, actual);
    }
}
|
/*
* Copyright (c) 2004-2021, University of Oslo
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* Neither the name of the HISP project nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.hisp.dhis.commons.jsonfiltering.config;
import java.io.InputStream;
import java.net.URL;
import java.util.Map;
import java.util.Properties;
import java.util.SortedMap;
import lombok.AccessLevel;
import lombok.NoArgsConstructor;
import lombok.SneakyThrows;
import org.hisp.dhis.commons.jsonfiltering.bean.BeanInfoIntrospector;
import org.hisp.dhis.commons.jsonfiltering.filter.JsonFilteringPropertyFilter;
import org.hisp.dhis.commons.jsonfiltering.parser.JsonFilteringParser;
import org.hisp.dhis.commons.jsonfiltering.view.PropertyView;
import com.google.common.cache.CacheBuilderSpec;
import com.google.common.collect.ImmutableSortedMap;
import com.google.common.collect.Maps;
/**
* Provides access to various configuration values that the JsonFiltering
* library uses.
* <p>
* Users can override the default configuration by putting a
* json-filtering.properties in their classpath.
*/
@NoArgsConstructor( access = AccessLevel.PRIVATE )
public class JsonFilteringConfig
{
    // Immutable, sorted snapshot of every property loaded from the classpath.
    private static final SortedMap<String, String> PROPS_MAP;
    private static final boolean FILTER_IMPLICITLY_INCLUDE_BASE_FIELDS;
    private static final boolean FILTER_IMPLICITLY_INCLUDE_BASE_FIELDS_IN_VIEW;
    private static final CacheBuilderSpec FILTER_PATH_CACHE_SPEC;
    private static final boolean FILTER_PROPAGATE_VIEW_TO_NESTED_FILTERS;
    private static final CacheBuilderSpec PARSER_NODE_CACHE_SPEC;
    private static final CacheBuilderSpec PROPERTY_DESCRIPTOR_CACHE_SPEC;
    private static final boolean PROPERTY_ADD_NON_ANNOTATED_FIELDS_TO_BASE_VIEW;
    static
    {
        Map<String, String> propsMap = Maps.newHashMap();
        // Records which file each property came from; only written inside
        // loadProps and never read afterwards. NOTE(review): presumably kept
        // for debugging -- confirm before removing.
        Map<String, String> sourceMap = Maps.newHashMap();
        // Bundled defaults are loaded first so that entries from a
        // user-supplied json-filtering.properties override them.
        loadProps( propsMap, sourceMap, "json-filtering.default.properties" );
        loadProps( propsMap, sourceMap, "json-filtering.properties" );
        PROPS_MAP = ImmutableSortedMap.copyOf( propsMap );
        FILTER_IMPLICITLY_INCLUDE_BASE_FIELDS = getBool( "filter.implicitlyIncludeBaseFields" );
        FILTER_IMPLICITLY_INCLUDE_BASE_FIELDS_IN_VIEW = getBool( "filter.implicitlyIncludeBaseFieldsInView" );
        FILTER_PATH_CACHE_SPEC = getCacheSpec( "filter.pathCache.spec" );
        FILTER_PROPAGATE_VIEW_TO_NESTED_FILTERS = getBool( "filter.propagateViewToNestedFilters" );
        PARSER_NODE_CACHE_SPEC = getCacheSpec( "parser.nodeCache.spec" );
        PROPERTY_ADD_NON_ANNOTATED_FIELDS_TO_BASE_VIEW = getBool( "property.addNonAnnotatedFieldsToBaseView" );
        PROPERTY_DESCRIPTOR_CACHE_SPEC = getCacheSpec( "property.descriptorCache.spec" );
    }
    // Parses the named property into a CacheBuilderSpec; a missing property
    // yields the empty spec (CacheBuilderSpec.parse("")).
    private static CacheBuilderSpec getCacheSpec( String key )
    {
        String value = JsonFilteringConfig.PROPS_MAP.get( key );
        if ( value == null )
        {
            value = "";
        }
        return CacheBuilderSpec.parse( value );
    }
    // Strict boolean parse: only the literal string "true" maps to true;
    // anything else (including a missing property) is false.
    private static boolean getBool( String key )
    {
        return "true".equals( JsonFilteringConfig.PROPS_MAP.get( key ) );
    }
    // Loads one properties file from the context classloader (silently
    // skipped when absent) and merges its entries into both maps.
    @SneakyThrows
    private static void loadProps( Map<String, String> propsMap, Map<String, String> sourceMap, String file )
    {
        ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
        URL url = classLoader.getResource( file );
        if ( url == null )
        {
            return;
        }
        Properties fileProps = new Properties();
        try ( InputStream inputStream = url.openStream() )
        {
            fileProps.load( inputStream );
        }
        for ( Map.Entry<Object, Object> entry : fileProps.entrySet() )
        {
            propsMap.put( entry.getKey().toString(), entry.getValue().toString() );
            sourceMap.put( entry.getKey().toString(), url.toString() );
        }
    }
    /**
     * Determines whether or not to include base fields for nested objects
     *
     * @return true if includes, false if not
     * @see PropertyView
     */
    public static boolean isFILTER_IMPLICITLY_INCLUDE_BASE_FIELDS()
    {
        return FILTER_IMPLICITLY_INCLUDE_BASE_FIELDS;
    }
    /**
     * Determines whether or not filters that specify a view also include "base"
     * fields.
     *
     * @return true if includes, false if not
     */
    public static boolean isFILTER_IMPLICITLY_INCLUDE_BASE_FIELDS_IN_VIEW()
    {
        return FILTER_IMPLICITLY_INCLUDE_BASE_FIELDS_IN_VIEW;
    }
    /**
     * Get the {@link CacheBuilderSpec} of the path cache in the json-filtering
     * filter.
     *
     * @return spec
     * @see JsonFilteringPropertyFilter
     */
    public static CacheBuilderSpec getFILTER_PATH_CACHE_SPEC()
    {
        return FILTER_PATH_CACHE_SPEC;
    }
    /**
     * Determines whether or not filters that specify a view also propagates
     * that view to nested filters.
     * <p>
     * For example, given a view called "full", does the full view also apply to
     * the nested objects or does the nested object only include base fields.
     *
     * @return true if includes, false if not
     */
    public static boolean isFILTER_PROPAGATE_VIEW_TO_NESTED_FILTERS()
    {
        return FILTER_PROPAGATE_VIEW_TO_NESTED_FILTERS;
    }
    /**
     * Get the {@link CacheBuilderSpec} of the node cache in the json-filtering
     * parser.
     *
     * @return spec
     * @see JsonFilteringParser
     */
    public static CacheBuilderSpec getPARSER_NODE_CACHE_SPEC()
    {
        return PARSER_NODE_CACHE_SPEC;
    }
    /**
     * Determines whether or not non-annotated fields are added to the "base"
     * view.
     *
     * @return true/false
     * @see BeanInfoIntrospector
     */
    public static boolean isPROPERTY_ADD_NON_ANNOTATED_FIELDS_TO_BASE_VIEW()
    {
        return PROPERTY_ADD_NON_ANNOTATED_FIELDS_TO_BASE_VIEW;
    }
    /**
     * Get the {@link CacheBuilderSpec} of the descriptor cache in the property
     * view introspector.
     *
     * @return spec
     * @see BeanInfoIntrospector
     */
    public static CacheBuilderSpec getPROPERTY_DESCRIPTOR_CACHE_SPEC()
    {
        return PROPERTY_DESCRIPTOR_CACHE_SPEC;
    }
}
|
package io.quarkus.hibernate.search.orm.elasticsearch.test.singlepersistenceunit;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.Id;
import org.hibernate.search.mapper.pojo.mapping.definition.annotation.FullTextField;
import org.hibernate.search.mapper.pojo.mapping.definition.annotation.Indexed;
@Entity
@Indexed
public class DefaultPUEntity {
    // Auto-generated primary key.
    @Id
    @GeneratedValue
    private Long id;
    // Full-text indexed payload used by the Hibernate Search tests.
    @FullTextField
    private String text;
    /** No-arg constructor required by JPA. */
    public DefaultPUEntity() {
    }
    public DefaultPUEntity(String text) {
        this.text = text;
    }
    public Long getId() {
        return id;
    }
    public void setId(Long id) {
        this.id = id;
    }
    public String getText() {
        return text;
    }
    public void setText(String text) {
        this.text = text;
    }
}
|
package com.platform.utils;
import java.sql.Timestamp;
import java.util.Collection;
import java.util.Date;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* 名称:StringUtils <br>
* 描述:String工具类<br>
*
* @author admin
* @version 1.0
* @since 1.0.0
*/
/**
 * 名称:StringUtils <br>
 * 描述:String utility class<br>
 *
 * @author admin
 * @version 1.0
 * @since 1.0.0
 */
public class StringUtils {

    public static final String EMPTY = "";

    // Matches an underscore followed by one word character; used by lineToHump.
    // Compiled once and kept as a constant (Pattern is immutable and thread-safe).
    private static final Pattern linePattern = Pattern.compile("_(\\w)");

    /**
     * Returns true when the string is non-null and contains at least one
     * non-whitespace character (after trim).
     *
     * @param str source string, may be null
     * @return true if not empty
     */
    public static boolean isNotEmpty(String str) {
        return str != null && !str.trim().isEmpty();
    }

    /**
     * Emptiness check covering several shapes of data: null, zero-length
     * CharSequence, empty Collection/Map, and arrays whose every element is
     * itself null-or-empty (checked recursively).
     *
     * @param obj object to test, may be null
     * @return true if the object is considered null or empty
     */
    public static boolean isNullOrEmpty(Object obj) {
        if (obj == null) {
            return true;
        }
        if (obj instanceof CharSequence) {
            return ((CharSequence) obj).length() == 0;
        }
        if (obj instanceof Collection) {
            return ((Collection<?>) obj).isEmpty();
        }
        if (obj instanceof Map) {
            return ((Map<?, ?>) obj).isEmpty();
        }
        if (obj instanceof Object[]) {
            // Empty arrays and arrays of all-empty elements both count as empty.
            for (Object element : (Object[]) obj) {
                if (!isNullOrEmpty(element)) {
                    return false;
                }
            }
            return true;
        }
        return false;
    }

    /**
     * Converts an underscore-separated name to camelCase,
     * e.g. {@code "user_name" -> "userName"}. The input is lower-cased first.
     *
     * @param str underscore-separated name
     * @return camelCase form
     */
    public static String lineToHump(String str) {
        str = str.toLowerCase();
        Matcher matcher = linePattern.matcher(str);
        // StringBuffer is required by Matcher.appendReplacement pre-Java 9.
        StringBuffer sb = new StringBuffer();
        while (matcher.find()) {
            matcher.appendReplacement(sb, matcher.group(1).toUpperCase());
        }
        matcher.appendTail(sb);
        return sb.toString();
    }

    /**
     * Converts an Object to a String ("" for null).
     *
     * @param obj source object, may be null
     * @return string form, never null
     */
    public static String toStringByObject(Object obj) {
        return toStringByObject(obj, false, null);
    }

    /**
     * Converts an Object to a String ("" for null), optionally trimming the
     * result, and optionally formatting Timestamp/Date values with the given
     * date pattern via {@code DateUtils.format}.
     *
     * @param obj      source object, may be null
     * @param isqdkg   when true, trim the resulting string (and skip date formatting)
     * @param datatype date-format pattern applied to Timestamp/Date values; ignored if blank
     * @return string form, never null
     */
    public static String toStringByObject(Object obj, boolean isqdkg, String datatype) {
        if (obj == null) {
            return "";
        }
        if (isqdkg) {
            return obj.toString().trim();
        }
        // 如果有设置时间格式类型,这转换 (format date-like values when a pattern is given)
        if (StringUtils.hasText(datatype)) {
            if (obj instanceof Timestamp) {
                return DateUtils.format((Timestamp) obj, datatype);
            } else if (obj instanceof Date) {
                // BUGFIX: the previous code cast a plain java.util.Date to Timestamp,
                // which throws ClassCastException; wrap it in a Timestamp instead.
                return DateUtils.format(new Timestamp(((Date) obj).getTime()), datatype);
            }
        }
        return obj.toString();
    }

    /**
     * @param str sequence to test, may be null
     * @return true if the sequence contains at least one non-whitespace character
     */
    public static boolean hasText(CharSequence str) {
        if (!hasLength(str)) {
            return false;
        }
        int strLen = str.length();
        for (int i = 0; i < strLen; ++i) {
            if (!Character.isWhitespace(str.charAt(i))) {
                return true;
            }
        }
        return false;
    }

    /**
     * @param str sequence to test, may be null
     * @return true if the sequence is non-null and non-empty
     */
    public static boolean hasLength(CharSequence str) {
        return str != null && str.length() > 0;
    }

    /**
     * String overload of {@link #hasText(CharSequence)}.
     */
    public static boolean hasText(String str) {
        return hasText((CharSequence) str);
    }

    /**
     * Lenient int parse with default 0.
     */
    public static int parseInt(Object str) {
        return parseInt(str, 0);
    }

    /**
     * Lenient int parse: returns {@code defaultValue} for null, empty,
     * non-numeric, or out-of-int-range input.
     *
     * @param str          value to parse (its toString() is used)
     * @param defaultValue fallback value
     * @return parsed int or the fallback
     */
    public static int parseInt(Object str, int defaultValue) {
        if (str == null || str.equals("")) {
            return defaultValue;
        }
        String s = str.toString().trim();
        if (!s.matches("-?\\d+")) {
            return defaultValue;
        }
        try {
            return Integer.parseInt(s);
        } catch (NumberFormatException e) {
            // BUGFIX: the regex accepts digit strings beyond the int range
            // (e.g. "99999999999"), for which parseInt throws; fall back instead.
            return defaultValue;
        }
    }
}
|
package com.GuoGuo.JuicyChat.stetho;
import android.content.Context;
import com.facebook.stetho.inspector.database.DatabaseFilesProvider;
import java.io.File;
import java.io.FilenameFilter;
import java.util.ArrayList;
import java.util.List;
/**
* Created by jiangecho on 2016/11/23.
*/
/**
 * Stetho {@link DatabaseFilesProvider} that exposes both the app's regular
 * database files and the RongCloud IM cache files stored under the app's
 * files directory.
 *
 * Created by jiangecho on 2016/11/23.
 */
public class RongDatabaseFilesProvider implements DatabaseFilesProvider {

    private Context context;

    public RongDatabaseFilesProvider(Context context) {
        this.context = context;
    }

    // Accepts the known RongCloud cache files plus anything ending in ".db".
    private static FilenameFilter rongDbFilenameFilter = new FilenameFilter() {
        @Override
        public boolean accept(File dir, String filename) {
            return filename.equals("IMKitUserInfoCache")
                    || filename.equals("SealUserInfo")
                    || filename.equals("storage")
                    || filename.endsWith(".db");
        }
    };

    /**
     * @return all matching files under the app's files directory (searched
     *         recursively) plus every database known to the Context; never null
     */
    @Override
    public List<File> getDatabaseFiles() {
        List<File> dbFiles = new ArrayList<>();
        // listFiles now always returns a list, so this addAll is NPE-safe.
        dbFiles.addAll(listFiles(context.getFilesDir(), rongDbFilenameFilter));
        for (String databaseName : context.databaseList()) {
            dbFiles.add(context.getDatabasePath(databaseName));
        }
        return dbFiles;
    }

    /**
     * Recursively collects files under {@code dir} accepted by the filter.
     *
     * @param dir            directory to search; may be null or a plain file
     * @param fileNameFilter filter applied to regular files
     * @return matching files; an empty list (never null) when {@code dir}
     *         is null or not a directory
     */
    private List<File> listFiles(File dir, FilenameFilter fileNameFilter) {
        List<File> fileList = new ArrayList<>();
        // BUGFIX: previously returned null here, which would NPE in the
        // caller's addAll; an empty list preserves behavior safely.
        if (dir == null || dir.isFile()) {
            return fileList;
        }
        File[] files = dir.listFiles();
        if (files != null) {
            for (File file : files) {
                if (file.isFile() && fileNameFilter.accept(dir, file.getName())) {
                    fileList.add(file);
                } else {
                    // Recurse into subdirectories (non-matching files fall
                    // through here too; listFiles returns empty for them).
                    fileList.addAll(listFiles(file, fileNameFilter));
                }
            }
        }
        return fileList;
    }
}
|
/*
* Copyright (c) 2017 The Hyve and respective contributors.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* See the file LICENSE in the root of this repository.
*/
package nl.thehyve.podium.validation;
import javax.validation.Constraint;
import javax.validation.Payload;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Bean-validation constraint marking a field or getter whose value must be a
 * valid password; validation logic lives in {@link PasswordValidator}.
 */
@Target({ElementType.METHOD, ElementType.FIELD})
@Retention(RetentionPolicy.RUNTIME)
@Constraint(validatedBy = PasswordValidator.class)
public @interface ValidPassword {

    /** Message key resolved from the validation resource bundle. */
    String message() default "{podium.ValidPassword.message}";

    /** Standard bean-validation groups attribute. */
    Class<?>[] groups() default {};

    /** Standard bean-validation payload attribute. */
    Class<? extends Payload>[] payload() default {};
}
|
/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;
import java.io.IOException;
import java.io.FileNotFoundException;
import java.io.InterruptedIOException;
import java.net.InetSocketAddress;
import java.security.Key;
import java.security.KeyException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.NavigableSet;
import java.util.Set;
import java.util.SortedSet;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.Future;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CompoundConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.crypto.Cipher;
import org.apache.hadoop.hbase.io.crypto.Encryption;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
import org.apache.hadoop.hbase.io.hfile.InvalidHFileException;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
import org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor;
import org.apache.hadoop.hbase.regionserver.compactions.OffPeakHours;
import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
import org.apache.hadoop.hbase.security.EncryptionUtil;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ChecksumType;
import org.apache.hadoop.hbase.util.ClassSize;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.util.StringUtils;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableCollection;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
/**
* A Store holds a column family in a Region. Its a memstore and a set of zero
* or more StoreFiles, which stretch backwards over time.
*
* <p>There's no reason to consider append-logging at this level; all logging
* and locking is handled at the HRegion level. Store just provides
* services to manage sets of StoreFiles. One of the most important of those
* services is compaction services where files are aggregated once they pass
* a configurable threshold.
*
* <p>The only thing having to do with logs that Store needs to deal with is
* the reconstructionLog. This is a segment of an HRegion's log that might
* NOT be present upon startup. If the param is NULL, there's nothing to do.
* If the param is non-NULL, we need to process the log to reconstruct
* a TreeMap that might not have been written to disk before the process
* died.
*
* <p>It's assumed that after this constructor returns, the reconstructionLog
* file will be deleted (by whoever has instantiated the Store).
*
* <p>Locking and transactions are handled at a higher level. This API should
* not be called directly but by an HRegion manager.
*/
@InterfaceAudience.Private
public class HStore implements Store {
// Configuration keys and their defaults for compaction checking and the
// blocking-store-files backpressure threshold.
public static final String COMPACTCHECKER_INTERVAL_MULTIPLIER_KEY =
    "hbase.server.compactchecker.interval.multiplier";
public static final String BLOCKING_STOREFILES_KEY = "hbase.hstore.blockingStoreFiles";
public static final int DEFAULT_COMPACTCHECKER_INTERVAL_MULTIPLIER = 1000;
public static final int DEFAULT_BLOCKING_STOREFILE_COUNT = 7;

static final Log LOG = LogFactory.getLog(HStore.class);

// In-memory write buffer for this store; flushed to store files.
protected final MemStore memstore;
// This stores directory in the filesystem.
private final HRegion region;
private final HColumnDescriptor family;
private final HRegionFileSystem fs;
// Compound configuration: global conf layered with table/CF overrides.
private final Configuration conf;
private final CacheConfig cacheConf;
// Size of the last finished compaction, in bytes.
private long lastCompactSize = 0;
// When set, the next compaction selection is forced to be a major compaction.
volatile boolean forceMajor = false;
/* how many bytes to write between status checks */
static int closeCheckInterval = 0;
// Aggregate on-disk size of all store files, maintained as files come and go.
private volatile long storeSize = 0L;
private volatile long totalUncompressedBytes = 0L;

/**
 * RWLock for store operations.
 * Locked in shared mode when the list of component stores is looked at:
 *   - all reads/writes to table data
 *   - checking for split
 * Locked in exclusive mode when the list of component stores is modified:
 *   - closing
 *   - completing a compaction
 */
final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
private final boolean verifyBulkLoads;

private ScanInfo scanInfo;

// Store files currently participating in a compaction; guarded by 'lock'.
final List<StoreFile> filesCompacting = Lists.newArrayList();

// All access must be synchronized.
private final Set<ChangedReadersObserver> changedReaderObservers =
    Collections.newSetFromMap(new ConcurrentHashMap<ChangedReadersObserver, Boolean>());

private final int blocksize;
private HFileDataBlockEncoder dataBlockEncoder;

/** Checksum configuration */
private ChecksumType checksumType;
private int bytesPerChecksum;

// Comparing KeyValues
private final KeyValue.KVComparator comparator;

// Pluggable storage engine: store file manager, compactor, flusher policy.
final StoreEngine<?, ?, ?, ?> storeEngine;

// Tracks whether some store already claimed the off-peak compaction slot.
private static final AtomicBoolean offPeakCompactionTracker = new AtomicBoolean();
private final OffPeakHours offPeakHours;

private static final int DEFAULT_FLUSH_RETRIES_NUMBER = 10;
private int flushRetriesNumber;
private int pauseTime;

private long blockingFileCount;
private int compactionCheckMultiplier;

// Encryption context for new store files; NONE unless the CF configures a cipher.
private Encryption.Context cryptoContext = Encryption.Context.NONE;
/**
 * Constructor.
 * @param region the region this store belongs to
 * @param family HColumnDescriptor for this column family
 * @param confParam configuration object; layered under table/CF overrides
 * @throws IOException if the store directory cannot be created or store
 *         files cannot be loaded
 */
protected HStore(final HRegion region, final HColumnDescriptor family,
    final Configuration confParam) throws IOException {

  HRegionInfo info = region.getRegionInfo();
  this.fs = region.getRegionFileSystem();
  // Assemble the store's home directory and Ensure it exists.
  fs.createStoreDir(family.getNameAsString());
  this.region = region;
  this.family = family;
  // 'conf' renamed to 'confParam' b/c we use this.conf in the constructor
  // CompoundConfiguration will look for keys in reverse order of addition, so we'd
  // add global config first, then table and cf overrides, then cf metadata.
  this.conf = new CompoundConfiguration()
    .add(confParam)
    .addStringMap(region.getTableDesc().getConfiguration())
    .addStringMap(family.getConfiguration())
    .addWritableMap(family.getValues());
  this.blocksize = family.getBlocksize();

  this.dataBlockEncoder =
      new HFileDataBlockEncoderImpl(family.getDataBlockEncoding());

  this.comparator = info.getComparator();
  // used by ScanQueryMatcher
  long timeToPurgeDeletes =
      Math.max(conf.getLong("hbase.hstore.time.to.purge.deletes", 0), 0);
  LOG.trace("Time to purge deletes set to " + timeToPurgeDeletes +
      "ms in store " + this);
  // Get TTL
  long ttl = determineTTLFromFamily(family);
  // Why not just pass a HColumnDescriptor in here altogether?  Even if have
  // to clone it?
  scanInfo = new ScanInfo(family, ttl, timeToPurgeDeletes, this.comparator);
  this.memstore = new MemStore(conf, this.comparator);
  this.offPeakHours = OffPeakHours.getInstance(conf);

  // Setting up cache configuration for this family
  this.cacheConf = new CacheConfig(conf, family);

  this.verifyBulkLoads = conf.getBoolean("hbase.hstore.bulkload.verify", false);

  this.blockingFileCount =
      conf.getInt(BLOCKING_STOREFILES_KEY, DEFAULT_BLOCKING_STOREFILE_COUNT);
  this.compactionCheckMultiplier = conf.getInt(
      COMPACTCHECKER_INTERVAL_MULTIPLIER_KEY, DEFAULT_COMPACTCHECKER_INTERVAL_MULTIPLIER);
  if (this.compactionCheckMultiplier <= 0) {
    // Bad config value: fall back to the default rather than failing startup.
    LOG.error("Compaction check period multiplier must be positive, setting default: "
        + DEFAULT_COMPACTCHECKER_INTERVAL_MULTIPLIER);
    this.compactionCheckMultiplier = DEFAULT_COMPACTCHECKER_INTERVAL_MULTIPLIER;
  }

  // Static; initialized once by whichever store constructs first.
  if (HStore.closeCheckInterval == 0) {
    HStore.closeCheckInterval = conf.getInt(
        "hbase.hstore.close.check.interval", 10*1000*1000 /* 10 MB */);
  }

  this.storeEngine = StoreEngine.create(this, this.conf, this.comparator);
  // Open all existing store files (in parallel) and register them.
  this.storeEngine.getStoreFileManager().loadFiles(loadStoreFiles());

  // Initialize checksum type from name. The names are CRC32, CRC32C, etc.
  this.checksumType = getChecksumType(conf);
  // initilize bytes per checksum
  this.bytesPerChecksum = getBytesPerChecksum(conf);
  flushRetriesNumber = conf.getInt(
      "hbase.hstore.flush.retries.number", DEFAULT_FLUSH_RETRIES_NUMBER);
  pauseTime = conf.getInt(HConstants.HBASE_SERVER_PAUSE, HConstants.DEFAULT_HBASE_SERVER_PAUSE);
  if (flushRetriesNumber <= 0) {
    throw new IllegalArgumentException(
        "hbase.hstore.flush.retries.number must be > 0, not "
            + flushRetriesNumber);
  }

  // Crypto context for new store files
  String cipherName = family.getEncryptionType();
  if (cipherName != null) {
    Cipher cipher;
    Key key;
    byte[] keyBytes = family.getEncryptionKey();
    if (keyBytes != null) {
      // Family provides specific key material
      String masterKeyName = conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY,
          User.getCurrent().getShortName());
      try {
        // First try the master key
        key = EncryptionUtil.unwrapKey(conf, masterKeyName, keyBytes);
      } catch (KeyException e) {
        // If the current master key fails to unwrap, try the alternate, if
        // one is configured
        if (LOG.isDebugEnabled()) {
          LOG.debug("Unable to unwrap key with current master key '" + masterKeyName + "'");
        }
        String alternateKeyName =
            conf.get(HConstants.CRYPTO_MASTERKEY_ALTERNATE_NAME_CONF_KEY);
        if (alternateKeyName != null) {
          try {
            key = EncryptionUtil.unwrapKey(conf, alternateKeyName, keyBytes);
          } catch (KeyException ex) {
            throw new IOException(ex);
          }
        } else {
          throw new IOException(e);
        }
      }
      // Use the algorithm the key wants
      cipher = Encryption.getCipher(conf, key.getAlgorithm());
      if (cipher == null) {
        // NOTE(review): 'cipher' is null here, so this message prints "null"
        // instead of the algorithm name — consider key.getAlgorithm().
        throw new RuntimeException("Cipher '" + cipher + "' is not available");
      }
      // Fail if misconfigured
      // We use the encryption type specified in the column schema as a sanity check on
      // what the wrapped key is telling us
      if (!cipher.getName().equalsIgnoreCase(cipherName)) {
        throw new RuntimeException("Encryption for family '" + family.getNameAsString() +
            "' configured with type '" + cipherName +
            "' but key specifies algorithm '" + cipher.getName() + "'");
      }
    } else {
      // Family does not provide key material, create a random key
      cipher = Encryption.getCipher(conf, cipherName);
      if (cipher == null) {
        // NOTE(review): same "null" message issue as above — consider cipherName.
        throw new RuntimeException("Cipher '" + cipher + "' is not available");
      }
      key = cipher.getRandomKey();
    }
    cryptoContext = Encryption.newContext(conf);
    cryptoContext.setCipher(cipher);
    cryptoContext.setKey(key);
  }
}
/**
 * Computes the effective TTL for a column family in milliseconds.
 *
 * @param family the column family descriptor
 * @return TTL in milliseconds; {@link Long#MAX_VALUE} for unlimited
 */
private static long determineTTLFromFamily(final HColumnDescriptor family) {
  // HCD.getTimeToLive returns the TTL in seconds.
  final long ttlSeconds = family.getTimeToLive();
  // FOREVER and -1 both mean "no expiry".
  if (ttlSeconds == HConstants.FOREVER || ttlSeconds == -1) {
    return Long.MAX_VALUE;
  }
  // Seconds -> milliseconds for user data.
  return ttlSeconds * 1000;
}
/** @return this store's column family name. */
@Override
public String getColumnFamilyName() {
  return this.family.getNameAsString();
}

/** @return the table this store's region belongs to. */
@Override
public TableName getTableName() {
  return this.getRegionInfo().getTable();
}

/** @return the filesystem backing this store's files. */
@Override
public FileSystem getFileSystem() {
  return this.fs.getFileSystem();
}

/** @return the region-level filesystem abstraction used by this store. */
public HRegionFileSystem getRegionFileSystem() {
  return this.fs;
}

/* Implementation of StoreConfigInformation */
@Override
public long getStoreFileTtl() {
  // TTL only applies if there's no MIN_VERSIONs setting on the column.
  return (this.scanInfo.getMinVersions() == 0) ? this.scanInfo.getTtl() : Long.MAX_VALUE;
}

@Override
public long getMemstoreFlushSize() {
  // TODO: Why is this in here?  The flushsize of the region rather than the store? St.Ack
  return this.region.memstoreFlushSize;
}

/** @return bytes in the memstore eligible to be flushed. */
@Override
public long getFlushableSize() {
  return this.memstore.getFlushableSize();
}

/** @return multiplier applied to the compaction-check interval. */
@Override
public long getCompactionCheckMultiplier() {
  return this.compactionCheckMultiplier;
}

/** @return store file count at which writes are blocked. */
@Override
public long getBlockingFileCount() {
  return blockingFileCount;
}
/* End implementation of StoreConfigInformation */
/**
 * Returns the configured bytesPerChecksum value.
 * @param conf The configuration
 * @return The bytesPerChecksum that is set in the configuration, or the
 *         HFile default when unset
 */
public static int getBytesPerChecksum(Configuration conf) {
  return conf.getInt(HConstants.BYTES_PER_CHECKSUM,
                     HFile.DEFAULT_BYTES_PER_CHECKSUM);
}
/**
 * Returns the configured checksum algorithm.
 * @param conf The configuration
 * @return The checksum algorithm named in the configuration, or the HFile
 *         default when no name is configured
 */
public static ChecksumType getChecksumType(Configuration conf) {
  final String configuredName = conf.get(HConstants.CHECKSUM_TYPE_NAME);
  return configuredName == null
      ? HFile.DEFAULT_CHECKSUM_TYPE
      : ChecksumType.nameToType(configuredName);
}
/**
 * @return how many bytes to write between status checks
 */
public static int getCloseCheckInterval() {
  return closeCheckInterval;
}

/** @return the descriptor of the column family this store serves. */
@Override
public HColumnDescriptor getFamily() {
  return this.family;
}
/**
 * @return The maximum sequence id in all store files. Used for log replay.
 */
long getMaxSequenceId() {
  return StoreFile.getMaxSequenceIdInList(this.getStorefiles());
}

/** @return the maximum memstore timestamp across all store files. */
@Override
public long getMaxMemstoreTS() {
  return StoreFile.getMaxMemstoreTSInList(this.getStorefiles());
}
/**
 * @param tabledir {@link Path} to where the table is being stored
 * @param hri {@link HRegionInfo} for the region.
 * @param family {@link HColumnDescriptor} describing the column family
 * @return Path to family/Store home directory.
 * @deprecated use the region-filesystem APIs instead
 */
@Deprecated
public static Path getStoreHomedir(final Path tabledir,
    final HRegionInfo hri, final byte[] family) {
  // Delegates to the encoded-name overload below.
  return getStoreHomedir(tabledir, hri.getEncodedName(), family);
}

/**
 * @param tabledir {@link Path} to where the table is being stored
 * @param encodedName Encoded region name.
 * @param family {@link HColumnDescriptor} describing the column family
 * @return Path to family/Store home directory (tabledir/encodedName/family).
 * @deprecated use the region-filesystem APIs instead
 */
@Deprecated
public static Path getStoreHomedir(final Path tabledir,
    final String encodedName, final byte[] family) {
  return new Path(tabledir, new Path(encodedName, Bytes.toString(family)));
}
/** @return the data block encoder used when writing HFiles for this store. */
@Override
public HFileDataBlockEncoder getDataBlockEncoder() {
  return dataBlockEncoder;
}

/**
 * Should be used only in tests.
 * @param blockEncoder the block delta encoder to use
 */
void setDataBlockEncoderInTest(HFileDataBlockEncoder blockEncoder) {
  this.dataBlockEncoder = blockEncoder;
}
/**
 * Creates an unsorted list of StoreFile loaded in parallel
 * from the given directory.
 * Also accumulates each opened file's length into {@code storeSize} and
 * {@code totalUncompressedBytes}. On failure, readers opened so far are
 * closed before the exception is rethrown.
 * @return the opened store files; empty list when the directory has none
 * @throws IOException if any file fails to open
 */
private List<StoreFile> loadStoreFiles() throws IOException {
  Collection<StoreFileInfo> files = fs.getStoreFiles(getColumnFamilyName());
  if (files == null || files.size() == 0) {
    return new ArrayList<StoreFile>();
  }

  // initialize the thread pool for opening store files in parallel..
  ThreadPoolExecutor storeFileOpenerThreadPool =
    this.region.getStoreFileOpenAndCloseThreadPool("StoreFileOpenerThread-" +
        this.getColumnFamilyName());
  CompletionService<StoreFile> completionService =
    new ExecutorCompletionService<StoreFile>(storeFileOpenerThreadPool);

  int totalValidStoreFile = 0;
  for (final StoreFileInfo storeFileInfo: files) {
    // open each store file in parallel
    completionService.submit(new Callable<StoreFile>() {
      @Override
      public StoreFile call() throws IOException {
        StoreFile storeFile = createStoreFileAndReader(storeFileInfo);
        return storeFile;
      }
    });
    totalValidStoreFile++;
  }

  ArrayList<StoreFile> results = new ArrayList<StoreFile>(files.size());
  IOException ioe = null;
  try {
    for (int i = 0; i < totalValidStoreFile; i++) {
      try {
        Future<StoreFile> future = completionService.take();
        StoreFile storeFile = future.get();
        long length = storeFile.getReader().length();
        this.storeSize += length;
        this.totalUncompressedBytes +=
          storeFile.getReader().getTotalUncompressedBytes();
        if (LOG.isDebugEnabled()) {
          LOG.debug("loaded " + storeFile.toStringDetailed());
        }
        results.add(storeFile);
      } catch (InterruptedException e) {
        // Remember only the first failure; keep draining the remaining tasks.
        if (ioe == null) ioe = new InterruptedIOException(e.getMessage());
      } catch (ExecutionException e) {
        if (ioe == null) ioe = new IOException(e.getCause());
      }
    }
  } finally {
    storeFileOpenerThreadPool.shutdownNow();
  }
  if (ioe != null) {
    // close StoreFile readers
    for (StoreFile file : results) {
      try {
        if (file != null) file.closeReader(true);
      } catch (IOException e) {
        LOG.warn(e.getMessage());
      }
    }
    throw ioe;
  }

  return results;
}
/**
 * Opens a StoreFile (and its reader) for the file at the given path.
 * @param p path to an existing store file
 * @throws IOException if the file cannot be opened
 */
private StoreFile createStoreFileAndReader(final Path p) throws IOException {
  StoreFileInfo info = new StoreFileInfo(conf, this.getFileSystem(), p);
  return createStoreFileAndReader(info);
}

/**
 * Opens a StoreFile (and its reader) described by the given info, wiring in
 * the region's coprocessor host and this family's bloom filter type.
 * @throws IOException if the reader cannot be created
 */
private StoreFile createStoreFileAndReader(final StoreFileInfo info)
    throws IOException {
  info.setRegionCoprocessorHost(this.region.getCoprocessorHost());
  StoreFile storeFile = new StoreFile(this.getFileSystem(), info, this.conf, this.cacheConf,
      this.family.getBloomFilterType());
  storeFile.createReader();
  return storeFile;
}
/**
 * Adds a KeyValue to the memstore under the shared (read) lock, so it cannot
 * race with close/compaction which take the write lock.
 * @return memstore size delta in bytes
 */
@Override
public long add(final KeyValue kv) {
  lock.readLock().lock();
  try {
    return this.memstore.add(kv);
  } finally {
    lock.readLock().unlock();
  }
}

/** @return timestamp of the oldest un-flushed edit in the memstore. */
@Override
public long timeOfOldestEdit() {
  return memstore.timeOfOldestEdit();
}
/**
 * Passes a delete KeyValue down to the memstore (original comment wrongly
 * said "Adds a value"), under the shared lock.
 *
 * @param kv the delete marker to apply
 * @return memstore size delta
 */
protected long delete(final KeyValue kv) {
  lock.readLock().lock();
  try {
    return this.memstore.delete(kv);
  } finally {
    lock.readLock().unlock();
  }
}

/**
 * Removes a KeyValue from the memstore, undoing a previous {@link #add}.
 * Taken under the shared lock like the other memstore mutations.
 */
@Override
public void rollback(final KeyValue kv) {
  lock.readLock().lock();
  try {
    this.memstore.rollback(kv);
  } finally {
    lock.readLock().unlock();
  }
}
/**
 * @return All store files, as tracked by the store engine's file manager.
 */
@Override
public Collection<StoreFile> getStorefiles() {
  return this.storeEngine.getStoreFileManager().getStorefiles();
}
/**
 * Validates a candidate bulk-load HFile: its key range must fit inside this
 * region, and (when hbase.hstore.bulkload.verify is on) its KeyValues must be
 * sorted and all belong to one column family. The reader is always closed.
 * @param srcPath path of the HFile to validate
 * @throws IOException including WrongRegionException / InvalidHFileException
 *         when validation fails
 */
@Override
public void assertBulkLoadHFileOk(Path srcPath) throws IOException {
  HFile.Reader reader  = null;
  try {
    LOG.info("Validating hfile at " + srcPath + " for inclusion in "
        + "store " + this + " region " + this.getRegionInfo().getRegionNameAsString());
    reader = HFile.createReader(srcPath.getFileSystem(conf),
        srcPath, cacheConf, conf);
    reader.loadFileInfo();

    byte[] firstKey = reader.getFirstRowKey();
    Preconditions.checkState(firstKey != null, "First key can not be null");
    byte[] lk = reader.getLastKey();
    Preconditions.checkState(lk != null, "Last key can not be null");
    byte[] lastKey =  KeyValue.createKeyValueFromKey(lk).getRow();

    LOG.debug("HFile bounds: first=" + Bytes.toStringBinary(firstKey) +
        " last=" + Bytes.toStringBinary(lastKey));
    LOG.debug("Region bounds: first=" +
        Bytes.toStringBinary(getRegionInfo().getStartKey()) +
        " last=" + Bytes.toStringBinary(getRegionInfo().getEndKey()));

    if (!this.getRegionInfo().containsRange(firstKey, lastKey)) {
      throw new WrongRegionException(
          "Bulk load file " + srcPath.toString() + " does not fit inside region "
          + this.getRegionInfo().getRegionNameAsString());
    }

    if (verifyBulkLoads) {
      // Full scan: check per-KeyValue ordering and family consistency.
      KeyValue prevKV = null;
      HFileScanner scanner = reader.getScanner(false, false, false);
      scanner.seekTo();
      do {
        KeyValue kv = scanner.getKeyValue();
        if (prevKV != null) {
          if (Bytes.compareTo(prevKV.getBuffer(), prevKV.getRowOffset(),
              prevKV.getRowLength(), kv.getBuffer(), kv.getRowOffset(),
              kv.getRowLength()) > 0) {
            throw new InvalidHFileException("Previous row is greater than"
                + " current row: path=" + srcPath + " previous="
                + Bytes.toStringBinary(prevKV.getKey()) + " current="
                + Bytes.toStringBinary(kv.getKey()));
          }
          if (Bytes.compareTo(prevKV.getBuffer(), prevKV.getFamilyOffset(),
              prevKV.getFamilyLength(), kv.getBuffer(), kv.getFamilyOffset(),
              kv.getFamilyLength()) != 0) {
            throw new InvalidHFileException("Previous key had different"
                + " family compared to current key: path=" + srcPath
                + " previous=" + Bytes.toStringBinary(prevKV.getFamily())
                + " current=" + Bytes.toStringBinary(kv.getFamily()));
          }
        }
        prevKV = kv;
      } while (scanner.next());
    }
  } finally {
    if (reader != null) reader.close();
  }
}
/**
 * Moves a validated bulk-load HFile into this store and registers it with
 * the store file manager, updating the store size counters.
 * @param srcPathStr source HFile path
 * @param seqNum sequence number assigned to the bulk-loaded file
 * @throws IOException if the move or reader creation fails
 */
@Override
public void bulkLoadHFile(String srcPathStr, long seqNum) throws IOException {
  Path srcPath = new Path(srcPathStr);
  Path dstPath = fs.bulkLoadStoreFile(getColumnFamilyName(), srcPath, seqNum);

  StoreFile sf = createStoreFileAndReader(dstPath);

  StoreFile.Reader r = sf.getReader();
  this.storeSize += r.length();
  this.totalUncompressedBytes += r.getTotalUncompressedBytes();

  LOG.info("Loaded HFile " + srcPath + " into store '" + getColumnFamilyName() +
      "' as " + dstPath + " - updating store file list.");

  // Append the new storefile into the list
  this.lock.writeLock().lock();
  try {
    this.storeEngine.getStoreFileManager().insertNewFiles(Lists.newArrayList(sf));
  } finally {
    // We need the lock, as long as we are updating the storeFiles
    // or changing the memstore. Let us release it before calling
    // notifyChangeReadersObservers. See HBASE-4485 for a possible
    // deadlock scenario that could have happened if continue to hold
    // the lock.
    this.lock.writeLock().unlock();
  }
  notifyChangedReadersObservers();
  LOG.info("Successfully loaded store file " + srcPath
      + " into store " + this + " (new location: " + dstPath + ")");
  if (LOG.isTraceEnabled()) {
    String traceMessage = "BULK LOAD time,size,store size,store files ["
        + EnvironmentEdgeManager.currentTimeMillis() + "," + r.length() + "," + storeSize
        + "," + storeEngine.getStoreFileManager().getStorefileCount() + "]";
    LOG.trace(traceMessage);
  }
}
/**
 * Closes this store: removes its files from the file manager (so metrics no
 * longer see them) and closes all their readers in parallel under the
 * exclusive lock. The first failure is remembered and rethrown after all
 * close tasks have been drained.
 * @return the store files that were closed
 * @throws IOException if any reader failed to close
 */
@Override
public ImmutableCollection<StoreFile> close() throws IOException {
  this.lock.writeLock().lock();
  try {
    // Clear so metrics doesn't find them.
    ImmutableCollection<StoreFile> result = storeEngine.getStoreFileManager().clearFiles();

    if (!result.isEmpty()) {
      // initialize the thread pool for closing store files in parallel.
      ThreadPoolExecutor storeFileCloserThreadPool = this.region
          .getStoreFileOpenAndCloseThreadPool("StoreFileCloserThread-"
              + this.getColumnFamilyName());

      // close each store file in parallel
      CompletionService<Void> completionService =
        new ExecutorCompletionService<Void>(storeFileCloserThreadPool);
      for (final StoreFile f : result) {
        completionService.submit(new Callable<Void>() {
          @Override
          public Void call() throws IOException {
            f.closeReader(true);
            return null;
          }
        });
      }

      IOException ioe = null;
      try {
        for (int i = 0; i < result.size(); i++) {
          try {
            Future<Void> future = completionService.take();
            future.get();
          } catch (InterruptedException e) {
            if (ioe == null) {
              ioe = new InterruptedIOException();
              ioe.initCause(e);
            }
          } catch (ExecutionException e) {
            if (ioe == null) ioe = new IOException(e.getCause());
          }
        }
      } finally {
        storeFileCloserThreadPool.shutdownNow();
      }
      if (ioe != null) throw ioe;
    }
    LOG.info("Closed " + this);
    return result;
  } finally {
    this.lock.writeLock().unlock();
  }
}
/**
 * Snapshot this stores memstore. Call before running
 * {@link #flushCache(long, SortedSet, TimeRangeTracker, AtomicLong, MonitoredTask)}
 * so it has some work to do.
 * Takes the exclusive lock so the snapshot cannot race with concurrent
 * reads/writes of the store's file list.
 */
void snapshot() {
  this.lock.writeLock().lock();
  try {
    this.memstore.snapshot();
  } finally {
    this.lock.writeLock().unlock();
  }
}
/**
 * Write out current snapshot.  Presumes {@link #snapshot()} has been called
 * previously.
 * Retries up to {@code flushRetriesNumber} times with a pause between
 * attempts; each flushed file is validated before the paths are returned.
 * @param logCacheFlushId flush sequence number
 * @param snapshot the memstore snapshot to flush
 * @param snapshotTimeRangeTracker time range covered by the snapshot
 * @param flushedSize The number of bytes flushed
 * @param status task used to report progress
 * @return The path name of the tmp file to which the store was flushed
 * @throws IOException the last flush/validation failure if all retries fail,
 *         or InterruptedIOException if the retry sleep is interrupted
 */
protected List<Path> flushCache(final long logCacheFlushId,
    SortedSet<KeyValue> snapshot,
    TimeRangeTracker snapshotTimeRangeTracker,
    AtomicLong flushedSize,
    MonitoredTask status) throws IOException {
  // If an exception happens flushing, we let it out without clearing
  // the memstore snapshot.  The old snapshot will be returned when we say
  // 'snapshot', the next time flush comes around.
  // Retry after catching exception when flushing, otherwise server will abort
  // itself
  StoreFlusher flusher = storeEngine.getStoreFlusher();
  IOException lastException = null;
  for (int i = 0; i < flushRetriesNumber; i++) {
    try {
      List<Path> pathNames = flusher.flushSnapshot(
          snapshot, logCacheFlushId, snapshotTimeRangeTracker, flushedSize, status);
      Path lastPathName = null;
      try {
        // Sanity-check every produced file before declaring success.
        for (Path pathName : pathNames) {
          lastPathName = pathName;
          validateStoreFile(pathName);
        }
        return pathNames;
      } catch (Exception e) {
        LOG.warn("Failed validating store file " + lastPathName + ", retrying num=" + i, e);
        if (e instanceof IOException) {
          lastException = (IOException) e;
        } else {
          lastException = new IOException(e);
        }
      }
    } catch (IOException e) {
      LOG.warn("Failed flushing store file, retrying num=" + i, e);
      lastException = e;
    }
    // Pause before the next attempt (skipped after the final one).
    if (lastException != null && i < (flushRetriesNumber - 1)) {
      try {
        Thread.sleep(pauseTime);
      } catch (InterruptedException e) {
        IOException iie = new InterruptedIOException();
        iie.initCause(e);
        throw iie;
      }
    }
  }
  throw lastException;
}
/**
 * Commits a flushed tmp file into the store directory and opens a reader on it,
 * updating the store's size accounting.
 * @param path The pathname of the tmp file into which the store was flushed
 * @param logCacheFlushId flush sequence number (used for logging only)
 * @return StoreFile created around the committed file.
 * @throws IOException on commit or reopen failure
 */
private StoreFile commitFile(final Path path,
    final long logCacheFlushId,
    TimeRangeTracker snapshotTimeRangeTracker,
    AtomicLong flushedSize,
    MonitoredTask status)
    throws IOException {
  // Write-out finished successfully, move into the right spot
  Path committedPath = fs.commitStoreFile(getColumnFamilyName(), path);
  status.setStatus("Flushing " + this + ": reopening flushed file");
  StoreFile storeFile = createStoreFileAndReader(committedPath);
  StoreFile.Reader reader = storeFile.getReader();
  this.storeSize += reader.length();
  this.totalUncompressedBytes += reader.getTotalUncompressedBytes();
  if (LOG.isInfoEnabled()) {
    LOG.info("Added " + storeFile + ", entries=" + reader.getEntries() +
        ", sequenceid=" + logCacheFlushId +
        ", filesize=" + StringUtils.humanReadableInt(reader.length()));
  }
  return storeFile;
}
/**
 * Creates a writer for a new StoreFile in the tmp dir.
 * @param maxKeyCount estimated maximum number of keys the file will hold
 * @param compression Compression algorithm to use
 * @param isCompaction whether we are creating a new file in a compaction
 * @param includeMVCCReadpoint whether to include MVCC read points or not
 * @param includesTag whether to include tags or not
 * @return Writer for a new StoreFile in the tmp dir.
 */
@Override
public StoreFile.Writer createWriterInTmp(long maxKeyCount, Compression.Algorithm compression,
    boolean isCompaction, boolean includeMVCCReadpoint, boolean includesTag)
    throws IOException {
  final CacheConfig writerCacheConf;
  if (isCompaction) {
    // Don't cache data on write on compactions.
    writerCacheConf = new CacheConfig(cacheConf);
    writerCacheConf.setCacheDataOnWrite(false);
  } else {
    writerCacheConf = cacheConf;
  }
  // Ask the region server (if available) where replicas of this region's
  // blocks would best be placed.
  InetSocketAddress[] favoredNodes = null;
  if (region.getRegionServerServices() != null) {
    favoredNodes = region.getRegionServerServices().getFavoredNodesForRegion(
        region.getRegionInfo().getEncodedName());
  }
  HFileContext fileContext = createFileContext(compression, includeMVCCReadpoint, includesTag,
      cryptoContext);
  return new StoreFile.WriterBuilder(conf, writerCacheConf, this.getFileSystem())
      .withFilePath(fs.createTempName())
      .withComparator(comparator)
      .withBloomType(family.getBloomFilterType())
      .withMaxKeyCount(maxKeyCount)
      .withFavoredNodes(favoredNodes)
      .withFileContext(fileContext)
      .build();
}
/**
 * Assembles the {@link HFileContext} used when writing a new store file,
 * falling back to {@code HFile.DEFAULT_COMPRESSION_ALGORITHM} when no
 * compression algorithm is supplied.
 */
private HFileContext createFileContext(Compression.Algorithm compression,
    boolean includeMVCCReadpoint, boolean includesTag, Encryption.Context cryptoContext) {
  Compression.Algorithm algorithm =
      (compression == null) ? HFile.DEFAULT_COMPRESSION_ALGORITHM : compression;
  return new HFileContextBuilder()
      .withIncludesMvcc(includeMVCCReadpoint)
      .withIncludesTags(includesTag)
      .withCompression(algorithm)
      .withCompressTags(family.shouldCompressTags())
      .withChecksumType(checksumType)
      .withBytesPerCheckSum(bytesPerChecksum)
      .withBlockSize(blocksize)
      .withHBaseCheckSum(true)
      .withDataBlockEncoding(family.getDataBlockEncoding())
      .withEncryptionContext(cryptoContext)
      .build();
}
/*
 * Change storeFiles adding into place the Reader produced by this new flush.
 * @param sfs Store files produced by the flush, already committed to the store dir.
 * @param set The memstore snapshot that was used to make the passed files.
 * @throws IOException
 * @return Whether compaction is required.
 */
private boolean updateStorefiles(
    final List<StoreFile> sfs, final SortedSet<KeyValue> set) throws IOException {
  this.lock.writeLock().lock();
  try {
    // Publish the new files and drop the snapshot they were built from,
    // atomically with respect to readers of the store file list.
    this.storeEngine.getStoreFileManager().insertNewFiles(sfs);
    this.memstore.clearSnapshot(set);
  } finally {
    // We need the lock, as long as we are updating the storeFiles
    // or changing the memstore. Let us release it before calling
    // notifyChangeReadersObservers. See HBASE-4485 for a possible
    // deadlock scenario that could have happened if continue to hold
    // the lock.
    this.lock.writeLock().unlock();
  }
  // Tell listeners of the change in readers.
  notifyChangedReadersObservers();
  if (LOG.isTraceEnabled()) {
    long totalSize = 0;
    for (StoreFile sf : sfs) {
      totalSize += sf.getReader().length();
    }
    String traceMessage = "FLUSH time,count,size,store size,store files ["
        + EnvironmentEdgeManager.currentTimeMillis() + "," + sfs.size() + "," + totalSize
        + "," + storeSize + "," + storeEngine.getStoreFileManager().getStorefileCount() + "]";
    LOG.trace(traceMessage);
  }
  return needsCompaction();
}
/**
 * Notifies all registered observers that the set of Readers has changed.
 * @throws IOException if an observer fails while refreshing its readers
 */
private void notifyChangedReadersObservers() throws IOException {
  for (ChangedReadersObserver observer : this.changedReaderObservers) {
    observer.updateReaders();
  }
}
/**
 * Get all scanners with no filtering based on TTL (that happens further down
 * the line).
 * @param cacheBlocks whether the scanners should cache the blocks they read
 * @param isGet true for a point Get; affects which store files are selected
 * @param usePread use positional read instead of streaming read
 * @param isCompaction true when scanning on behalf of a compaction
 * @param matcher query matcher, may be null
 * @param startRow scan start row
 * @param stopRow scan stop row
 * @param readPt MVCC read point the scanners must honor
 * @return all scanners for this store
 */
@Override
public List<KeyValueScanner> getScanners(boolean cacheBlocks, boolean isGet,
    boolean usePread, boolean isCompaction, ScanQueryMatcher matcher, byte[] startRow,
    byte[] stopRow, long readPt) throws IOException {
  Collection<StoreFile> storeFilesToScan;
  List<KeyValueScanner> memStoreScanners;
  // Snapshot the file list and memstore scanners under the read lock; the
  // (potentially slow) store file scanners are opened after releasing it.
  this.lock.readLock().lock();
  try {
    storeFilesToScan =
        this.storeEngine.getStoreFileManager().getFilesForScanOrGet(isGet, startRow, stopRow);
    memStoreScanners = this.memstore.getScanners(readPt);
  } finally {
    this.lock.readLock().unlock();
  }
  // First the store file scanners
  // TODO this used to get the store files in descending order,
  // but now we get them in ascending order, which I think is
  // actually more correct, since memstore get put at the end.
  List<StoreFileScanner> sfScanners = StoreFileScanner
      .getScannersForStoreFiles(storeFilesToScan, cacheBlocks, usePread, isCompaction, matcher,
          readPt);
  List<KeyValueScanner> scanners =
      new ArrayList<KeyValueScanner>(sfScanners.size() + 1);
  scanners.addAll(sfScanners);
  // Then the memstore scanners
  scanners.addAll(memStoreScanners);
  return scanners;
}
/**
 * Registers an observer to be notified when the set of store readers changes.
 */
@Override
public void addChangedReaderObserver(ChangedReadersObserver o) {
  changedReaderObservers.add(o);
}
/**
 * Unregisters a reader observer. We don't check if the observer is present;
 * it may legitimately not be.
 */
@Override
public void deleteChangedReaderObserver(ChangedReadersObserver o) {
  changedReaderObservers.remove(o);
}
//////////////////////////////////////////////////////////////////////////////
// Compaction
//////////////////////////////////////////////////////////////////////////////
/**
* Compact the StoreFiles. This method may take some time, so the calling
* thread must be able to block for long periods.
*
* <p>During this time, the Store can work as usual, getting values from
* StoreFiles and writing new StoreFiles from the memstore.
*
* Existing StoreFiles are not destroyed until the new compacted StoreFile is
* completely written-out to disk.
*
* <p>The compactLock prevents multiple simultaneous compactions.
* The structureLock prevents us from interfering with other write operations.
*
* <p>We don't want to hold the structureLock for the whole time, as a compact()
* can be lengthy and we want to allow cache-flushes during this period.
*
* <p> Compaction event should be idempotent, since there is no IO Fencing for
* the region directory in hdfs. A region server might still try to complete the
* compaction after it lost the region. That is why the following events are carefully
* ordered for a compaction:
* 1. Compaction writes new files under region/.tmp directory (compaction output)
* 2. Compaction atomically moves the temporary file under region directory
* 3. Compaction appends a WAL edit containing the compaction input and output files.
* Forces sync on WAL.
* 4. Compaction deletes the input files from the region directory.
*
* Failure conditions are handled like this:
* - If RS fails before 2, compaction won't complete. Even if RS lives on and finishes
* the compaction later, it will only write the new data file to the region directory.
* Since we already have this data, this will be idempotent but we will have a redundant
* copy of the data.
* - If RS fails between 2 and 3, the region will have a redundant copy of the data. The
* RS that failed won't be able to finish sync() for WAL because of lease recovery in WAL.
* - If RS fails after 3, the region server who opens the region will pick up
* the compaction marker from the WAL and replay it by removing the compaction input files.
* Failed RS can also attempt to delete those files, but the operation will be idempotent
*
* See HBASE-2231 for details.
*
* @param compaction compaction details obtained from requestCompaction()
* @throws IOException
* @return Storefile we compacted into or null if we failed or opted out early.
*/
@Override
public List<StoreFile> compact(CompactionContext compaction) throws IOException {
  assert compaction != null && compaction.hasSelection();
  CompactionRequest cr = compaction.getRequest();
  Collection<StoreFile> filesToCompact = cr.getFiles();
  assert !filesToCompact.isEmpty();
  synchronized (filesCompacting) {
    // sanity check: we're compacting files that this store knows about
    // TODO: change this to LOG.error() after more debugging
    Preconditions.checkArgument(filesCompacting.containsAll(filesToCompact));
  }
  // Ready to go. Have list of files to compact.
  LOG.info("Starting compaction of " + filesToCompact.size() + " file(s) in "
      + this + " of " + this.getRegionInfo().getRegionNameAsString()
      + " into tmpdir=" + fs.getTempDir() + ", totalSize="
      + StringUtils.humanReadableInt(cr.getSize()));
  long compactionStartTime = EnvironmentEdgeManager.currentTimeMillis();
  List<StoreFile> sfs = null;
  try {
    // Commence the compaction.
    List<Path> newFiles = compaction.compact();
    // TODO: get rid of this!
    if (!this.conf.getBoolean("hbase.hstore.compaction.complete", true)) {
      LOG.warn("hbase.hstore.compaction.complete is set to false");
      // Test-only escape hatch: do not swap the store file set; just hand
      // back closed StoreFiles built around the compaction output.
      sfs = new ArrayList<StoreFile>(newFiles.size());
      for (Path newFile : newFiles) {
        // Create storefile around what we wrote with a reader on it.
        StoreFile sf = createStoreFileAndReader(newFile);
        sf.closeReader(true);
        sfs.add(sf);
      }
      return sfs;
    }
    // Do the steps necessary to complete the compaction (in this order; see
    // the class-level notes on compaction idempotence):
    // commit output files, record the compaction in the WAL, then swap the
    // store file set.
    sfs = moveCompatedFilesIntoPlace(cr, newFiles);
    writeCompactionWalRecord(filesToCompact, sfs);
    replaceStoreFiles(filesToCompact, sfs);
    // At this point the store will use new files for all new scanners.
    completeCompaction(filesToCompact); // Archive old files & update store size.
  } finally {
    finishCompactionRequest(cr);
  }
  logCompactionEndMessage(cr, sfs, compactionStartTime);
  return sfs;
}
/**
 * Commits compaction output files into the store directory and runs the
 * postCompact coprocessor hook on each resulting StoreFile.
 * NOTE(review): method name carries a long-standing typo ("Compated");
 * kept as-is because in-file callers reference it by this name.
 * @param cr the compaction request the files belong to
 * @param newFiles compaction output paths in the tmp dir
 * @return the committed StoreFiles, in the same order as {@code newFiles}
 */
private List<StoreFile> moveCompatedFilesIntoPlace(
    CompactionRequest cr, List<Path> newFiles) throws IOException {
  List<StoreFile> sfs = new ArrayList<StoreFile>(newFiles.size());
  for (Path newFile : newFiles) {
    assert newFile != null;
    StoreFile sf = moveFileIntoPlace(newFile);
    if (this.getCoprocessorHost() != null) {
      this.getCoprocessorHost().postCompact(this, sf, cr);
    }
    assert sf != null;
    sfs.add(sf);
  }
  return sfs;
}
// Package-visible for tests
StoreFile moveFileIntoPlace(final Path newFile) throws IOException {
  // Sanity-check the file first, then commit it into the store directory
  // and open a reader on the committed copy.
  validateStoreFile(newFile);
  Path committedPath = fs.commitStoreFile(getColumnFamilyName(), newFile);
  return createStoreFileAndReader(committedPath);
}
/**
 * Appends a compaction descriptor (input and output file lists) to the WAL so
 * the compaction can be replayed after a crash. No-op when the region has no log.
 * @param filesCompacted Files compacted (input).
 * @param newFiles Files from compaction.
 */
private void writeCompactionWalRecord(Collection<StoreFile> filesCompacted,
    Collection<StoreFile> newFiles) throws IOException {
  if (region.getLog() == null) {
    return;
  }
  List<Path> inputPaths = new ArrayList<Path>(filesCompacted.size());
  for (StoreFile compacted : filesCompacted) {
    inputPaths.add(compacted.getPath());
  }
  List<Path> outputPaths = new ArrayList<Path>(newFiles.size());
  for (StoreFile produced : newFiles) {
    outputPaths.add(produced.getPath());
  }
  HRegionInfo info = this.region.getRegionInfo();
  CompactionDescriptor compactionDescriptor = ProtobufUtil.toCompactionDescriptor(info,
      family.getName(), inputPaths, outputPaths, fs.getStoreDir(getFamily().getNameAsString()));
  HLogUtil.writeCompactionMarker(region.getLog(), this.region.getTableDesc(),
      this.region.getRegionInfo(), compactionDescriptor, this.region.getSequenceId());
}
/**
 * Atomically swaps the compacted input files for the compaction output in the
 * store file manager, under the store write lock.
 * @param compactedFiles input files to remove from the store's file set
 * @param result compaction output files to add
 */
private void replaceStoreFiles(final Collection<StoreFile> compactedFiles,
    final Collection<StoreFile> result) throws IOException {
  this.lock.writeLock().lock();
  try {
    this.storeEngine.getStoreFileManager().addCompactionResults(compactedFiles, result);
    filesCompacting.removeAll(compactedFiles); // safe bc: lock.writeLock();
  } finally {
    this.lock.writeLock().unlock();
  }
}
/**
 * Log a very elaborate compaction completion message.
 * @param cr Request.
 * @param sfs Resulting files.
 * @param compactionStartTime Start time.
 */
private void logCompactionEndMessage(
    CompactionRequest cr, List<StoreFile> sfs, long compactionStartTime) {
  long now = EnvironmentEdgeManager.currentTimeMillis();
  StringBuilder message = new StringBuilder("Completed");
  message.append(cr.isMajor() ? " major " : " ");
  message.append("compaction of ").append(cr.getFiles().size())
      .append(" file(s) in ").append(this).append(" of ")
      .append(this.getRegionInfo().getRegionNameAsString())
      .append(" into ");
  if (sfs.isEmpty()) {
    message.append("none, ");
  } else {
    for (StoreFile sf : sfs) {
      message.append(sf.getPath().getName())
          .append("(size=")
          .append(StringUtils.humanReadableInt(sf.getReader().length()))
          .append("), ");
    }
  }
  message.append("total size for store is ")
      .append(StringUtils.humanReadableInt(storeSize))
      .append(". This selection was in queue for ")
      .append(StringUtils.formatTimeDiff(compactionStartTime, cr.getSelectionTime()))
      .append(", and took ").append(StringUtils.formatTimeDiff(now, compactionStartTime))
      .append(" to execute.");
  LOG.info(message.toString());
  if (LOG.isTraceEnabled()) {
    int fileCount = storeEngine.getStoreFileManager().getStorefileCount();
    long resultSize = 0;
    for (StoreFile sf : sfs) {
      resultSize += sf.getReader().length();
    }
    LOG.trace("COMPACTION start,end,size out,files in,files out,store size,"
        + "store files [" + compactionStartTime + "," + now + "," + resultSize + ","
        + cr.getFiles().size() + "," + sfs.size() + "," + storeSize + "," + fileCount + "]");
  }
}
/**
 * Call to complete a compaction. Its for the case where we find in the WAL a compaction
 * that was not finished. We could find one recovering a WAL after a regionserver crash.
 * See HBASE-2231.
 * @param compaction descriptor recovered from the WAL compaction marker
 * @throws IOException if removing the input files fails
 */
@Override
public void completeCompactionMarker(CompactionDescriptor compaction)
    throws IOException {
  LOG.debug("Completing compaction from the WAL marker");
  List<String> compactionInputs = compaction.getCompactionInputList();
  // The Compaction Marker is written after the compaction is completed,
  // and the files moved into the region/family folder.
  //
  // If we crash after the entry is written, we may not have removed the
  // input files, but the output file is present.
  // (The unremoved input files will be removed by this function)
  //
  // If we scan the directory and the file is not present, it can mean that:
  //   - The file was manually removed by the user
  //   - The file was removed as consequence of subsequent compaction
  // so, we can't do anything with the "compaction output list" because those
  // files have already been loaded when opening the region (by virtue of
  // being in the store's folder) or they may be missing due to a compaction.
  String familyName = this.getColumnFamilyName();
  List<Path> inputPaths = new ArrayList<Path>(compactionInputs.size());
  for (String compactionInput : compactionInputs) {
    Path inputPath = fs.getStoreFilePath(familyName, compactionInput);
    inputPaths.add(inputPath);
  }
  // Some of the input files might already be deleted; only act on the ones
  // this store still knows about.
  List<StoreFile> inputStoreFiles = new ArrayList<StoreFile>(compactionInputs.size());
  for (StoreFile sf : this.getStorefiles()) {
    if (inputPaths.contains(sf.getQualifiedPath())) {
      inputStoreFiles.add(sf);
    }
  }
  // Fix: use the typed empty list rather than the raw Collections.EMPTY_LIST,
  // which caused an unchecked-conversion warning; there is no compaction
  // output to add here.
  this.replaceStoreFiles(inputStoreFiles, Collections.<StoreFile>emptyList());
  this.completeCompaction(inputStoreFiles);
}
/**
 * This method tries to compact N recent files for testing.
 * Note that because compacting "recent" files only makes sense for some policies,
 * e.g. the default one, it assumes default policy is used. It doesn't use policy,
 * but instead makes a compaction candidate list by itself.
 * @param N Number of files.
 * @throws IOException on compaction failure
 */
public void compactRecentForTestingAssumingDefaultPolicy(int N) throws IOException {
  List<StoreFile> filesToCompact;
  boolean isMajor;
  this.lock.readLock().lock();
  try {
    synchronized (filesCompacting) {
      filesToCompact = Lists.newArrayList(storeEngine.getStoreFileManager().getStorefiles());
      if (!filesCompacting.isEmpty()) {
        // exclude all files older than the newest file we're currently
        // compacting. this allows us to preserve contiguity (HBASE-2856)
        StoreFile last = filesCompacting.get(filesCompacting.size() - 1);
        int idx = filesToCompact.indexOf(last);
        Preconditions.checkArgument(idx != -1);
        filesToCompact.subList(0, idx + 1).clear();
      }
      int count = filesToCompact.size();
      if (N > count) {
        throw new RuntimeException("Not enough files");
      }
      // Keep only the N most recent files and mark them as compacting.
      filesToCompact = filesToCompact.subList(count - N, count);
      isMajor = (filesToCompact.size() == storeEngine.getStoreFileManager().getStorefileCount());
      filesCompacting.addAll(filesToCompact);
      Collections.sort(filesCompacting, StoreFile.Comparators.SEQ_ID);
    }
  } finally {
    this.lock.readLock().unlock();
  }
  try {
    // Ready to go. Have list of files to compact.
    List<Path> newFiles = ((DefaultCompactor)this.storeEngine.getCompactor())
        .compactForTesting(filesToCompact, isMajor);
    // NOTE(review): replaceStoreFiles/completeCompaction are invoked once per
    // output file, so after the first iteration filesToCompact has already
    // been swapped out of the store. Harmless for the expected single-output
    // case — confirm if multiple outputs can ever be produced here.
    for (Path newFile: newFiles) {
      // Move the compaction into place.
      StoreFile sf = moveFileIntoPlace(newFile);
      if (this.getCoprocessorHost() != null) {
        this.getCoprocessorHost().postCompact(this, sf, null);
      }
      replaceStoreFiles(filesToCompact, Lists.newArrayList(sf));
      completeCompaction(filesToCompact);
    }
  } finally {
    synchronized (filesCompacting) {
      filesCompacting.removeAll(filesToCompact);
    }
  }
}
/**
 * @return true if any current store file is a reference file.
 */
@Override
public boolean hasReferences() {
  Collection<StoreFile> currentFiles = this.storeEngine.getStoreFileManager().getStorefiles();
  return StoreUtils.hasReferences(currentFiles);
}
/**
 * @return the compactor's progress tracker for the running compaction.
 */
@Override
public CompactionProgress getCompactionProgress() {
  return storeEngine.getCompactor().getProgress();
}
/**
 * @return whether the compaction policy considers the current store files due
 *         for a major compaction; false if any file has no open Reader.
 */
@Override
public boolean isMajorCompaction() throws IOException {
  for (StoreFile storeFile : this.storeEngine.getStoreFileManager().getStorefiles()) {
    // TODO: what are these reader checks all over the place?
    if (storeFile.getReader() == null) {
      LOG.debug("StoreFile " + storeFile + " has null Reader");
      return false;
    }
  }
  return storeEngine.getCompactionPolicy().isMajorCompaction(
      this.storeEngine.getStoreFileManager().getStorefiles());
}
/**
 * Requests a compaction with no explicit priority and no base request; see
 * {@link #requestCompaction(int, CompactionRequest)}.
 */
@Override
public CompactionContext requestCompaction() throws IOException {
  return requestCompaction(Store.NO_PRIORITY, null);
}
/**
 * Selects files for compaction, consulting coprocessor hooks and, if given, a
 * caller-supplied base request. Marks the selected files in
 * {@code filesCompacting} so the same file is never selected twice concurrently.
 * @param priority requested priority, or {@link Store#NO_PRIORITY} to use the store's own
 * @param baseRequest optional request to combine with the system selection
 * @return the compaction context, or null if writes are disabled or nothing was selected
 */
@Override
public CompactionContext requestCompaction(int priority, CompactionRequest baseRequest)
    throws IOException {
  // don't even select for compaction if writes are disabled
  if (!this.areWritesEnabled()) {
    return null;
  }
  CompactionContext compaction = storeEngine.createCompaction();
  this.lock.readLock().lock();
  try {
    synchronized (filesCompacting) {
      // First, see if coprocessor would want to override selection.
      if (this.getCoprocessorHost() != null) {
        List<StoreFile> candidatesForCoproc = compaction.preSelect(this.filesCompacting);
        boolean override = this.getCoprocessorHost().preCompactSelection(
            this, candidatesForCoproc, baseRequest);
        if (override) {
          // Coprocessor is overriding normal file selection.
          compaction.forceSelect(new CompactionRequest(candidatesForCoproc));
        }
      }
      // Normal case - coprocessor is not overriding file selection.
      if (!compaction.hasSelection()) {
        boolean isUserCompaction = priority == Store.PRIORITY_USER;
        // Only one compaction at a time may claim the off-peak slot
        // (compareAndSet on the shared tracker).
        boolean mayUseOffPeak = offPeakHours.isOffPeakHour() &&
            offPeakCompactionTracker.compareAndSet(false, true);
        try {
          compaction.select(this.filesCompacting, isUserCompaction,
              mayUseOffPeak, forceMajor && filesCompacting.isEmpty());
        } catch (IOException e) {
          // Release the off-peak slot if selection failed.
          if (mayUseOffPeak) {
            offPeakCompactionTracker.set(false);
          }
          throw e;
        }
        assert compaction.hasSelection();
        if (mayUseOffPeak && !compaction.getRequest().isOffPeak()) {
          // Compaction policy doesn't want to take advantage of off-peak.
          offPeakCompactionTracker.set(false);
        }
      }
      if (this.getCoprocessorHost() != null) {
        this.getCoprocessorHost().postCompactSelection(
            this, ImmutableList.copyOf(compaction.getRequest().getFiles()), baseRequest);
      }
      // Selected files; see if we have a compaction with some custom base request.
      if (baseRequest != null) {
        // Update the request with what the system thinks the request should be;
        // its up to the request if it wants to listen.
        compaction.forceSelect(
            baseRequest.combineWith(compaction.getRequest()));
      }
      final Collection<StoreFile> selectedFiles = compaction.getRequest().getFiles();
      if (selectedFiles.isEmpty()) {
        return null;
      }
      // Update filesCompacting (check that we do not try to compact the same StoreFile twice).
      if (!Collections.disjoint(filesCompacting, selectedFiles)) {
        Preconditions.checkArgument(false, "%s overlaps with %s",
            selectedFiles, filesCompacting);
      }
      filesCompacting.addAll(selectedFiles);
      Collections.sort(filesCompacting, StoreFile.Comparators.SEQ_ID);
      // If we're enqueuing a major, clear the force flag.
      boolean isMajor = selectedFiles.size() == this.getStorefilesCount();
      this.forceMajor = this.forceMajor && !isMajor;
      // Set common request properties.
      // Set priority, either override value supplied by caller or from store.
      compaction.getRequest().setPriority(
          (priority != Store.NO_PRIORITY) ? priority : getCompactPriority());
      compaction.getRequest().setIsMajor(isMajor);
      compaction.getRequest().setDescription(
          getRegionInfo().getRegionNameAsString(), getColumnFamilyName());
    }
  } finally {
    this.lock.readLock().unlock();
  }
  LOG.debug(getRegionInfo().getEncodedName() + " - " + getColumnFamilyName() + ": Initiating "
      + (compaction.getRequest().isMajor() ? "major" : "minor") + " compaction");
  this.region.reportCompactionRequestStart(compaction.getRequest().isMajor());
  return compaction;
}
/**
 * Cancels a previously requested compaction, releasing its files and any
 * off-peak slot via {@link #finishCompactionRequest(CompactionRequest)}.
 */
@Override
public void cancelRequestedCompaction(CompactionContext compaction) {
  finishCompactionRequest(compaction.getRequest());
}
/**
 * Bookkeeping after a compaction finishes or is cancelled: reports the end to
 * the region, releases the off-peak slot if held, and un-marks the request's
 * files as compacting.
 */
private void finishCompactionRequest(CompactionRequest cr) {
  this.region.reportCompactionRequestEnd(cr.isMajor(), cr.getFiles().size(), cr.getSize());
  if (cr.isOffPeak()) {
    // Release the single shared off-peak compaction slot.
    offPeakCompactionTracker.set(false);
    cr.setOffPeak(false);
  }
  synchronized (filesCompacting) {
    filesCompacting.removeAll(cr.getFiles());
  }
}
/**
 * Validates a store file by opening and closing it. In HFileV2 this should
 * not be an expensive operation.
 * @param path the path to the store file
 * @throws IOException if the file cannot be opened
 */
private void validateStoreFile(Path path) throws IOException {
  StoreFile sf = null;
  try {
    sf = createStoreFileAndReader(path);
  } catch (IOException e) {
    LOG.error("Failed to open store file : " + path
        + ", keeping it in tmp location", e);
    throw e;
  } finally {
    if (sf != null) {
      sf.closeReader(false);
    }
  }
}
/*
 * <p>It works by processing a compaction that's been written to disk.
 *
 * <p>It is usually invoked at the end of a compaction, but might also be
 * invoked at HStore startup, if the prior execution died midway through.
 *
 * <p>Moving the compacted TreeMap into place means:
 * <pre>
 * 1) Unload all replaced StoreFile, close and collect list to delete.
 * 2) Compute new store size
 * </pre>
 *
 * @param compactedFiles list of files that were compacted
 */
@VisibleForTesting
protected void completeCompaction(final Collection<StoreFile> compactedFiles)
    throws IOException {
  try {
    // Do not delete old store files until we have sent out notification of
    // change in case old files are still being accessed by outstanding scanners.
    // Don't do this under writeLock; see HBASE-4485 for a possible deadlock
    // scenario that could have happened if continue to hold the lock.
    notifyChangedReadersObservers();
    // At this point the store will use new files for all scanners.
    // let the archive util decide if we should archive or delete the files
    LOG.debug("Removing store files after compaction...");
    for (StoreFile compactedFile : compactedFiles) {
      compactedFile.closeReader(true);
    }
    this.fs.removeStoreFiles(this.getColumnFamilyName(), compactedFiles);
  } catch (IOException e) {
    // Deliberately best-effort: failing to archive/remove leaves redundant
    // files behind but must not fail the compaction itself.
    e = RemoteExceptionHandler.checkIOException(e);
    LOG.error("Failed removing compacted files in " + this +
        ". Files we were trying to remove are " + compactedFiles.toString() +
        "; some of them may have been already removed", e);
  }
  // Recompute the store size from the surviving store files.
  this.storeSize = 0L;
  this.totalUncompressedBytes = 0L;
  for (StoreFile hsf : this.storeEngine.getStoreFileManager().getStorefiles()) {
    StoreFile.Reader r = hsf.getReader();
    if (r == null) {
      LOG.warn("StoreFile " + hsf + " has a null Reader");
      continue;
    }
    this.storeSize += r.length();
    this.totalUncompressedBytes += r.getTotalUncompressedBytes();
  }
}
/**
 * @param wantedVersions How many versions were asked for; must be positive.
 * @return {@code wantedVersions} capped at this family's configured maximum
 *         number of versions.
 */
int versionsToReturn(final int wantedVersions) {
  if (wantedVersions <= 0) {
    throw new IllegalArgumentException("Number of versions must be > 0");
  }
  // Make sure we do not return more than maximum versions for this store.
  return Math.min(wantedVersions, this.family.getMaxVersions());
}
/**
 * @return true if {@code key}'s timestamp is strictly older than
 *         {@code oldestTimestamp} and the cell is therefore expired.
 */
static boolean isExpired(final KeyValue key, final long oldestTimestamp) {
  return oldestTimestamp > key.getTimestamp();
}
/**
 * Finds the row at or immediately before the given row, consulting the
 * memstore first and then store files from newest to oldest.
 * @param row the target row key
 * @return the best candidate KeyValue, or null if no candidate was found
 */
@Override
public KeyValue getRowKeyAtOrBefore(final byte[] row) throws IOException {
  // If minVersions is set, we will not ignore expired KVs.
  // As we're only looking for the latest matches, that should be OK.
  // With minVersions > 0 we guarantee that any KV that has any version
  // at all (expired or not) has at least one version that will not expire.
  // Note that this method used to take a KeyValue as arguments. KeyValue
  // can be back-dated, a row key cannot.
  long ttlToUse = scanInfo.getMinVersions() > 0 ? Long.MAX_VALUE : this.scanInfo.getTtl();
  KeyValue kv = new KeyValue(row, HConstants.LATEST_TIMESTAMP);
  GetClosestRowBeforeTracker state = new GetClosestRowBeforeTracker(
      this.comparator, kv, ttlToUse, this.getRegionInfo().isMetaRegion());
  this.lock.readLock().lock();
  try {
    // First go to the memstore. Pick up deletes and candidates.
    this.memstore.getRowKeyAtOrBefore(state);
    // Check if match, if we got a candidate on the asked for 'kv' row.
    // Process each relevant store file. Run through from newest to oldest.
    Iterator<StoreFile> sfIterator = this.storeEngine.getStoreFileManager()
        .getCandidateFilesForRowKeyBefore(state.getTargetKey());
    while (sfIterator.hasNext()) {
      StoreFile sf = sfIterator.next();
      sfIterator.remove(); // Remove sf from iterator.
      boolean haveNewCandidate = rowAtOrBeforeFromStoreFile(sf, state);
      KeyValue keyv = state.getCandidate();
      // we have an optimization here which stops the search if we find exact match.
      if (keyv != null && keyv.matchingRow(row)) return state.getCandidate();
      if (haveNewCandidate) {
        // A better candidate may let the file manager prune the remaining files.
        sfIterator = this.storeEngine.getStoreFileManager().updateCandidateFilesForRowKeyBefore(
            sfIterator, state.getTargetKey(), state.getCandidate());
      }
    }
    return state.getCandidate();
  } finally {
    this.lock.readLock().unlock();
  }
}
/*
 * Check an individual store file for the row at or before a given row.
 * @param f the store file to search
 * @param state tracker holding the target key and the best candidate so far
 * @throws IOException
 * @return True iff the candidate has been updated in the state.
 */
private boolean rowAtOrBeforeFromStoreFile(final StoreFile f,
    final GetClosestRowBeforeTracker state)
    throws IOException {
  StoreFile.Reader r = f.getReader();
  if (r == null) {
    LOG.warn("StoreFile " + f + " has a null Reader");
    return false;
  }
  if (r.getEntries() == 0) {
    LOG.warn("StoreFile " + f + " is a empty store file");
    return false;
  }
  // TODO: Cache these keys rather than make each time?
  byte [] fk = r.getFirstKey();
  if (fk == null) return false;
  KeyValue firstKV = KeyValue.createKeyValueFromKey(fk, 0, fk.length);
  byte [] lk = r.getLastKey();
  KeyValue lastKV = KeyValue.createKeyValueFromKey(lk, 0, lk.length);
  KeyValue firstOnRow = state.getTargetKey();
  if (this.comparator.compareRows(lastKV, firstOnRow) < 0) {
    // If last key in file is not of the target table, no candidates in this
    // file. Return.
    if (!state.isTargetTable(lastKV)) return false;
    // If the row we're looking for is past the end of file, set search key to
    // last key. TODO: Cache last and first key rather than make each time.
    firstOnRow = new KeyValue(lastKV.getRow(), HConstants.LATEST_TIMESTAMP);
  }
  // Get a scanner that caches blocks and that uses pread.
  HFileScanner scanner = r.getScanner(true, true, false);
  // Seek scanner. If can't seek it, return.
  if (!seekToScanner(scanner, firstOnRow, firstKV)) return false;
  // If we found candidate on firstOnRow, just return. THIS WILL NEVER HAPPEN!
  // Unlikely that there'll be an instance of actual first row in table.
  if (walkForwardInSingleRow(scanner, firstOnRow, state)) return true;
  // If here, need to start backing up: repeatedly seek to the key before the
  // current target row and walk forward within that earlier row.
  while (scanner.seekBefore(firstOnRow.getBuffer(), firstOnRow.getKeyOffset(),
      firstOnRow.getKeyLength())) {
    KeyValue kv = scanner.getKeyValue();
    if (!state.isTargetTable(kv)) break;
    if (!state.isBetterCandidate(kv)) break;
    // Make new first on row.
    firstOnRow = new KeyValue(kv.getRow(), HConstants.LATEST_TIMESTAMP);
    // Seek scanner. If can't seek it, break.
    if (!seekToScanner(scanner, firstOnRow, firstKV)) return false;
    // If we find something, break;
    if (walkForwardInSingleRow(scanner, firstOnRow, state)) return true;
  }
  return false;
}
/*
 * Seek the file scanner to firstOnRow or first entry in file.
 * @param scanner file scanner to position
 * @param firstOnRow the synthetic first-on-row key we want to reach
 * @param firstKV the first key actually present in the file
 * @return True if we successfully seeked scanner.
 * @throws IOException
 */
private boolean seekToScanner(final HFileScanner scanner,
    final KeyValue firstOnRow,
    final KeyValue firstKV)
    throws IOException {
  KeyValue kv = firstOnRow;
  // When the target row equals the file's first row, seek to the real first
  // key instead of the synthetic first-on-row key.
  // NOTE(review): an older comment here read "If firstOnRow < firstKV, set to
  // firstKV", but the code tests row equality — confirm which is intended.
  if (this.comparator.compareRows(firstKV, firstOnRow) == 0) kv = firstKV;
  int result = scanner.seekTo(kv.getBuffer(), kv.getKeyOffset(),
      kv.getKeyLength());
  // A result of -1 means the seek failed; anything else is success.
  return result != -1;
}
/*
 * When we come in here, we are probably at the kv just before we break into
 * the row that firstOnRow is on. Usually need to increment one time to get
 * on to the row we are interested in.
 * @param scanner positioned file scanner
 * @param firstOnRow synthetic first key of the target row
 * @param state tracker accumulating the best candidate
 * @return True we found a candidate.
 * @throws IOException
 */
private boolean walkForwardInSingleRow(final HFileScanner scanner,
    final KeyValue firstOnRow,
    final GetClosestRowBeforeTracker state)
    throws IOException {
  boolean foundCandidate = false;
  do {
    KeyValue kv = scanner.getKeyValue();
    // If we are not in the row, skip.
    if (this.comparator.compareRows(kv, firstOnRow) < 0) continue;
    // Did we go beyond the target row? If so break.
    if (state.isTooFar(kv, firstOnRow)) break;
    // Skip expired cells.
    if (state.isExpired(kv)) {
      continue;
    }
    // If we added something, this row is a contender. break.
    if (state.handle(kv)) {
      foundCandidate = true;
      break;
    }
  } while(scanner.next());
  return foundCandidate;
}
/**
 * @return false while the store holds reference files (mid-split), true otherwise.
 */
@Override
public boolean canSplit() {
  this.lock.readLock().lock();
  try {
    // Not split-able if we find a reference store file present in the store.
    if (hasReferences()) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Cannot split region due to reference files being there");
      }
      return false;
    }
    return true;
  } finally {
    this.lock.readLock().unlock();
  }
}
@Override
public byte[] getSplitPoint() {
  this.lock.readLock().lock();
  try {
    // The split policy should never ask a meta region for a split point.
    assert !this.getRegionInfo().isMetaRegion();
    // Reference files (leftovers of an earlier split) make the store
    // non-splittable; in that case, and on error, answer null.
    if (!hasReferences()) {
      try {
        return this.storeEngine.getStoreFileManager().getSplitPoint();
      } catch (IOException e) {
        LOG.warn("Failed getting store size for " + this, e);
      }
    }
  } finally {
    this.lock.readLock().unlock();
  }
  return null;
}
@Override
public long getLastCompactSize() {
  // Aggregate size produced by the most recent compaction of this store.
  return this.lastCompactSize;
}
@Override
public long getSize() {
  // Cached total store size; maintained elsewhere in this class.
  return storeSize;
}
@Override
public void triggerMajorCompaction() {
  // Request that the next compaction of this store be a major one.
  this.forceMajor = true;
}
// Whether a major compaction has been explicitly requested via
// triggerMajorCompaction(). Package-private for use within this package.
boolean getForceMajorCompaction() {
  return this.forceMajor;
}
//////////////////////////////////////////////////////////////////////////////
// File administration
//////////////////////////////////////////////////////////////////////////////
@Override
public KeyValueScanner getScanner(Scan scan,
    final NavigableSet<byte []> targetCols, long readPt) throws IOException {
  lock.readLock().lock();
  try {
    // A coprocessor, when installed, gets first chance to supply the scanner.
    KeyValueScanner scanner = null;
    if (this.getCoprocessorHost() != null) {
      scanner = this.getCoprocessorHost().preStoreScannerOpen(this, scan, targetCols);
    }
    if (scanner != null) {
      return scanner;
    }
    // Otherwise build a store scanner, reversed when the Scan requests it.
    if (scan.isReversed()) {
      return new ReversedStoreScanner(this, getScanInfo(), scan, targetCols, readPt);
    }
    return new StoreScanner(this, getScanInfo(), scan, targetCols, readPt);
  } finally {
    lock.readLock().unlock();
  }
}
@Override
public String toString() {
  // A store is identified by the column family it serves.
  return getColumnFamilyName();
}
@Override
// TODO: why is there this and also getNumberOfStorefiles?! Remove one.
public int getStorefilesCount() {
  // Number of store files currently tracked by the store file manager.
  return this.storeEngine.getStoreFileManager().getStorefileCount();
}
@Override
public long getStoreSizeUncompressed() {
  // Running total of uncompressed bytes; maintained elsewhere in this class.
  return this.totalUncompressedBytes;
}
@Override
public long getStorefilesSize() {
  // Sum of the on-disk lengths of all store files. Files without an open
  // reader are logged and excluded from the total.
  long total = 0;
  for (StoreFile storeFile : this.storeEngine.getStoreFileManager().getStorefiles()) {
    StoreFile.Reader reader = storeFile.getReader();
    if (reader != null) {
      total += reader.length();
    } else {
      LOG.warn("StoreFile " + storeFile + " has a null Reader");
    }
  }
  return total;
}
@Override
public long getStorefilesIndexSize() {
  // Sum of index sizes across all store files. Files without an open reader
  // are logged and excluded from the total.
  long total = 0;
  for (StoreFile storeFile : this.storeEngine.getStoreFileManager().getStorefiles()) {
    StoreFile.Reader reader = storeFile.getReader();
    if (reader != null) {
      total += reader.indexSize();
    } else {
      LOG.warn("StoreFile " + storeFile + " has a null Reader");
    }
  }
  return total;
}
@Override
public long getTotalStaticIndexSize() {
  // Sum of uncompressed data-index sizes across all store files.
  // FIX: guard against a null Reader. The sibling accessors
  // (getStorefilesSize, getStorefilesIndexSize) warn and skip such files;
  // this method previously dereferenced getReader() unconditionally and
  // could throw a NullPointerException.
  long size = 0;
  for (StoreFile s : this.storeEngine.getStoreFileManager().getStorefiles()) {
    StoreFile.Reader r = s.getReader();
    if (r == null) {
      LOG.warn("StoreFile " + s + " has a null Reader");
      continue;
    }
    size += r.getUncompressedDataIndexSize();
  }
  return size;
}
@Override
public long getTotalStaticBloomSize() {
  // Sum of Bloom filter sizes across all store files.
  // FIX: guard against a null Reader, matching the other size accessors;
  // the original dereferenced r without a null check and could NPE.
  long size = 0;
  for (StoreFile s : this.storeEngine.getStoreFileManager().getStorefiles()) {
    StoreFile.Reader r = s.getReader();
    if (r == null) {
      LOG.warn("StoreFile " + s + " has a null Reader");
      continue;
    }
    size += r.getTotalBloomSize();
  }
  return size;
}
@Override
public long getMemStoreSize() {
  // Heap footprint of this store's memstore.
  return this.memstore.heapSize();
}
@Override
public int getCompactPriority() {
  // Delegate to the store file manager. PRIORITY_USER is reserved for
  // explicit user-requested compactions, so seeing it here is unexpected
  // and worth a warning.
  int priority = this.storeEngine.getStoreFileManager().getStoreCompactionPriority();
  if (priority == PRIORITY_USER) {
    LOG.warn("Compaction priority is USER despite there being no user compaction");
  }
  return priority;
}
@Override
public boolean throttleCompaction(long compactionSize) {
  // The compaction policy decides whether a compaction of this size should
  // be throttled (e.g. moved to a slower queue).
  return storeEngine.getCompactionPolicy().throttleCompaction(compactionSize);
}
// The region that owns this store.
public HRegion getHRegion() {
  return this.region;
}
@Override
public RegionCoprocessorHost getCoprocessorHost() {
  // Delegates to the owning region; may be null when no coprocessors are loaded.
  return this.region.getCoprocessorHost();
}
@Override
public HRegionInfo getRegionInfo() {
  // Region metadata comes from the store's file-system abstraction.
  return this.fs.getRegionInfo();
}
@Override
public boolean areWritesEnabled() {
  // Write availability is a region-level property.
  return this.region.areWritesEnabled();
}
@Override
public long getSmallestReadPoint() {
  // Smallest read point across the region's open scanners; delegated upward.
  return this.region.getSmallestReadPoint();
}
/**
 * Used in tests. TODO: Remove
 *
 * Updates the value for the given row/family/qualifier. This function will always be seen as
 * atomic by other readers because it only puts a single KV to memstore. Thus no read/write
 * control necessary.
 * @param row row to update
 * @param f family to update
 * @param qualifier qualifier to update
 * @param newValue the new value to set into memstore
 * @return memstore size delta
 * @throws IOException
 */
public long updateColumnValue(byte [] row, byte [] f,
    byte [] qualifier, long newValue)
    throws IOException {
  this.lock.readLock().lock();
  try {
    // Stamp the new cell with the current time from the environment edge.
    final long now = EnvironmentEdgeManager.currentTimeMillis();
    return this.memstore.updateColumnValue(row, f, qualifier, newValue, now);
  } finally {
    this.lock.readLock().unlock();
  }
}
@Override
public long upsert(Iterable<Cell> cells, long readpoint) throws IOException {
  // Upsert into the memstore under the store read lock; returns the
  // memstore size delta as reported by the memstore.
  this.lock.readLock().lock();
  try {
    return this.memstore.upsert(cells, readpoint);
  } finally {
    this.lock.readLock().unlock();
  }
}
@Override
public StoreFlushContext createFlushContext(long cacheFlushId) {
  // Each flush gets its own context carrying the flush sequence id.
  return new StoreFlusherImpl(cacheFlushId);
}
/**
 * Default {@link StoreFlushContext}: snapshots the memstore, flushes the
 * snapshot to temp files, then commits the resulting store files into the
 * store. Instances are single-use, one per flush.
 */
private class StoreFlusherImpl implements StoreFlushContext {
  private long cacheFlushSeqNum;
  // Memstore snapshot captured by prepare().
  private SortedSet<KeyValue> snapshot;
  // Temp files produced by flushCache(); committed by commit().
  private List<Path> tempFiles;
  private TimeRangeTracker snapshotTimeRangeTracker;
  private final AtomicLong flushedSize = new AtomicLong();

  private StoreFlusherImpl(long cacheFlushSeqNum) {
    this.cacheFlushSeqNum = cacheFlushSeqNum;
  }

  /**
   * This is not thread safe. The caller should have a lock on the region or the store.
   * If necessary, the lock can be added with the patch provided in HBASE-10087
   */
  @Override
  public void prepare() {
    memstore.snapshot();
    this.snapshot = memstore.getSnapshot();
    this.snapshotTimeRangeTracker = memstore.getSnapshotTimeRangeTracker();
  }

  @Override
  public void flushCache(MonitoredTask status) throws IOException {
    tempFiles = HStore.this.flushCache(
        cacheFlushSeqNum, snapshot, snapshotTimeRangeTracker, flushedSize, status);
  }

  @Override
  public boolean commit(MonitoredTask status) throws IOException {
    if (this.tempFiles == null || this.tempFiles.isEmpty()) {
      return false;
    }
    List<StoreFile> storeFiles = new ArrayList<StoreFile>(this.tempFiles.size());
    for (Path storeFilePath : tempFiles) {
      try {
        storeFiles.add(HStore.this.commitFile(storeFilePath, cacheFlushSeqNum,
            snapshotTimeRangeTracker, flushedSize, status));
      } catch (IOException ex) {
        LOG.error("Failed to commit store file " + storeFilePath, ex);
        // Try to delete the files we have committed before.
        for (StoreFile sf : storeFiles) {
          Path pathToDelete = sf.getPath();
          try {
            sf.deleteReader();
          } catch (IOException deleteEx) {
            // FIX: log deleteEx (the delete failure) rather than ex (the commit
            // failure already logged above), so the reason for halting is
            // preserved. The original logged ex here, discarding deleteEx.
            LOG.fatal("Failed to delete store file we committed, halting " + pathToDelete,
                deleteEx);
            Runtime.getRuntime().halt(1);
          }
        }
        throw new IOException("Failed to commit the flush", ex);
      }
    }
    if (HStore.this.getCoprocessorHost() != null) {
      for (StoreFile sf : storeFiles) {
        HStore.this.getCoprocessorHost().postFlush(HStore.this, sf);
      }
    }
    // Add new file to store files. Clear snapshot too while we have the Store write lock.
    return HStore.this.updateStorefiles(storeFiles, snapshot);
  }
}
@Override
public boolean needsCompaction() {
  // Ask the store engine, excluding files already selected for compaction.
  return this.storeEngine.needsCompaction(this.filesCompacting);
}
@Override
public CacheConfig getCacheConfig() {
  // Block-cache configuration used for this store's files.
  return this.cacheConf;
}
// Heap-accounting constants. FIXED_OVERHEAD counts this object's own header,
// references and primitive fields; DEEP_OVERHEAD adds the fixed cost of the
// owned structures. NOTE(review): the reference/primitive counts must be kept
// in sync with the class's actual fields when they change — verify on edit.
public static final long FIXED_OVERHEAD =
    ClassSize.align(ClassSize.OBJECT + (16 * ClassSize.REFERENCE) + (4 * Bytes.SIZEOF_LONG)
        + (5 * Bytes.SIZEOF_INT) + (2 * Bytes.SIZEOF_BOOLEAN));
public static final long DEEP_OVERHEAD = ClassSize.align(FIXED_OVERHEAD
    + ClassSize.OBJECT + ClassSize.REENTRANT_LOCK
    + ClassSize.CONCURRENT_SKIPLISTMAP
    + ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY + ClassSize.OBJECT
    + ScanInfo.FIXED_OVERHEAD);
@Override
public long heapSize() {
  // Fixed overhead plus the variable-size memstore contents.
  return DEEP_OVERHEAD + this.memstore.heapSize();
}
@Override
public KeyValue.KVComparator getComparator() {
  // Comparator used to order KeyValues in this store.
  return comparator;
}
@Override
public ScanInfo getScanInfo() {
  // Scan configuration (TTL, versions, etc.) for this store.
  return scanInfo;
}
/**
 * Set scan info, used by test
 * @param scanInfo new scan info to use for test
 */
void setScanInfo(ScanInfo scanInfo) {
  this.scanInfo = scanInfo;
}
@Override
public boolean hasTooManyStoreFiles() {
  // Exceeding blockingFileCount signals that writes should be blocked
  // until compaction reduces the file count.
  final int fileCount = getStorefilesCount();
  return fileCount > this.blockingFileCount;
}
}
|
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE369_Divide_by_Zero__int_database_divide_21.java
Label Definition File: CWE369_Divide_by_Zero__int.label.xml
Template File: sources-sinks-21.tmpl.java
*/
/*
* @description
* CWE: 369 Divide by zero
* BadSource: database Read data from a database
* GoodSource: A hardcoded non-zero, non-min, non-max, even number
* Sinks: divide
* GoodSink: Check for zero before dividing
* BadSink : Dividing by a value that may be zero
* Flow Variant: 21 Control flow: Flow controlled by value of a private variable. All functions contained in one file.
*
* */
package testcases.CWE369_Divide_by_Zero.s02;
import testcasesupport.*;
import javax.servlet.http.*;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.logging.Level;
/**
 * Generated SARD/Juliet test case for CWE-369 (Divide by Zero), flow
 * variant 21 (control flow driven by private boolean fields).
 *
 * NOTE: the "POTENTIAL FLAW" sites in this class are INTENTIONAL — this
 * code exists to exercise static/dynamic analysis tools. Do not "fix"
 * the flawed sinks; doing so would invalidate the test case.
 */
public class CWE369_Divide_by_Zero__int_database_divide_21 extends AbstractTestCase
{
    /* The variable below is used to drive control flow in the sink function */
    private boolean badPrivate = false;

    public void bad() throws Throwable
    {
        int data;
        data = Integer.MIN_VALUE; /* Initialize data */
        /* Read data from a database */
        {
            Connection connection = null;
            PreparedStatement preparedStatement = null;
            ResultSet resultSet = null;
            try
            {
                /* setup the connection */
                connection = IO.getDBConnection();
                /* prepare and execute a (hardcoded) query */
                preparedStatement = connection.prepareStatement("select name from users where id=0");
                resultSet = preparedStatement.executeQuery();
                /* POTENTIAL FLAW: Read data from a database query resultset */
                String stringNumber = resultSet.getString(1);
                if (stringNumber != null) /* avoid NPD incidental warnings */
                {
                    try
                    {
                        data = Integer.parseInt(stringNumber.trim());
                    }
                    catch (NumberFormatException exceptNumberFormat)
                    {
                        IO.logger.log(Level.WARNING, "Number format exception parsing data from string", exceptNumberFormat);
                    }
                }
            }
            catch (SQLException exceptSql)
            {
                IO.logger.log(Level.WARNING, "Error with SQL statement", exceptSql);
            }
            finally
            {
                /* Close database objects */
                try
                {
                    if (resultSet != null)
                    {
                        resultSet.close();
                    }
                }
                catch (SQLException exceptSql)
                {
                    IO.logger.log(Level.WARNING, "Error closing ResultSet", exceptSql);
                }
                try
                {
                    if (preparedStatement != null)
                    {
                        preparedStatement.close();
                    }
                }
                catch (SQLException exceptSql)
                {
                    IO.logger.log(Level.WARNING, "Error closing PreparedStatement", exceptSql);
                }
                try
                {
                    if (connection != null)
                    {
                        connection.close();
                    }
                }
                catch (SQLException exceptSql)
                {
                    IO.logger.log(Level.WARNING, "Error closing Connection", exceptSql);
                }
            }
        }
        /* Arm the private flag so the sink's flawed branch executes. */
        badPrivate = true;
        badSink(data );
    }

    private void badSink(int data ) throws Throwable
    {
        if (badPrivate)
        {
            /* POTENTIAL FLAW: Zero denominator will cause an issue. An integer division will
            result in an exception. */
            IO.writeLine("bad: 100/" + data + " = " + (100 / data) + "\n");
        }
    }

    /* The variables below are used to drive control flow in the sink functions. */
    private boolean goodB2G1Private = false;
    private boolean goodB2G2Private = false;
    private boolean goodG2BPrivate = false;

    public void good() throws Throwable
    {
        goodB2G1();
        goodB2G2();
        goodG2B();
    }

    /* goodB2G1() - use BadSource and GoodSink by setting the variable to false instead of true */
    private void goodB2G1() throws Throwable
    {
        int data;
        data = Integer.MIN_VALUE; /* Initialize data */
        /* Read data from a database */
        {
            Connection connection = null;
            PreparedStatement preparedStatement = null;
            ResultSet resultSet = null;
            try
            {
                /* setup the connection */
                connection = IO.getDBConnection();
                /* prepare and execute a (hardcoded) query */
                preparedStatement = connection.prepareStatement("select name from users where id=0");
                resultSet = preparedStatement.executeQuery();
                /* POTENTIAL FLAW: Read data from a database query resultset */
                String stringNumber = resultSet.getString(1);
                if (stringNumber != null) /* avoid NPD incidental warnings */
                {
                    try
                    {
                        data = Integer.parseInt(stringNumber.trim());
                    }
                    catch (NumberFormatException exceptNumberFormat)
                    {
                        IO.logger.log(Level.WARNING, "Number format exception parsing data from string", exceptNumberFormat);
                    }
                }
            }
            catch (SQLException exceptSql)
            {
                IO.logger.log(Level.WARNING, "Error with SQL statement", exceptSql);
            }
            finally
            {
                /* Close database objects */
                try
                {
                    if (resultSet != null)
                    {
                        resultSet.close();
                    }
                }
                catch (SQLException exceptSql)
                {
                    IO.logger.log(Level.WARNING, "Error closing ResultSet", exceptSql);
                }
                try
                {
                    if (preparedStatement != null)
                    {
                        preparedStatement.close();
                    }
                }
                catch (SQLException exceptSql)
                {
                    IO.logger.log(Level.WARNING, "Error closing PreparedStatement", exceptSql);
                }
                try
                {
                    if (connection != null)
                    {
                        connection.close();
                    }
                }
                catch (SQLException exceptSql)
                {
                    IO.logger.log(Level.WARNING, "Error closing Connection", exceptSql);
                }
            }
        }
        /* Flag stays false, routing the sink to its safe (checked) branch. */
        goodB2G1Private = false;
        goodB2G1Sink(data );
    }

    private void goodB2G1Sink(int data ) throws Throwable
    {
        if (goodB2G1Private)
        {
            /* INCIDENTAL: CWE 561 Dead Code, the code below will never run */
            IO.writeLine("Benign, fixed string");
        }
        else
        {
            /* FIX: test for a zero denominator */
            if (data != 0)
            {
                IO.writeLine("100/" + data + " = " + (100 / data) + "\n");
            }
            else
            {
                IO.writeLine("This would result in a divide by zero");
            }
        }
    }

    /* goodB2G2() - use BadSource and GoodSink by reversing the blocks in the if in the sink function */
    private void goodB2G2() throws Throwable
    {
        int data;
        data = Integer.MIN_VALUE; /* Initialize data */
        /* Read data from a database */
        {
            Connection connection = null;
            PreparedStatement preparedStatement = null;
            ResultSet resultSet = null;
            try
            {
                /* setup the connection */
                connection = IO.getDBConnection();
                /* prepare and execute a (hardcoded) query */
                preparedStatement = connection.prepareStatement("select name from users where id=0");
                resultSet = preparedStatement.executeQuery();
                /* POTENTIAL FLAW: Read data from a database query resultset */
                String stringNumber = resultSet.getString(1);
                if (stringNumber != null) /* avoid NPD incidental warnings */
                {
                    try
                    {
                        data = Integer.parseInt(stringNumber.trim());
                    }
                    catch (NumberFormatException exceptNumberFormat)
                    {
                        IO.logger.log(Level.WARNING, "Number format exception parsing data from string", exceptNumberFormat);
                    }
                }
            }
            catch (SQLException exceptSql)
            {
                IO.logger.log(Level.WARNING, "Error with SQL statement", exceptSql);
            }
            finally
            {
                /* Close database objects */
                try
                {
                    if (resultSet != null)
                    {
                        resultSet.close();
                    }
                }
                catch (SQLException exceptSql)
                {
                    IO.logger.log(Level.WARNING, "Error closing ResultSet", exceptSql);
                }
                try
                {
                    if (preparedStatement != null)
                    {
                        preparedStatement.close();
                    }
                }
                catch (SQLException exceptSql)
                {
                    IO.logger.log(Level.WARNING, "Error closing PreparedStatement", exceptSql);
                }
                try
                {
                    if (connection != null)
                    {
                        connection.close();
                    }
                }
                catch (SQLException exceptSql)
                {
                    IO.logger.log(Level.WARNING, "Error closing Connection", exceptSql);
                }
            }
        }
        /* Flag set true: the sink's first branch performs the zero check. */
        goodB2G2Private = true;
        goodB2G2Sink(data );
    }

    private void goodB2G2Sink(int data ) throws Throwable
    {
        if (goodB2G2Private)
        {
            /* FIX: test for a zero denominator */
            if (data != 0)
            {
                IO.writeLine("100/" + data + " = " + (100 / data) + "\n");
            }
            else
            {
                IO.writeLine("This would result in a divide by zero");
            }
        }
    }

    /* goodG2B() - use GoodSource and BadSink */
    private void goodG2B() throws Throwable
    {
        int data;
        /* FIX: Use a hardcoded number that won't cause underflow, overflow, divide by zero, or loss-of-precision issues */
        data = 2;
        goodG2BPrivate = true;
        goodG2BSink(data );
    }

    private void goodG2BSink(int data ) throws Throwable
    {
        if (goodG2BPrivate)
        {
            /* POTENTIAL FLAW: Zero denominator will cause an issue. An integer division will
            result in an exception. */
            IO.writeLine("bad: 100/" + data + " = " + (100 / data) + "\n");
        }
    }

    /* Below is the main(). It is only used when building this testcase on
     * its own for testing or for building a binary to use in testing binary
     * analysis tools. It is not used when compiling all the testcases as one
     * application, which is how source code analysis tools are tested.
     */
    public static void main(String[] args) throws ClassNotFoundException,
            InstantiationException, IllegalAccessException
    {
        mainFromParent(args);
    }
}
|
// Copyright 2015-present 650 Industries. All rights reserved.
package abi29_0_0.host.exp.exponent.modules.api;
import android.app.Activity;
import android.content.Intent;
import android.net.Uri;
import android.support.annotation.Nullable;
import android.support.customtabs.CustomTabsIntent;
import abi29_0_0.com.facebook.infer.annotation.Assertions;
import abi29_0_0.com.facebook.react.bridge.Arguments;
import abi29_0_0.com.facebook.react.bridge.Promise;
import abi29_0_0.com.facebook.react.bridge.ReactApplicationContext;
import abi29_0_0.com.facebook.react.bridge.ReactContextBaseJavaModule;
import abi29_0_0.com.facebook.react.bridge.ReactMethod;
import abi29_0_0.com.facebook.react.bridge.WritableMap;
import de.greenrobot.event.EventBus;
import host.exp.exponent.chrometabs.ChromeTabsManagerActivity;
import host.exp.expoview.Exponent;
/**
 * React Native bridge module that opens URLs in a Chrome Custom Tab and
 * resolves a JS promise when the tab is dismissed or cancelled.
 *
 * Promise lifecycle: at most one browser promise (mOpenBrowserPromise) is
 * outstanding at a time; it is settled exactly once by dismissBrowser() or
 * by the ChromeTabsDismissedEvent handler, then cleared.
 */
public class WebBrowserModule extends ReactContextBaseJavaModule {
  private final static String ERROR_CODE = "EXWebBrowser";
  // Promise for the currently open browser session; null when none is open.
  private @Nullable Promise mOpenBrowserPromise;

  public WebBrowserModule(ReactApplicationContext reactContext) {
    super(reactContext);
  }

  @Override
  public String getName() {
    return "ExponentWebBrowser";
  }

  @ReactMethod
  public void openBrowserAsync(final String url, final Promise promise) {
    if (mOpenBrowserPromise != null) {
      // A browser is already open: resolve the OLD promise as cancelled.
      // NOTE(review): the incoming `promise` is returned without ever being
      // settled, so the new JS caller hangs — confirm whether it should be
      // rejected instead.
      WritableMap result = Arguments.createMap();
      result.putString("type", "cancel");
      mOpenBrowserPromise.resolve(result);
      return;
    }
    mOpenBrowserPromise = promise;
    final Activity activity = Exponent.getInstance().getCurrentActivity();
    if (activity == null) {
      promise.reject(ERROR_CODE, "No activity");
      mOpenBrowserPromise = null;
      return;
    }
    // Build a Custom Tabs intent with the title bar hidden and hand it to
    // the manager activity so dismissal events can be observed.
    CustomTabsIntent.Builder builder = new CustomTabsIntent.Builder();
    CustomTabsIntent customTabsIntent = builder.build();
    Intent intent = customTabsIntent.intent;
    intent.setData(Uri.parse(url));
    intent.putExtra(CustomTabsIntent.EXTRA_TITLE_VISIBILITY_STATE, CustomTabsIntent.NO_TITLE);
    // Listen for the dismissal event before launching.
    EventBus.getDefault().register(this);
    activity.startActivity(
        ChromeTabsManagerActivity.createStartIntent(activity, intent));
  }

  @ReactMethod
  public void dismissBrowser() {
    if (mOpenBrowserPromise == null) {
      // Nothing to dismiss.
      return;
    }
    final Activity activity = Exponent.getInstance().getCurrentActivity();
    if (activity == null) {
      mOpenBrowserPromise.reject(ERROR_CODE, "No activity");
      mOpenBrowserPromise = null;
      return;
    }
    // Stop listening, settle the promise, then close the tab.
    EventBus.getDefault().unregister(this);
    WritableMap result = Arguments.createMap();
    result.putString("type", "dismiss");
    mOpenBrowserPromise.resolve(result);
    mOpenBrowserPromise = null;
    activity.startActivity(
        ChromeTabsManagerActivity.createDismissIntent(activity));
  }

  // EventBus callback: the user closed the tab (back button / close).
  public void onEvent(ChromeTabsManagerActivity.ChromeTabsDismissedEvent event) {
    EventBus.getDefault().unregister(this);
    Assertions.assertNotNull(mOpenBrowserPromise);
    WritableMap result = Arguments.createMap();
    result.putString("type", "cancel");
    mOpenBrowserPromise.resolve(result);
    mOpenBrowserPromise = null;
  }
}
|
package org.ovirt.engine.ui.webadmin.uicommon;
import org.ovirt.engine.core.common.businessentities.DisplayType;
import org.ovirt.engine.core.compat.Event;
import org.ovirt.engine.core.compat.EventArgs;
import org.ovirt.engine.core.compat.EventDefinition;
import org.ovirt.engine.core.compat.IEventListener;
import org.ovirt.engine.core.compat.Version;
import org.ovirt.engine.ui.frontend.AsyncQuery;
import org.ovirt.engine.ui.frontend.INewAsyncCallback;
import org.ovirt.engine.ui.uicommonweb.Configurator;
import org.ovirt.engine.ui.uicommonweb.dataprovider.AsyncDataProvider;
import org.ovirt.engine.ui.uicommonweb.models.vms.ISpice;
import com.google.gwt.core.client.GWT;
import com.google.gwt.http.client.Request;
import com.google.gwt.http.client.RequestBuilder;
import com.google.gwt.http.client.RequestCallback;
import com.google.gwt.http.client.RequestException;
import com.google.gwt.http.client.Response;
/**
 * WebAdmin-side {@link Configurator}: sets admin defaults, detects which
 * console display types the current browser supports, and asynchronously
 * fetches the installed SPICE client version from text files served next
 * to the UserPortal module.
 */
public class WebAdminConfigurator extends Configurator implements IEventListener {

    // Temporarily save the locations of webadmin and userportal.
    // TODO: create a new SPICE RPM for webadmin
    private static final String WEBADMIN_ROOT_FOLDER = "/webadmin/webadmin/";
    private static final String USERPORTAL_ROOT_FOLDER = "/UserPortal/org.ovirt.engine.ui.userportal.UserPortal/";

    // Raised when a SPICE version file has been fetched from the server.
    public EventDefinition spiceVersionFileFetchedEvent_Definition =
            new EventDefinition("spiceVersionFileFetched", WebAdminConfigurator.class);
    public Event spiceVersionFileFetchedEvent = new Event(spiceVersionFileFetchedEvent_Definition);

    // Guards the one-time USB-enabled lookup done in Configure().
    private boolean isInitialized;

    public WebAdminConfigurator()
    {
        // Set default configuration values
        setIsAdmin(true);
        setSpiceAdminConsole(true);
        setSpiceFullScreen(false);
        // Add event listeners
        spiceVersionFileFetchedEvent.addListener(this);
        // Update Spice version if needed
        updateSpiceVersion();
    }

    private void updateSpiceVersion() {
        // Update spice version from the text files which are located on the server.
        // If can't update spice version - leave the default value from the Configurator.
        // Only Windows + Internet Explorer clients are covered here.
        ClientAgentType cat = new ClientAgentType();
        if ((cat.os.equalsIgnoreCase("Windows")) && (cat.browser.equalsIgnoreCase("Explorer"))) {
            if (cat.getPlatform().equalsIgnoreCase("win32")) {
                updateSpice32Version();
            } else if (cat.getPlatform().equalsIgnoreCase("win64")) {
                updateSpice64Version();
            }
        }
    }

    public void updateSpice32Version() {
        fetchFile("SpiceVersion.txt", spiceVersionFileFetchedEvent);
    }

    public void updateSpice64Version() {
        fetchFile("SpiceVersion_x64.txt", spiceVersionFileFetchedEvent);
    }

    public void updateIsUsbEnabled(final ISpice spice) {
        // Get 'EnableUSBAsDefault' value from database
        AsyncDataProvider.IsUSBEnabledByDefault(new AsyncQuery(this,
                new INewAsyncCallback() {
                    @Override
                    public void OnSuccess(Object target, Object returnValue) {
                        // Update IsUsbEnabled value
                        setIsUsbEnabled((Boolean) returnValue);
                    }
                }));
    }

    // Derive the UserPortal base URL (where the SPICE files live) from the
    // current WebAdmin module base URL.
    public static String getSpiceBaseURL() {
        return GWT.getModuleBaseURL().replace(WEBADMIN_ROOT_FOLDER, USERPORTAL_ROOT_FOLDER);
    }

    // Fetch file from a specified path
    public void fetchFile(String filePath, final Event onFetched) {
        RequestBuilder requestBuilder = new RequestBuilder(RequestBuilder.GET, getSpiceBaseURL() + filePath);
        try {
            requestBuilder.sendRequest(null, new RequestCallback() {
                @Override
                public void onError(Request request, Throwable exception) {
                    // Intentionally best-effort: on failure the Configurator
                    // defaults remain in effect.
                }
                @Override
                public void onResponseReceived(Request request, Response response) {
                    String result = response.getText();
                    onFetched.raise(this, new FileFetchEventArgs(result));
                }
            });
        } catch (RequestException e) {
            // NOTE(review): swallowed without logging — best-effort by design,
            // but a debug log here would aid troubleshooting.
        }
    }

    // Create a Version object from string
    public Version parseVersion(String versionStr) {
        return new Version(versionStr.replace(',', '.').replace("\n", ""));
    }

    @Override
    public void eventRaised(Event ev, Object sender, EventArgs args)
    {
        if (ev.equals(spiceVersionFileFetchedEvent_Definition))
        {
            Version spiceVersion = parseVersion(((FileFetchEventArgs) args).getFileContent());
            setSpiceVersion(spiceVersion);
        }
    }

    // Event payload carrying the raw content of a fetched file.
    public final class FileFetchEventArgs extends EventArgs
    {
        private String fileContent;

        public String getFileContent() {
            return fileContent;
        }

        public void setFileContent(String fileContent) {
            this.fileContent = fileContent;
        }

        public FileFetchEventArgs(String fileContent) {
            setFileContent(fileContent);
        }
    }

    // Check whether the specified displayType is currently supported
    @Override
    public boolean IsDisplayTypeSupported(DisplayType displayType) {
        switch (displayType) {
        case vnc:
            return false;
        case qxl:
            // SPICE (qxl) requires Windows+IE or Linux+Firefox.
            ClientAgentType cat = new ClientAgentType();
            return (cat.os.equalsIgnoreCase("Windows") && cat.browser.equalsIgnoreCase("Explorer")) ||
                    (cat.os.equalsIgnoreCase("Linux") && cat.browser.equalsIgnoreCase("Firefox"));
        }
        return false;
    }

    @Override
    public void Configure(ISpice spiceImpl)
    {
        SpiceInterfaceImpl spice = (SpiceInterfaceImpl) spiceImpl;
        spice.setDesiredVersion(getSpiceVersion());
        spice.setCurrentVersion(getSpiceVersion());
        spice.setAdminConsole(getSpiceAdminConsole());
        spice.setFullScreen(getSpiceFullScreen());
        // One-time lazy initialization of the USB-enabled flag.
        if (!isInitialized) {
            updateIsUsbEnabled(spice);
            isInitialized = true;
        }
    }
}
|
package ex;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.*;
public class BasicQueue {

    /**
     * Reads two whitespace-separated integer lines from stdin. The first line
     * holds [enqueueCount, pollCount, target]; the second the values to
     * enqueue. Enqueues, polls, then prints: "true" if target is still in the
     * queue, otherwise the smallest remaining element, or 0 if empty.
     */
    public static void main(String[] args) throws IOException {
        BufferedReader reader = new BufferedReader(new InputStreamReader(System.in));

        int[] operations = parseInts(reader.readLine());
        int[] numbers = parseInts(reader.readLine());

        Queue<Integer> queue = new ArrayDeque<>();
        for (int i = 0; i < operations[0]; i++) {
            queue.add(numbers[i]);
        }
        for (int i = 0; i < operations[1]; i++) {
            queue.poll();
        }

        if (queue.isEmpty()) {
            System.out.println(0);
        } else if (queue.contains(operations[2])) {
            System.out.println(true);
        } else {
            System.out.println(Collections.min(queue));
        }
    }

    // Parses a line of whitespace-separated integers into an array.
    private static int[] parseInts(String line) {
        return Arrays.stream(line.split("\\s+"))
                .mapToInt(Integer::parseInt)
                .toArray();
    }
}
|
/*
* Copyright (c) 2020, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
*
* WSO2 Inc. licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file except
* in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package io.siddhi.extension.io.file;
import io.siddhi.core.SiddhiAppRuntime;
import io.siddhi.core.SiddhiManager;
import io.siddhi.core.event.Event;
import io.siddhi.core.stream.input.InputHandler;
import io.siddhi.core.stream.output.StreamCallback;
import io.siddhi.core.util.EventPrinter;
import io.siddhi.core.util.SiddhiTestHelper;
import io.siddhi.extension.util.Utils;
import org.apache.commons.vfs2.FileObject;
import org.apache.commons.vfs2.FileSystemException;
import org.apache.commons.vfs2.Selectors;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.testng.AssertJUnit;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import java.io.IOException;
import java.util.ArrayList;
import java.util.concurrent.atomic.AtomicInteger;
/**
 * Integration tests for the Siddhi file sink/source over SFTP.
 *
 * NOTE(review): these tests require a live SFTP server reachable at
 * sftp://demo:demo@localhost:22 — they are environment-dependent and will
 * fail without it. Timing is based on sleeps plus SiddhiTestHelper waits.
 */
public class SFTPFileSourceSinkTestCase {
    private static final Logger log = LogManager.getLogger(SFTPFileSourceSinkTestCase.class);
    // Count of events received by the stream callbacks; reset per test.
    private AtomicInteger count = new AtomicInteger();
    // Remote working directory, recreated before each test.
    private FileObject tempFTPSource;
    String fileOptions;
    private int waitTime = 10000;
    private int timeout = 30000;

    @BeforeClass
    public void init() {
        ClassLoader classLoader = FileSourceLineModeTestCase.class.getClassLoader();
        String rootPath = classLoader.getResource("files").getFile();
        fileOptions = "USER_DIR_IS_ROOT:true,AVOID_PERMISSION_CHECK:true";
        tempFTPSource = Utils.getFileObject(
                "sftp://demo:demo@localhost:22/sftp/source/", fileOptions);
    }

    @BeforeMethod
    public void doBeforeMethod() throws InterruptedException, FileSystemException {
        count.set(0);
        // Wipe and recreate the remote source directory so each test starts clean.
        try {
            tempFTPSource.delete(Selectors.SELECT_ALL);
            tempFTPSource.createFolder();
        } catch (IOException e) {
            log.error(e.getMessage(), e);
        }
    }

    @Test
    public void fileSinkSourceTest1() throws InterruptedException {
        log.info("test SiddhiIoFile Sink 1");
        // Phase 1: write two events to a single remote JSON file via the sink.
        String streams = "" +
                "@App:name('TestSiddhiApp')" +
                "define stream FooStream (symbol string, price float, volume long); " +
                "@sink(type='file', @map(type='json'), append='false', " +
                "file.system.options='USER_DIR_IS_ROOT:false,AVOID_PERMISSION_CHECK:true'," +
                "file.uri='sftp://demo:demo@localhost:22/sftp/source/published.json') " +
                "define stream BarStream (symbol string, price float, volume long); ";
        String query = "" +
                "from FooStream " +
                "select * " +
                "insert into BarStream; ";
        SiddhiManager siddhiManager = new SiddhiManager();
        SiddhiAppRuntime siddhiAppRuntime = siddhiManager.createSiddhiAppRuntime(streams + query);
        InputHandler stockStream = siddhiAppRuntime.getInputHandler("FooStream");
        siddhiAppRuntime.start();
        stockStream.send(new Object[]{"WSO2", 55.6f, 100L});
        stockStream.send(new Object[]{"IBM", 57.678f, 200L});
        Thread.sleep(100);
        // NOTE(review): symbolNames is built but never used — presumably a
        // leftover from another test variant.
        ArrayList<String> symbolNames = new ArrayList<>();
        symbolNames.add("WSO2.json");
        symbolNames.add("IBM.json");
        symbolNames.add("GOOGLE.json");
        symbolNames.add("REDHAT.json");
        Thread.sleep(1000);
        siddhiAppRuntime.shutdown();
        // Phase 2: read the written file back with the source (append='false'
        // means only the last event survives) and assert one event arrives.
        streams = "" +
                "@App:name('TestSiddhiApp')" +
                "@source(type='file', mode='line'," +
                "file.uri='sftp://demo:demo@localhost:22/sftp/source/published.json', " +
                "action.after.process='keep', " +
                "tailing='false', " +
                "file.system.options='USER_DIR_IS_ROOT:false,AVOID_PERMISSION_CHECK:true'," +
                "@map(type='json'))" +
                "define stream FooStream (symbol string, price float, volume long); " +
                "define stream BarStream (symbol string, price float, volume long); ";
        query = "" +
                "from FooStream " +
                "select * " +
                "insert into BarStream; ";
        siddhiManager = new SiddhiManager();
        siddhiAppRuntime = siddhiManager.createSiddhiAppRuntime(streams + query);
        siddhiAppRuntime.addCallback("BarStream", new StreamCallback() {
            @Override
            public void receive(Event[] events) {
                EventPrinter.print(events);
                int n = count.getAndIncrement() % 5;
                for (Event event : events) {
                    switch (n) {
                        case 0:
                            AssertJUnit.assertEquals(200L, event.getData(2));
                            break;
                        default:
                            AssertJUnit.fail("More events received than expected.");
                    }
                }
            }
        });
        siddhiAppRuntime.start();
        SiddhiTestHelper.waitForEvents(waitTime, 1, count, timeout);
        //assert event count
        AssertJUnit.assertEquals("Number of events", 1, count.get());
        siddhiAppRuntime.shutdown();
    }

    @Test
    public void fileDynamicSinkDirectorySourceTest() throws InterruptedException {
        log.info("test SiddhiIoFile Sink for dynamic paths and reading from a directory");
        // Phase 1: sink with a templated file name ({{fileName}}) writes one
        // file per event.
        String streams = "" +
                "@App:name('TestSiddhiApp')" +
                "define stream FooStream (symbol string, price float, fileName string); " +
                "@sink(type='file', @map(type='json'), append='false', " +
                "file.system.options='USER_DIR_IS_ROOT:false,AVOID_PERMISSION_CHECK:true'," +
                "file.uri='sftp://demo:demo@localhost:22/sftp/source/{{fileName}}.json') " +
                "define stream BarStream (symbol string, price float, fileName string); ";
        String query = "" +
                "from FooStream " +
                "select * " +
                "insert into BarStream; ";
        SiddhiManager siddhiManager = new SiddhiManager();
        SiddhiAppRuntime siddhiAppRuntime = siddhiManager.createSiddhiAppRuntime(streams + query);
        InputHandler stockStream = siddhiAppRuntime.getInputHandler("FooStream");
        siddhiAppRuntime.start();
        stockStream.send(new Object[]{"WSO2", 55.6f, "file1"});
        stockStream.send(new Object[]{"IBM", 57.678f, "file2"});
        Thread.sleep(100);
        Thread.sleep(1000);
        siddhiAppRuntime.shutdown();
        // Phase 2: directory source reads both files back; expect two events.
        streams = "" +
                "@App:name('TestSiddhiApp')" +
                "@source(type='file', mode='line'," +
                "dir.uri='sftp://demo:demo@localhost:22/sftp/source/', " +
                "file.system.options='USER_DIR_IS_ROOT:false,AVOID_PERMISSION_CHECK:true'," +
                "action.after.process='keep', " +
                "tailing='false', " +
                "@map(type='json'))" +
                "define stream FooStream (symbol string, price float, fileName string); " +
                "define stream BarStream (symbol string, price float, fileName string); ";
        query = "" +
                "from FooStream " +
                "select * " +
                "insert into BarStream; ";
        siddhiManager = new SiddhiManager();
        siddhiAppRuntime = siddhiManager.createSiddhiAppRuntime(streams + query);
        siddhiAppRuntime.addCallback("BarStream", new StreamCallback() {
            @Override
            public void receive(Event[] events) {
                EventPrinter.print(events);
                count.getAndIncrement();
            }
        });
        siddhiAppRuntime.start();
        SiddhiTestHelper.waitForEvents(waitTime, 2, count, timeout);
        //assert event count
        AssertJUnit.assertEquals("Number of events", 2, count.get());
        siddhiAppRuntime.shutdown();
    }
}
|
/*
* The MIT License
*
* Copyright (c) 2016 Marcelo "Ataxexe" Guimarães <ataxexe@devnull.tools>
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package tools.devnull.boteco.message;
import tools.devnull.boteco.Builder;
import tools.devnull.boteco.Sendable;
import tools.devnull.boteco.SendableObject;
/**
* A class used to configure a message.
*/
/**
 * Fluent builder used to configure a {@link Sendable} message.
 *
 * <p>Obtain an instance via {@link #message(String)}, chain the optional
 * attributes, and call {@link #build()} to produce the {@link Sendable}.
 */
public class MessageBuilder implements Builder<Sendable> {

    /**
     * Starts building a message with the given content.
     *
     * @param content the message content
     * @return a new builder for the given content
     */
    public static MessageBuilder message(String content) {
        return new MessageBuilder(content);
    }

    private final String content;
    private Priority priority = Priority.NORMAL;
    private String title;
    private String url;

    private MessageBuilder(String content) {
        this.content = content;
    }

    /**
     * Sets the message priority (defaults to {@code Priority.NORMAL}).
     *
     * @param value the priority to use
     * @return this builder, for chaining
     */
    public MessageBuilder withPriority(Priority value) {
        this.priority = value;
        return this;
    }

    /**
     * Sets the message title.
     *
     * @param value the title to use
     * @return this builder, for chaining
     */
    public MessageBuilder withTitle(String value) {
        this.title = value;
        return this;
    }

    /**
     * Sets the URL associated with this message.
     *
     * @param value the url to use
     * @return this builder, for chaining
     */
    public MessageBuilder withUrl(String value) {
        this.url = value;
        return this;
    }

    /**
     * Builds the configured message.
     *
     * @return a {@link SendableObject} carrying the configured content,
     *         title, url and priority
     */
    @Override
    public Sendable build() {
        return new SendableObject(content, title, url, priority);
    }
}
|
package io.pivotal.davos;
import java.time.LocalDate;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonPropertyOrder;
import lombok.Builder;
import lombok.Getter;
/**
 * Reference to a release, serialized to/from JSON.
 *
 * <p>Construction is available both via the Lombok-generated builder and via
 * the explicit {@link JsonCreator} constructor used by Jackson during
 * deserialization. Null fields are omitted from the JSON output.
 */
@Builder
@Getter
@JsonInclude(JsonInclude.Include.NON_NULL)
@JsonPropertyOrder({
        "className",
        "releaseDate",
        "version",
        "id"
})
public class ReleaseRef {
    // Fully qualified (or simple) class name the release refers to.
    @JsonProperty("className")
    private String className;
    // Date the release was published.
    @JsonProperty("releaseDate")
    private LocalDate releaseDate;
    // Human-readable version string.
    @JsonProperty("version")
    private String version;
    // Numeric identifier of the release.
    @JsonProperty("id")
    private Integer id;

    /**
     * Jackson deserialization constructor.
     *
     * @param className   class name the release refers to
     * @param releaseDate date of the release
     * @param version     version string
     * @param id          release identifier
     */
    @JsonCreator
    public ReleaseRef(
            @JsonProperty("className") String className,
            @JsonProperty("releaseDate") LocalDate releaseDate,
            @JsonProperty("version") String version,
            @JsonProperty("id") Integer id) {
        this.className = className;
        this.releaseDate = releaseDate;
        this.version = version;
        this.id = id;
    }
}
|
package cn.wuxia.common.spring.orm.mongo;
import cn.wuxia.common.orm.query.Conditions;
import cn.wuxia.common.orm.query.MatchType;
import cn.wuxia.common.orm.query.Pages;
import cn.wuxia.common.orm.query.Sort.Order;
import cn.wuxia.common.util.ListUtil;
import cn.wuxia.common.util.StringUtil;
import cn.wuxia.common.util.reflection.ReflectionUtil;
import com.google.common.collect.Lists;
import com.mongodb.client.result.DeleteResult;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.annotation.Id;
import org.springframework.data.domain.Sort;
import org.springframework.data.domain.Sort.Direction;
import org.springframework.data.mongodb.core.MongoTemplate;
import org.springframework.data.mongodb.core.query.Criteria;
import org.springframework.data.mongodb.core.query.Query;
import org.springframework.data.mongodb.core.query.Update;
import java.beans.BeanInfo;
import java.beans.IntrospectionException;
import java.beans.Introspector;
import java.beans.PropertyDescriptor;
import java.io.Serializable;
import java.lang.reflect.Method;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.regex.Pattern;
/**
 * Generic Spring Data MongoDB DAO base class.
 *
 * <p>Subclasses bind the entity type {@code T} (resolved reflectively from the
 * generic superclass declaration) and the id type {@code K}. All operations
 * honour {@link #collectionName} when it is set; otherwise the collection is
 * derived from the entity class.
 *
 * @param <T> entity type handled by this DAO
 * @param <K> id type of the entity
 */
public abstract class SpringDataMongoDao<T, K extends Serializable> {
    protected Logger logger = LoggerFactory.getLogger(getClass());

    protected MongoTemplate mongoTemplate;

    /**
     * Collection name to operate on. Set this when documents are not stored
     * in the entity class's default collection; blank means "use the entity
     * class's collection".
     */
    protected String collectionName;

    public SpringDataMongoDao() {
    }

    public SpringDataMongoDao(final MongoTemplate mongoTemplate) {
        this.mongoTemplate = mongoTemplate;
    }

    /**
     * Finds all documents matching the query.
     *
     * @param query the MongoDB query
     * @return matching entities, possibly empty
     */
    public List<T> find(Query query) {
        if (StringUtil.isBlank(collectionName)) {
            return getMongoTemplate().find(query, this.getEntityClass());
        } else {
            return getMongoTemplate().find(query, this.getEntityClass(), collectionName);
        }
    }

    /**
     * Finds a single document matching the query.
     *
     * @param query the MongoDB query
     * @return the matching entity, or {@code null} if none matches
     */
    public T findUnique(Query query) {
        if (StringUtil.isBlank(collectionName)) {
            return getMongoTemplate().findOne(query, this.getEntityClass());
        } else {
            return getMongoTemplate().findOne(query, this.getEntityClass(), collectionName);
        }
    }

    /**
     * Finds a single document whose property equals the given value.
     *
     * @param properties property name
     * @param value      expected value
     * @return the matching entity, or {@code null}
     */
    public T findUniqueBy(final String properties, final Object value) {
        Query query = new Query(Criteria.where(properties).is(value));
        return this.findUnique(query);
    }

    /**
     * Finds a single document matching all the given conditions.
     *
     * @param conditions conditions to apply
     * @return the matching entity, or {@code null}
     */
    public T findUniqueBy(final Conditions... conditions) {
        return this.findUnique(condition2Query(conditions));
    }

    /**
     * Finds documents whose property is contained in the given values.
     *
     * @param properties property name
     * @param value      accepted values
     * @return matching entities, possibly empty
     */
    public List<T> findIn(final String properties, final Object... value) {
        Query query = new Query(Criteria.where(properties).in(value));
        return this.find(query);
    }

    /**
     * Applies the update to every document matching the query.
     *
     * @param query  selector
     * @param update modification to apply
     */
    public void update(Query query, Update update) {
        if (StringUtil.isBlank(collectionName)) {
            getMongoTemplate().updateMulti(query, update, this.getEntityClass());
        } else {
            getMongoTemplate().updateMulti(query, update, this.getEntityClass(), collectionName);
        }
    }

    /**
     * Atomically finds the first matching document and applies the update.
     *
     * @param query  selector
     * @param update modification to apply
     * @return the matched document (pre-modification by default), or {@code null}
     */
    public T updateFirst(Query query, Update update) {
        if (StringUtil.isBlank(collectionName)) {
            return getMongoTemplate().findAndModify(query, update, this.getEntityClass());
        } else {
            return getMongoTemplate().findAndModify(query, update, this.getEntityClass(), collectionName);
        }
    }

    /**
     * Removes every document matching the query.
     *
     * @param query selector
     */
    public void delete(Query query) {
        if (StringUtil.isBlank(collectionName)) {
            getMongoTemplate().remove(query, this.getEntityClass());
        } else {
            getMongoTemplate().remove(query, this.getEntityClass(), collectionName);
        }
    }

    /**
     * Inserts the entity.
     *
     * @param entity entity to insert
     */
    public void save(T entity) {
        if (StringUtil.isBlank(collectionName)) {
            getMongoTemplate().insert(entity);
        } else {
            getMongoTemplate().insert(entity, collectionName);
        }
    }

    /**
     * Inserts all entities in one batch. A null or empty collection is a no-op.
     *
     * @param entitys entities to insert
     */
    public void batchSave(Collection<T> entitys) {
        if (ListUtil.isEmpty(entitys)) {
            return;
        }
        if (StringUtil.isBlank(collectionName)) {
            getMongoTemplate().insert(entitys, this.getEntityClass());
        } else {
            getMongoTemplate().insert(entitys, collectionName);
        }
    }

    /**
     * Inserts a raw map as a document.
     *
     * @param m document content
     */
    public void save(Map<String, ?> m) {
        if (StringUtil.isBlank(collectionName)) {
            getMongoTemplate().insert(m);
        } else {
            getMongoTemplate().insert(m, collectionName);
        }
    }

    /**
     * Removes the given entity.
     *
     * @param entity entity to remove
     * @author songlin
     */
    public void delete(T entity) {
        // Fixed: previously logged with an empty format string ("") so the
        // DeleteResult argument was silently discarded by SLF4J.
        if (StringUtil.isBlank(collectionName)) {
            DeleteResult res = getMongoTemplate().remove(entity);
            logger.info("remove result: {}", res);
        } else {
            DeleteResult res = getMongoTemplate().remove(entity, collectionName);
            logger.info("remove result: {}", res);
        }
    }

    /**
     * Removes the document with the given id.
     *
     * @param id document id
     * @throws Exception if the id property cannot be introspected
     */
    public void deleteById(final K id) throws Exception {
        if (StringUtil.isBlank(collectionName)) {
            getMongoTemplate().remove(new Query().addCriteria(Criteria.where(getIdName()).is(id)), this.getEntityClass());
        } else {
            getMongoTemplate().remove(new Query().addCriteria(Criteria.where(getIdName()).is(id)), this.getEntityClass(), collectionName);
        }
    }

    /**
     * Finds a document by id.
     *
     * @param id document id
     * @return the entity, or {@code null} if absent
     */
    public T findById(final K id) {
        if (StringUtil.isBlank(collectionName)) {
            return getMongoTemplate().findById(id, this.getEntityClass());
        } else {
            return getMongoTemplate().findById(id, this.getEntityClass(), collectionName);
        }
    }

    /**
     * Finds documents whose property equals the given value.
     *
     * @param properties property name
     * @param value      expected value
     * @return matching entities, possibly empty
     */
    public List<T> findBy(final String properties, final Object value) {
        Query query = new Query(Criteria.where(properties).is(value));
        return this.find(query);
    }

    /**
     * Finds documents whose property equals the given value, sorted.
     *
     * @param properties property name
     * @param value      expected value
     * @param orderby    property to sort on
     * @param direction  sort direction
     * @return matching entities, possibly empty
     */
    public List<T> findBy(final String properties, final Object value, String orderby, Direction direction) {
        Query query = new Query(Criteria.where(properties).is(value));
        Sort sort = Sort.by(direction, orderby);
        query.with(sort);
        return this.find(query);
    }

    /**
     * Runs a paged query using only the page's own conditions and sort.
     *
     * @param page page request/response holder
     * @return the same page, populated with total count and results
     */
    public Pages<T> findPage(Pages<T> page) {
        return findPage(new Query(), page);
    }

    /**
     * Runs a paged query: applies the page's conditions to the query, counts,
     * then fetches the requested slice with the page's sort order.
     *
     * @param query base query (page conditions are appended to it)
     * @param page  page request/response holder
     * @return the same page, populated with total count and results
     */
    public Pages<T> findPage(Query query, Pages<T> page) {
        addCondition2Query(query, page.getConditions());
        long count = this.count(query);
        if (count <= 0) {
            // Nothing matches: return early without touching the result list.
            return page;
        }
        page.setTotalCount(count);
        int pageNumber = page.getPageNo();
        int pageSize = page.getPageSize();
        // A pageSize of -1 means "no paging": fetch everything.
        if (pageSize != -1) {
            query.skip((pageNumber - 1) * pageSize).limit(pageSize);
        }
        if (page.getSort() != null) {
            // Translate the framework-agnostic sort into a Spring Data sort.
            Iterator<Order> iterator = page.getSort().iterator();
            List<Sort.Order> orders = Lists.newLinkedList();
            while (iterator.hasNext()) {
                Order order = iterator.next();
                if (order.isAscending()) {
                    orders.add(new Sort.Order(Direction.ASC, order.getProperty()));
                } else {
                    orders.add(new Sort.Order(Direction.DESC, order.getProperty()));
                }
            }
            query.with(Sort.by(orders));
        }
        List<T> rows = this.find(query);
        page.setResult(rows);
        return page;
    }

    /**
     * Appends the given conditions to the query. Null or empty lists are a no-op.
     *
     * @param query      query to extend
     * @param conditions conditions to append, may be null
     */
    protected void addCondition2Query(Query query, List<Conditions> conditions) {
        // Guard added: page.getConditions() may be null, which previously NPE'd.
        if (ListUtil.isEmpty(conditions)) {
            return;
        }
        addCondition2Query(query, conditions.toArray(new Conditions[]{}));
    }

    /**
     * Appends the given conditions to the query, translating each
     * {@link MatchType} into the corresponding MongoDB criteria.
     *
     * @param query      query to extend
     * @param conditions conditions to append
     */
    protected void addCondition2Query(Query query, Conditions... conditions) {
        for (Conditions cond : conditions) {
            // Except for "is null" / "is not null", every condition must carry a
            // value; blank-valued conditions are skipped with a warning.
            if (cond.getMatchType() != MatchType.ISN && cond.getMatchType() != MatchType.INN) {
                if (StringUtil.isBlank(cond.getValue())) {
                    logger.warn("condition: " + cond.getProperty() + " value is null, ignore this condition");
                    continue;
                }
            }
            switch (cond.getMatchType()) {
                case EQ:
                    query.addCriteria(Criteria.where(cond.getProperty()).is(cond.getValue()));
                    break;
                case NE:
                    query.addCriteria(Criteria.where(cond.getProperty()).ne(cond.getValue()));
                    break;
                case ISN:
                    query.addCriteria(Criteria.where(cond.getProperty()).is(null));
                    break;
                case INN:
                    query.addCriteria(Criteria.where(cond.getProperty()).ne(null));
                    break;
                case LL:
                    // Prefix (left) match: value at the beginning of the field.
                    // Fixed: the old pattern used escaped quotes, so the value was
                    // concatenated *inside* the string literal and the literal text
                    // ^."+cond.getValue()+"*$ was compiled as the regex.
                    query.addCriteria(
                            Criteria.where(cond.getProperty()).regex(Pattern.compile("^" + cond.getValue() + ".*$", Pattern.CASE_INSENSITIVE)));
                    break;
                case RL:
                    // Suffix (right) match: value at the end of the field.
                    // Fixed: same escaped-quote bug as LL above.
                    query.addCriteria(
                            Criteria.where(cond.getProperty()).regex(Pattern.compile("^.*" + cond.getValue() + "$", Pattern.CASE_INSENSITIVE)));
                    break;
                case FL:
                    // Contains (fuzzy) match.
                    // NOTE(review): the raw value is interpolated into the regex, so
                    // regex metacharacters in user input alter the match — consider
                    // Pattern.quote() if values can contain metacharacters.
                    query.addCriteria(
                            Criteria.where(cond.getProperty()).regex(Pattern.compile("^.*" + cond.getValue() + ".*$", Pattern.CASE_INSENSITIVE)));
                    break;
                case NL:
                    break;
                case LT:
                    query.addCriteria(Criteria.where(cond.getProperty()).lt(cond.getValue()));
                    break;
                case GT:
                    query.addCriteria(Criteria.where(cond.getProperty()).gt(cond.getValue()));
                    break;
                case GTE:
                    query.addCriteria(Criteria.where(cond.getProperty()).gte(cond.getValue()));
                    break;
                case LTE:
                    query.addCriteria(Criteria.where(cond.getProperty()).lte(cond.getValue()));
                    break;
                case IN:
                    query.addCriteria(Criteria.where(cond.getProperty()).in(cond.getValue()));
                    break;
                case NIN:
                    query.addCriteria(Criteria.where(cond.getProperty()).nin(cond.getValue()));
                    break;
                case BW:
                    // Between: inclusive on both bounds.
                    query.addCriteria(Criteria.where(cond.getProperty()).gte(cond.getValue()).lte(cond.getAnotherValue()));
                    break;
                default:
                    break;
            }
        }
    }

    /**
     * Converts the conditions into a fresh MongoDB query.
     *
     * @param conditions conditions to translate
     * @return a query containing the corresponding criteria
     */
    protected Query condition2Query(Conditions... conditions) {
        Query query = new Query();
        addCondition2Query(query, conditions);
        return query;
    }

    /**
     * Counts documents matching the query.
     *
     * @param query selector
     * @return number of matching documents
     */
    protected long count(Query query) {
        if (StringUtil.isBlank(collectionName)) {
            return getMongoTemplate().count(query, this.getEntityClass());
        } else {
            return getMongoTemplate().count(query, this.getEntityClass(), collectionName);
        }
    }

    /**
     * Resolves the entity class {@code T} from the subclass's generic
     * superclass declaration.
     *
     * @return the entity class handled by this DAO
     */
    protected Class<T> getEntityClass() {
        return ReflectionUtil.getSuperClassGenricType(getClass());
    }

    /**
     * Injection point for the template; may be overridden by subclasses.
     *
     * @param mongoTemplate template to use
     */
    @Autowired
    public void setMongoTemplate(MongoTemplate mongoTemplate) {
        this.mongoTemplate = mongoTemplate;
    }

    public MongoTemplate getMongoTemplate() {
        return mongoTemplate;
    }

    public String getCollectionName() {
        return collectionName;
    }

    public void setCollectionName(String collectionName) {
        this.collectionName = collectionName;
    }

    /**
     * Finds the name of the property whose getter is annotated with
     * {@link Id}, or {@code null} when no such getter exists.
     *
     * <p>NOTE(review): only getter-level {@code @Id} annotations are detected;
     * field-level annotations are not seen by bean introspection — confirm the
     * entities annotate their getters.
     *
     * @return the id property name, or {@code null}
     * @throws IntrospectionException if bean introspection fails
     */
    public String getIdName() throws IntrospectionException {
        BeanInfo beanInfo = Introspector.getBeanInfo(this.getEntityClass());
        PropertyDescriptor[] properties = beanInfo.getPropertyDescriptors();
        String idName = null;
        for (int i = 0; i < properties.length; i++) {
            Method get = properties[i].getReadMethod();
            // Fixed: write-only properties have no read method; skip them
            // instead of throwing a NullPointerException.
            if (get == null || get.getAnnotation(Id.class) == null) {
                continue;
            }
            idName = properties[i].getName();
            break;
        }
        return idName;
    }
}
|
package com.raoulvdberge.refinedstorage.apiimpl.network.node.diskdrive;
import com.raoulvdberge.refinedstorage.api.storage.AccessType;
import com.raoulvdberge.refinedstorage.api.storage.IStorageDisk;
import com.raoulvdberge.refinedstorage.api.storage.StorageDiskType;
import com.raoulvdberge.refinedstorage.tile.TileDiskDrive;
import com.raoulvdberge.refinedstorage.tile.config.IFilterable;
import com.raoulvdberge.refinedstorage.util.StackUtils;
import com.raoulvdberge.refinedstorage.util.WorldUtils;
import net.minecraft.item.ItemStack;
import net.minecraftforge.fluids.FluidStack;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.util.Collection;
import java.util.function.Supplier;
/**
 * Fluid storage view for a disk inside a disk drive.
 *
 * <p>Wraps a parent {@link IStorageDisk} and delegates all storage operations
 * to it, adding drive-specific behaviour: priorities come from the drive,
 * inserts are filtered through the drive's fluid filters, and state changes
 * trigger a block update when the disk's fill-level indicator changes.
 */
public class StorageFluidDiskDrive implements IStorageDisk<FluidStack> {
    private NetworkNodeDiskDrive diskDrive;
    private IStorageDisk<FluidStack> parent;
    // Last rendered disk state (fill-level indicator); used to avoid
    // redundant block updates.
    private int lastState;

    public StorageFluidDiskDrive(NetworkNodeDiskDrive diskDrive, IStorageDisk<FluidStack> parent) {
        this.diskDrive = diskDrive;
        this.parent = parent;
        // Registers a change listener on the parent disk: marks the drive dirty
        // and, when the visual disk state changes, pushes a block update so the
        // drive model re-renders.
        // NOTE(review): this calls an overridable method from the constructor on
        // a not-fully-constructed instance; it delegates to parent, which is
        // assigned just above — confirm no subclass overrides this method.
        this.onPassContainerContext(
                () -> {
                    diskDrive.markDirty();
                    int currentState = TileDiskDrive.getDiskState(getStored(), getCapacity());
                    if (lastState != currentState) {
                        lastState = currentState;
                        WorldUtils.updateBlock(diskDrive.getWorld(), diskDrive.getPos());
                    }
                },
                diskDrive::getVoidExcess,
                diskDrive::getAccessType
        );
        this.lastState = TileDiskDrive.getDiskState(getStored(), getCapacity());
    }

    // Priorities come from the containing drive, not the disk itself.
    @Override
    public int getInsertPriority() {
        return diskDrive.getInsertPriority();
    }

    @Override
    public int getExtractPriority() {
        return diskDrive.getExtractPriority();
    }

    @Override
    public AccessType getAccessType() {
        return parent.getAccessType();
    }

    @Override
    public Collection<FluidStack> getStacks() {
        return parent.getStacks();
    }

    /**
     * Inserts a fluid stack, first checking the drive's fluid filter.
     * If the filter rejects the fluid, the full amount is returned as
     * the remainder (nothing is inserted).
     */
    @Override
    @Nullable
    public FluidStack insert(@Nonnull FluidStack stack, int size, boolean simulate) {
        if (!IFilterable.canTakeFluids(diskDrive.getFluidFilters(), diskDrive.getMode(), diskDrive.getCompare(), stack)) {
            return StackUtils.copy(stack, size);
        }
        return parent.insert(stack, size, simulate);
    }

    @Nullable
    @Override
    public FluidStack extract(@Nonnull FluidStack stack, int size, int flags, boolean simulate) {
        return parent.extract(stack, size, flags, simulate);
    }

    @Override
    public int getStored() {
        return parent.getStored();
    }

    @Override
    public int getCacheDelta(int storedPreInsertion, int size, @Nullable FluidStack remainder) {
        return parent.getCacheDelta(storedPreInsertion, size, remainder);
    }

    @Override
    public int getCapacity() {
        return parent.getCapacity();
    }

    @Override
    public boolean isValid(ItemStack stack) {
        return parent.isValid(stack);
    }

    @Override
    public void onPassContainerContext(Runnable listener, Supplier<Boolean> voidExcess, Supplier<AccessType> accessType) {
        parent.onPassContainerContext(listener, voidExcess, accessType);
    }

    @Override
    public void readFromNBT() {
        parent.readFromNBT();
    }

    @Override
    public void writeToNBT() {
        parent.writeToNBT();
    }

    @Override
    public StorageDiskType getType() {
        return parent.getType();
    }
}
|
/*
* This file is generated by jOOQ.
*/
package cn.edu.kmust.flst.domain.information_schema.tables.pojos;
import java.io.Serializable;
import javax.annotation.Generated;
import javax.validation.constraints.Size;
/**
* This class is generated by jOOQ.
*/
@Generated(
    value = {
        "http://www.jooq.org",
        "jOOQ version:3.10.7"
    },
    comments = "This class is generated by jOOQ"
)
@SuppressWarnings({ "all", "unchecked", "rawtypes" })
public class Help implements Serializable {
    // NOTE: generated by jOOQ from the information_schema HELP table.
    // Do not edit manually — changes will be lost on regeneration.

    private static final long serialVersionUID = 1653470859;

    private Integer id;
    private String section;
    private String topic;
    private String syntax;
    private String text;

    public Help() {}

    // Copy constructor.
    public Help(Help value) {
        this.id = value.id;
        this.section = value.section;
        this.topic = value.topic;
        this.syntax = value.syntax;
        this.text = value.text;
    }

    public Help(
        Integer id,
        String section,
        String topic,
        String syntax,
        String text
    ) {
        this.id = id;
        this.section = section;
        this.topic = topic;
        this.syntax = syntax;
        this.text = text;
    }

    public Integer getId() {
        return this.id;
    }

    public void setId(Integer id) {
        this.id = id;
    }

    @Size(max = 2147483647)
    public String getSection() {
        return this.section;
    }

    public void setSection(String section) {
        this.section = section;
    }

    @Size(max = 2147483647)
    public String getTopic() {
        return this.topic;
    }

    public void setTopic(String topic) {
        this.topic = topic;
    }

    @Size(max = 2147483647)
    public String getSyntax() {
        return this.syntax;
    }

    public void setSyntax(String syntax) {
        this.syntax = syntax;
    }

    @Size(max = 2147483647)
    public String getText() {
        return this.text;
    }

    public void setText(String text) {
        this.text = text;
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("Help (");

        sb.append(id);
        sb.append(", ").append(section);
        sb.append(", ").append(topic);
        sb.append(", ").append(syntax);
        sb.append(", ").append(text);

        sb.append(")");
        return sb.toString();
    }
}
|
package com.github.overmighty.croissant.command.argument;
import com.github.overmighty.croissant.command.CommandExecutor;
import com.github.overmighty.croissant.command.TestCommand;
import org.bukkit.command.CommandSender;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
import java.util.Collections;
import java.util.List;
import java.util.stream.Stream;
/**
 * Tests parsing and tab-completion of {@code @Rest} command arguments, which
 * consume all remaining argument tokens joined into a single string.
 *
 * <p>Note: each {@code @MethodSource} resolves its arguments from the static
 * method of the same name — do not rename one without the other.
 */
class RestTest extends TestCommand {
    RestTest() {
        super("rest");
    }

    // Executor under test: captures the parsed @Rest argument for assertion.
    @CommandExecutor
    void run(CommandSender sender, @Rest String message) {
        super.parsedArgs = new Object[] { message };
    }

    @ParameterizedTest(name = "Test the parsing of @Rest arguments ({index}/4)")
    @MethodSource
    void testRestArgumentsParsing(String[] args, Object[] expected) {
        super.execute(args);
        Assertions.assertArrayEquals(expected, super.parsedArgs);
    }

    // Argument source for testRestArgumentsParsing: raw args -> expected parse.
    // With no args the executor is never invoked, so parsedArgs stays null.
    @SuppressWarnings("unused")
    static Stream<Arguments> testRestArgumentsParsing() {
        return Stream.of(
            Arguments.arguments(
                new String[0],
                null
            ),
            Arguments.arguments(
                new String[] { "test" },
                new Object[] { "test" }
            ),
            Arguments.arguments(
                new String[] { "Hello,", "World!" },
                new Object[] { "Hello, World!" }
            ),
            Arguments.arguments(
                new String[] { "Yet", "another", "JUnit", "test" },
                new Object[] { "Yet another JUnit test" }
            )
        );
    }

    @ParameterizedTest(name = "Test the tab-completion of @Rest arguments ({index}/3)")
    @MethodSource
    void testRestArgumentsCompletion(String[] args, List<String> expected) {
        Assertions.assertEquals(expected, super.tabComplete(args));
    }

    // Argument source for testRestArgumentsCompletion: a @Rest argument keeps
    // offering completions no matter how many tokens were already typed.
    @SuppressWarnings("unused")
    static Stream<Arguments> testRestArgumentsCompletion() {
        return Stream.of(
            Arguments.arguments(
                new String[] { "" },
                Collections.singletonList("OverMighty")
            ),
            Arguments.arguments(
                new String[] { "test1", "" },
                Collections.singletonList("OverMighty")
            ),
            Arguments.arguments(
                new String[] { "test1", "test2", "" },
                Collections.singletonList("OverMighty")
            )
        );
    }
}
|
package org.appenders.log4j2.elasticsearch.failover;
/*-
* #%L
* log4j2-elasticsearch
* %%
* Copyright (C) 2020 Rafal Foltynski
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
import net.openhft.chronicle.map.ChronicleMap;
import org.appenders.log4j2.elasticsearch.ItemSource;
import org.junit.jupiter.api.Test;
import org.mockito.Mockito;
import java.util.Map;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.mock;
/**
 * Verifies that {@link ChronicleMapProxy} delegates every {@link Map}
 * operation (plus {@code close()}) to the underlying {@link ChronicleMap}
 * without altering the arguments.
 */
public class ChronicleMapProxyTest {

    // Renamed from putDelegatesDelegates (typo) for consistency with the
    // other xxxDelegates test names.
    @Test
    public void putDelegates() {

        // given
        ChronicleMap<CharSequence, ItemSource> chronicleMap = createDefaultTestChronicleMap();
        CharSequence key = mock(CharSequence.class);
        ItemSource value = mock(ItemSource.class);

        ChronicleMapProxy proxy = createDefaultTestProxy(chronicleMap);

        // when
        proxy.put(key, value);

        // then
        Mockito.verify(chronicleMap).put(eq(key), eq(value));

    }

    @Test
    public void removeDelegates() {

        // given
        ChronicleMap<CharSequence, ItemSource> chronicleMap = createDefaultTestChronicleMap();
        CharSequence key = mock(CharSequence.class);

        ChronicleMapProxy proxy = createDefaultTestProxy(chronicleMap);

        // when
        proxy.remove(key);

        // then
        Mockito.verify(chronicleMap).remove(eq(key));

    }

    @SuppressWarnings("unchecked")
    @Test
    public void putAllDelegates() {

        // given
        ChronicleMap<CharSequence, ItemSource> chronicleMap = createDefaultTestChronicleMap();
        Map<CharSequence, ItemSource> map = mock(Map.class);

        ChronicleMapProxy proxy = createDefaultTestProxy(chronicleMap);

        // when
        proxy.putAll(map);

        // then
        Mockito.verify(chronicleMap).putAll(eq(map));

    }

    @SuppressWarnings("ResultOfMethodCallIgnored")
    @Test
    public void containsKeyDelegates() {

        // given
        ChronicleMap<CharSequence, ItemSource> chronicleMap = createDefaultTestChronicleMap();
        CharSequence key = mock(CharSequence.class);

        ChronicleMapProxy proxy = createDefaultTestProxy(chronicleMap);

        // when
        proxy.containsKey(key);

        // then
        Mockito.verify(chronicleMap).containsKey(eq(key));

    }

    @Test
    public void getDelegates() {

        // given
        ChronicleMap<CharSequence, ItemSource> chronicleMap = createDefaultTestChronicleMap();
        CharSequence key = mock(CharSequence.class);

        ChronicleMapProxy proxy = createDefaultTestProxy(chronicleMap);

        // when
        proxy.get(key);

        // then
        Mockito.verify(chronicleMap).get(eq(key));

    }

    @Test
    public void clearDelegates() {

        // given
        ChronicleMap<CharSequence, ItemSource> chronicleMap = createDefaultTestChronicleMap();

        ChronicleMapProxy proxy = createDefaultTestProxy(chronicleMap);

        // when
        proxy.clear();

        // then
        Mockito.verify(chronicleMap).clear();

    }

    @SuppressWarnings("ResultOfMethodCallIgnored")
    @Test
    public void keySetDelegates() {

        // given
        ChronicleMap<CharSequence, ItemSource> chronicleMap = createDefaultTestChronicleMap();

        ChronicleMapProxy proxy = createDefaultTestProxy(chronicleMap);

        // when
        proxy.keySet();

        // then
        Mockito.verify(chronicleMap).keySet();

    }

    @SuppressWarnings("ResultOfMethodCallIgnored")
    @Test
    public void valuesDelegates() {

        // given
        ChronicleMap<CharSequence, ItemSource> chronicleMap = createDefaultTestChronicleMap();

        ChronicleMapProxy proxy = createDefaultTestProxy(chronicleMap);

        // when
        proxy.values();

        // then
        Mockito.verify(chronicleMap).values();

    }

    @SuppressWarnings("ResultOfMethodCallIgnored")
    @Test
    public void entrySetDelegates() {

        // given
        ChronicleMap<CharSequence, ItemSource> chronicleMap = createDefaultTestChronicleMap();

        ChronicleMapProxy proxy = createDefaultTestProxy(chronicleMap);

        // when
        proxy.entrySet();

        // then
        Mockito.verify(chronicleMap).entrySet();

    }

    @Test
    public void sizeDelegates() {

        // given
        ChronicleMap<CharSequence, ItemSource> chronicleMap = createDefaultTestChronicleMap();

        ChronicleMapProxy proxy = createDefaultTestProxy(chronicleMap);

        // when
        proxy.size();

        // then
        Mockito.verify(chronicleMap).size();

    }

    @SuppressWarnings("ResultOfMethodCallIgnored")
    @Test
    public void isEmptyDelegates() {

        // given
        ChronicleMap<CharSequence, ItemSource> chronicleMap = createDefaultTestChronicleMap();

        ChronicleMapProxy proxy = createDefaultTestProxy(chronicleMap);

        // when
        proxy.isEmpty();

        // then
        Mockito.verify(chronicleMap).isEmpty();

    }

    @SuppressWarnings("ResultOfMethodCallIgnored")
    @Test
    public void containsValueDelegates() {

        // given
        ChronicleMap<CharSequence, ItemSource> chronicleMap = createDefaultTestChronicleMap();
        ItemSource value = mock(ItemSource.class);

        ChronicleMapProxy proxy = createDefaultTestProxy(chronicleMap);

        // when
        proxy.containsValue(value);

        // then
        Mockito.verify(chronicleMap).containsValue(eq(value));

    }

    @Test
    public void closeDelegates() {

        // given
        ChronicleMap<CharSequence, ItemSource> chronicleMap = createDefaultTestChronicleMap();

        ChronicleMapProxy proxy = createDefaultTestProxy(chronicleMap);

        // when
        proxy.close();

        // then
        Mockito.verify(chronicleMap).close();

    }

    // Wraps the given map in a fresh proxy under test.
    private ChronicleMapProxy createDefaultTestProxy(ChronicleMap<CharSequence, ItemSource> chronicleMap) {
        return new ChronicleMapProxy(chronicleMap);
    }

    // Mocking a generic type is inherently unchecked; suppressed locally.
    @SuppressWarnings("unchecked")
    private ChronicleMap<CharSequence, ItemSource> createDefaultTestChronicleMap() {
        return mock(ChronicleMap.class);
    }
}
|
/*
* Marketing API Vivo商业开放平台
* Marketing API Vivo商业开放平台
*
* Do not edit the class manually.
*/
package com.hyq0719.mktapi.vivo.bean.acoountService;
import com.google.gson.Gson;
import com.google.gson.annotations.SerializedName;
import lombok.Data;
import java.math.BigDecimal;
/**
 * One funds-flow record returned by the Vivo marketing-platform finance query.
 *
 * @author hyq0719
 * @email yueqi.huang@qq.com
 * @date 2022-02-07 23:52:23
 */
@Data
public class FinanceFundsQueryFoundsListStruct {

    // Gson is thread-safe and used statelessly here; share one instance instead
    // of allocating a new one on every toString() call. (Gson skips static
    // fields during serialization, so this does not appear in the JSON output.)
    private static final Gson GSON = new Gson();

    /**
     * Record creation time — millisecond epoch timestamp.
     */
    @SerializedName("date")
    private String date = null;

    /**
     * Account type, mirrors the request parameter {@code type}.
     */
    @SerializedName("accountType")
    private String accountType = null;

    /**
     * Income amount.
     */
    @SerializedName("income")
    private BigDecimal income = null;

    /**
     * Expense amount.
     */
    @SerializedName("expense")
    private BigDecimal expense = null;

    /**
     * Balance after the income/expense was applied.
     */
    @SerializedName("balance")
    private BigDecimal balance = null;

    /** Fluent setter; returns {@code this} for chaining. */
    public FinanceFundsQueryFoundsListStruct date(String date) {
        this.date = date;
        return this;
    }

    /** Fluent setter; returns {@code this} for chaining. */
    public FinanceFundsQueryFoundsListStruct accountType(String accountType) {
        this.accountType = accountType;
        return this;
    }

    /** Fluent setter; returns {@code this} for chaining. */
    public FinanceFundsQueryFoundsListStruct income(BigDecimal income) {
        this.income = income;
        return this;
    }

    /** Fluent setter; returns {@code this} for chaining. */
    public FinanceFundsQueryFoundsListStruct expense(BigDecimal expense) {
        this.expense = expense;
        return this;
    }

    /** Fluent setter; returns {@code this} for chaining. */
    public FinanceFundsQueryFoundsListStruct balance(BigDecimal balance) {
        this.balance = balance;
        return this;
    }

    /**
     * JSON representation of this record.
     */
    @Override
    public String toString() {
        return GSON.toJson(this);
    }
}
|
// "Add on demand static import for 'java.util.Arrays'" "true"
import java.util.*;
import static java.util.Arrays.*;
// NOTE(review): this looks like an IDE-inspection test fixture — the directive
// comment above and the inline /*foos1*/ ... markers appear to be significant
// anchors for the test harness. Confirm before reformatting or removing them.
class Foo {
    void test(String[] foos, String[] bars) {
        System.out.println(/*foos1*//*foos2*/asList(foos)+":"+/*bars1*//*bars2*/asList(bars));
    }

    void test2(String[] foos, String[] bars) {
        System.out.println(/*foos0*//*foos1*//*foos2*/asList(foos)+":"+/*bars0*//*bars1*//*bars2*/asList(bars));
    }

    void test3(String[] foos, String[] bars) {
        System.out.println(//line comment
            asList(foos)+":"+//line comment
            asList(bars));
    }
}
|
package org.junit.contrib.tests.theories.runner;
import org.junit.Test;
import org.junit.contrib.theories.DataPoints;
import org.junit.contrib.theories.Theories;
import org.junit.contrib.theories.Theory;
import org.junit.runner.RunWith;
import static org.junit.Assert.*;
import static org.junit.Assume.*;
import static org.junit.experimental.results.PrintableResult.*;
import static org.junit.experimental.results.ResultMatchers.*;
/**
 * Smoke-tests that the theories runner explores all datapoint combinations
 * quickly enough (11^3 combinations of the {@code ints} datapoints).
 */
public class TheoriesPerformanceTest {
    @RunWith(Theories.class)
    public static class UpToTen {
        @DataPoints public static final int[] ints = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };

        // Intentionally empty: the cost being measured is combination
        // enumeration, not the theory body itself.
        @Theory public void threeInts(int x, int y, int z) {
            // pass always
        }
    }

    // Flip to false to skip this performance-sensitive test via assumeTrue.
    private static final boolean TESTING_PERFORMANCE = true;

    // If we do not share the same instance of TestClass, repeatedly parsing the
    // class's annotations looking for @Befores and @Afters gets really costly.
    //
    // Likewise, the TestClass must be passed into AllMembersSupplier, or the
    // annotation parsing is again costly.
    @Test public void tryCombinationsQuickly() {
        assumeTrue(TESTING_PERFORMANCE);
        assertThat(testResult(UpToTen.class), isSuccessful());
    }
}
|
package com.atguigu.gmall.cart.service;
import com.alibaba.fastjson.JSON;
import com.atguigu.core.bean.Resp;
import com.atguigu.gmall.cart.feign.GmallPmsClient;
import com.atguigu.gmall.cart.feign.GmallSmsClient;
import com.atguigu.gmall.cart.feign.GmallWmsClient;
import com.atguigu.gmall.cart.interceptors.LoginInterceptor;
import com.atguigu.gmall.cart.po.Cart;
import com.atguigu.gmall.cart.po.UserInfo;
import com.atguigu.gmall.pms.entity.SkuInfoEntity;
import com.atguigu.gmall.pms.entity.SkuSaleAttrValueEntity;
import com.atguigu.gmall.sms.vo.SaleVO;
import com.atguigu.gmall.wms.entity.WareSkuEntity;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.redis.core.BoundHashOperations;
import org.springframework.data.redis.core.StringRedisTemplate;
import org.springframework.stereotype.Service;
import org.springframework.util.CollectionUtils;
import javax.security.auth.login.AccountException;
import java.math.BigDecimal;
import java.util.List;
import java.util.stream.Collectors;
/**
* @author lzc
* @create 2019-12-17 19:28
*/
@Service
public class CartService {
// Redis key prefix for per-user cart hashes: "gmall:cart:<userId-or-userKey>".
private static final String KEY_PREFIX ="gmall:cart:";
// Redis key prefix for the cached current price of a sku: "gmall:sku:<skuId>".
private static final String PRICE_PREFIX ="gmall:sku:";
@Autowired
private StringRedisTemplate redisTemplate;
// Product service client: sku info and sale attributes.
@Autowired
private GmallPmsClient pmsClient;
// Warehouse service client: stock information.
@Autowired
private GmallWmsClient wmsClient;
// Marketing service client: sales/promotions for a sku.
@Autowired
private GmallSmsClient smsClient;
/**
 * Adds a sku to the current user's cart (stored as a Redis hash keyed by
 * user id or anonymous user-key). If the sku is already in the cart the
 * counts are merged; otherwise a new cart line is built from the product,
 * marketing and warehouse services.
 */
public void addCart(Cart cart) {
    // Resolve the Redis hash key for the current user (logged-in or anonymous).
    String key = getLoginState();
    // Hash operations bound to this user's cart.
    BoundHashOperations<String, Object, Object> hashOps = this.redisTemplate.boundHashOps(key);
    String skuId = cart.getSkuId().toString();
    Integer count = cart.getCount();
    // Is this sku already in the cart?
    if(hashOps.hasKey(skuId)){
        // Yes: merge quantities — fetch the stored entry, deserialize, bump count.
        // NOTE(review): hasKey-then-get is not atomic; presumably fine under a
        // single user's session, but confirm concurrent add is not a concern.
        String cartJson = hashOps.get(skuId).toString();
        cart = JSON.parseObject(cartJson, Cart.class);
        cart.setCount(cart.getCount() + count);
    } else {
        // No: build a brand-new cart line, selected (checked) by default.
        cart.setCheck(true);
        // Load base sku info (default image, price, title).
        Resp<SkuInfoEntity> skuInfoEntityResp = this.pmsClient.querySkuById(cart.getSkuId());
        SkuInfoEntity skuInfoEntity = skuInfoEntityResp.getData();
        if (skuInfoEntity == null) {
            // Unknown sku: silently ignore the request (deliberate best-effort).
            return ;
        }
        cart.setDefaultImage(skuInfoEntity.getSkuDefaultImg());
        cart.setPrice(skuInfoEntity.getPrice());
        cart.setTitle(skuInfoEntity.getSkuTitle());
        // Load sale attribute values for display.
        Resp<List<SkuSaleAttrValueEntity>> listResp = this.pmsClient.querySkuSaleAttrValuesBySkuId(cart.getSkuId());
        List<SkuSaleAttrValueEntity> saleAttrValueEntities = listResp.getData();
        cart.setSaleAttrValues(saleAttrValueEntities);
        // Load marketing/promotion info.
        Resp<List<SaleVO>> saleResp = this.smsClient.querySalesBySkuId(cart.getSkuId());
        List<SaleVO> saleVOS = saleResp.getData();
        cart.setSales(saleVOS);
        // Load stock info: in stock if any warehouse has stock > 0.
        Resp<List<WareSkuEntity>> wareResp = this.wmsClient.queryWareSkusBySkuId(cart.getSkuId());
        List<WareSkuEntity> wareSkuEntities = wareResp.getData();
        if (!CollectionUtils.isEmpty(wareSkuEntities)) {
            cart.setStore(wareSkuEntities.stream().anyMatch(wareSkuEntity -> wareSkuEntity.getStock() > 0));
        }
        // Cache the current price separately so the cart page can show
        // price changes since the item was added.
        this.redisTemplate.opsForValue().set(PRICE_PREFIX + skuId,skuInfoEntity.getPrice().toString());
    }
    // Persist the (new or merged) cart line back into the user's hash.
    hashOps.put(skuId, JSON.toJSONString(cart));
}
private String getLoginState() {
String key = KEY_PREFIX;
// 获取登录状态
UserInfo userInfo = LoginInterceptor.getUserInfo();
if (userInfo.getId() != null) {
key += userInfo.getId();
} else {
key += userInfo.getUserKey();
}
return key;
}
public List<Cart> queryCarts() {
//获取登录状态
UserInfo userInfo = LoginInterceptor.getUserInfo();
//查询未登录的购物车
String unLoginKey = KEY_PREFIX + userInfo.getUserKey();
BoundHashOperations<String, Object, Object> unLoginHashOps = this.redisTemplate.boundHashOps(unLoginKey);
List<Object> cartJsonList = unLoginHashOps.values();
List<Cart> unLoginCarts = null;
if (!CollectionUtils.isEmpty(cartJsonList)) {
unLoginCarts = cartJsonList.stream().map(cartJson -> {
Cart cart = JSON.parseObject(cartJson.toString(), Cart.class);
//查询当前价格
String priceString = this.redisTemplate.opsForValue().get(PRICE_PREFIX + cart.getSkuId());
cart.setCurrentPrice(new BigDecimal(priceString));
return cart;
}).collect(Collectors.toList());
}
//判断是否登录,未登录,直接返回
if (userInfo.getId() == null) {
return unLoginCarts;
}
//登录,购物车同步
String loginKey = KEY_PREFIX + userInfo.getId();
BoundHashOperations<String, Object, Object> loginHashOps = this.redisTemplate.boundHashOps(loginKey);
if (!CollectionUtils.isEmpty(unLoginCarts)) {
unLoginCarts.forEach(cart -> {
Integer count = cart.getCount();
if (loginHashOps.hasKey(cart.getSkuId().toString())){
String cartJson = loginHashOps.get(cart.getSkuId().toString()).toString();
cart = JSON.parseObject(cartJson, Cart.class);
cart.setCount(cart.getCount()+ count);
}
loginHashOps.put(cart.getSkuId().toString(),JSON.toJSONString(cart));
});
//删除未登录状态购物车
this.redisTemplate.delete(unLoginKey);
}
//查询登录状态的购物车
List<Object> loginCartJsonList = loginHashOps.values();
return loginCartJsonList.stream().map(cartJson -> {
Cart cart = JSON.parseObject(cartJson.toString(), Cart.class);
String priceString = this.redisTemplate.opsForValue().get(PRICE_PREFIX + cart.getSkuId());
cart.setCurrentPrice(new BigDecimal(priceString));
return cart;
}).collect(Collectors.toList());
}
public void updateCart(Cart cart) {
String key = this.getLoginState();
//获取购物车
BoundHashOperations<String, Object, Object> boundHashOps = this.redisTemplate.boundHashOps(key);
Integer count = cart.getCount();
//判断更新的这条记录,在购物车中有没有
if (boundHashOps.hasKey(cart.getSkuId().toString())) {
String cartJson = boundHashOps.get(cart.getSkuId().toString()).toString();
cart = JSON.parseObject(cartJson, Cart.class);
cart.setCount(count);
boundHashOps.put(cart.getSkuId().toString(),JSON.toJSONString(cart));
}
}
public void deleteCart(Long skuId) {
//获取登录信息
UserInfo userInfo = LoginInterceptor.getUserInfo();
//获取redis中的key
String key = KEY_PREFIX;
if (userInfo.getId() == null) {
key += userInfo.getUserKey();
} else {
key += userInfo.getId();
}
BoundHashOperations<String, Object, Object> hashOperations = this.redisTemplate.boundHashOps(key);
hashOperations.delete(skuId.toString());
}
}
|
package mpern.sap.commerce.ccv2.validation.impl;
import static mpern.sap.commerce.ccv2.model.Aspect.ADMIN_ASPECT;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
import mpern.sap.commerce.ccv2.model.Aspect;
import mpern.sap.commerce.ccv2.model.Manifest;
import mpern.sap.commerce.ccv2.model.Webapp;
import mpern.sap.commerce.ccv2.validation.Error;
import mpern.sap.commerce.ccv2.validation.ExtensionValidator;
import mpern.sap.commerce.ccv2.validation.ExtensionsResolver;
public class AspectWebappValidator extends ExtensionValidator {
public AspectWebappValidator(ExtensionsResolver extensionsResolver) {
super(extensionsResolver);
}
@Override
protected List<Error> validateWithExtensions(Manifest manifest, ExtensionsResolver.Result effectiveExtensions) {
List<Error> errors = new ArrayList<>();
Set<String> extensionNames = effectiveExtensions.extensions.stream().map(e -> e.name)
.collect(Collectors.toSet());
for (int i = 0; i < manifest.aspects.size(); i++) {
Aspect aspect = manifest.aspects.get(i);
if (ADMIN_ASPECT.equals(aspect.name)) {
continue;
}
for (int j = 0; j < aspect.webapps.size(); j++) {
Webapp w = aspect.webapps.get(j);
// extension does not exist / not loaded
if (!extensionNames.contains(w.name)) {
errors.add(
new Error.Builder().setLocation("aspects[?name == '%s'].webapps[%d]", aspect.name, i)
.setMessage("Extension `%s` not available.\n%s", w.name,
formatLocations(effectiveExtensions.locations))
.setCode("E-001").createError());
}
}
}
return errors;
}
}
|
package com.strakswallet.presenter.activities;
import android.support.v4.app.Fragment;
import android.support.v4.app.FragmentManager;
import android.content.res.Resources;
import android.os.Bundle;
import android.support.v4.app.FragmentPagerAdapter;
import android.support.v4.content.ContextCompat;
import android.support.v4.view.ViewPager;
import android.util.SparseArray;
import android.util.TypedValue;
import android.view.View;
import android.view.WindowManager;
import android.widget.Button;
import android.widget.ImageButton;
import android.widget.LinearLayout;
import android.widget.TextView;
import com.strakswallet.R;
import com.strakswallet.presenter.activities.util.BRActivity;
import com.strakswallet.presenter.customviews.BRDialogView;
import com.strakswallet.presenter.fragments.FragmentPhraseWord;
import com.strakswallet.tools.animation.BRAnimator;
import com.strakswallet.tools.animation.BRDialog;
import com.strakswallet.tools.manager.BRReportsManager;
import com.strakswallet.tools.security.PostAuth;
import com.strakswallet.tools.util.Utils;
import java.util.Locale;
/**
 * Activity that displays the wallet's recovery ("paper key") phrase one word
 * per page. Screenshots are blocked via FLAG_SECURE; advancing past the last
 * word starts the phrase-proving flow.
 */
public class PaperKeyActivity extends BRActivity {
    private static final String TAG = PaperKeyActivity.class.getName();
    /** Expected number of words in the recovery phrase. */
    private static final int PHRASE_WORD_COUNT = 12;

    private ViewPager wordViewPager;
    private Button nextButton;
    private Button previousButton;
    private LinearLayout buttonsLayout;
    private TextView itemIndexText;
    private SparseArray<String> wordMap;
    public static boolean appVisible = false;
    private static PaperKeyActivity app;
    private ImageButton close;

    public static PaperKeyActivity getApp() {
        return app;
    }

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_paper_key);
        // Prevent screenshots / screen recording of the recovery phrase.
        getWindow().setFlags(WindowManager.LayoutParams.FLAG_SECURE, WindowManager.LayoutParams.FLAG_SECURE);
        wordViewPager = (ViewPager) findViewById(R.id.phrase_words_pager);
        // addOnPageChangeListener replaces the deprecated setOnPageChangeListener;
        // SimpleOnPageChangeListener spares us the two empty callbacks.
        wordViewPager.addOnPageChangeListener(new ViewPager.SimpleOnPageChangeListener() {
            @Override
            public void onPageSelected(int position) {
                // "Previous" is only usable once we are past the first word.
                setButtonEnabled(position != 0);
                updateItemIndexText();
            }
        });
        nextButton = (Button) findViewById(R.id.send_button);
        previousButton = (Button) findViewById(R.id.button_previous);
        close = (ImageButton) findViewById(R.id.close_button);
        itemIndexText = (TextView) findViewById(R.id.item_index_text);
        buttonsLayout = (LinearLayout) findViewById(R.id.buttons_layout);
        nextButton.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                updateWordView(true);
            }
        });
        close.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                if (!BRAnimator.isClickAllowed()) return;
                BRAnimator.startBreadActivity(PaperKeyActivity.this, true);
                if (!isDestroyed()) finish();
            }
        });
        previousButton.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                updateWordView(false);
            }
        });
        String cleanPhrase = getIntent().getExtras() == null ? null : getIntent().getStringExtra("phrase");
        wordMap = new SparseArray<>();
        if (Utils.isNullOrEmpty(cleanPhrase)) {
            throw new RuntimeException(TAG + ": cleanPhrase is null");
        }
        String[] wordArray = cleanPhrase.split(" ");
        if (cleanPhrase.charAt(cleanPhrase.length() - 1) == '\0') {
            // A trailing NUL byte means the keystore handed back a corrupted phrase.
            BRDialog.showCustomDialog(this, getString(R.string.JailbreakWarnings_title),
                    getString(R.string.Alert_keystore_generic_android), getString(R.string.Button_ok), null, new BRDialogView.BROnClickListener() {
                        @Override
                        public void onClick(BRDialogView brDialogView) {
                            brDialogView.dismissWithAnimation();
                        }
                    }, null, null, 0);
            BRReportsManager.reportBug(new IllegalArgumentException("Paper Key error, please contact support at breadwallet.com: " + wordArray.length), true);
        } else {
            if (wordArray.length != PHRASE_WORD_COUNT) {
                BRReportsManager.reportBug(new IllegalArgumentException("Wrong number of paper keys: " + wordArray.length + ", lang: " + Locale.getDefault().getLanguage()), true);
            }
            WordPagerAdapter adapter = new WordPagerAdapter(getSupportFragmentManager());
            adapter.setWords(wordArray);
            wordViewPager.setAdapter(adapter);
            for (int i = 0; i < wordArray.length; i++) {
                wordMap.append(i, wordArray[i]);
            }
            updateItemIndexText();
        }
    }

    /**
     * Moves the pager one word forward or backward. Advancing past the last
     * word triggers the phrase-proving step instead.
     *
     * @param isNext true to go forward, false to go back
     */
    private void updateWordView(boolean isNext) {
        int currentIndex = wordViewPager.getCurrentItem();
        if (isNext) {
            setButtonEnabled(true);
            if (currentIndex >= PHRASE_WORD_COUNT - 1) {
                // Last word shown: continue to "prove you wrote it down".
                PostAuth.getInstance().onPhraseProveAuth(this, false);
            } else {
                wordViewPager.setCurrentItem(currentIndex + 1);
            }
        } else {
            wordViewPager.setCurrentItem(currentIndex - 1);
            if (currentIndex <= 1) {
                // Back at the first word: "previous" is no longer usable.
                setButtonEnabled(false);
            }
        }
    }

    /** Enables/disables the "previous" button, adjusting color and elevation to match. */
    private void setButtonEnabled(boolean b) {
        previousButton.setTextColor(ContextCompat.getColor(getBaseContext(), b ? R.color.light_gray : R.color.extra_light_gray));
        Resources r = getResources();
        float px = TypedValue.applyDimension(TypedValue.COMPLEX_UNIT_DIP, b ? 8 : 0, r.getDisplayMetrics());
        previousButton.setElevation(px);
        previousButton.setEnabled(b);
    }

    @Override
    protected void onResume() {
        super.onResume();
        appVisible = true;
        app = this;
    }

    @Override
    protected void onPause() {
        super.onPause();
        appVisible = false;
    }

    /** Shows "step x of n" for the currently visible word. */
    private void updateItemIndexText() {
        String text = String.format(Locale.getDefault(), getString(R.string.WritePaperPhrase_step), wordViewPager.getCurrentItem() + 1, wordMap.size());
        itemIndexText.setText(text);
    }

    @Override
    public void onBackPressed() {
        super.onBackPressed();
        overridePendingTransition(R.anim.enter_from_right, R.anim.exit_to_left);
    }

    /** Adapter mapping each phrase word to a {@link FragmentPhraseWord} page. */
    private class WordPagerAdapter extends FragmentPagerAdapter {
        private String[] words;

        public WordPagerAdapter(FragmentManager fm) {
            super(fm);
        }

        public void setWords(String[] words) {
            this.words = words;
        }

        @Override
        public Fragment getItem(int pos) {
            return FragmentPhraseWord.newInstance(words[pos]);
        }

        @Override
        public int getCount() {
            return words == null ? 0 : words.length;
        }
    }

    @Override
    protected void onSaveInstanceState(Bundle outState) {
        super.onSaveInstanceState(outState);
    }
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.alipay.sofa.registry.server.session.bootstrap;
import com.alipay.sofa.registry.common.model.wrapper.WrapperInterceptor;
import com.alipay.sofa.registry.jdbc.config.JdbcConfiguration;
import com.alipay.sofa.registry.jraft.config.RaftConfiguration;
import com.alipay.sofa.registry.remoting.bolt.exchange.BoltExchange;
import com.alipay.sofa.registry.remoting.exchange.Exchange;
import com.alipay.sofa.registry.remoting.exchange.NodeExchanger;
import com.alipay.sofa.registry.remoting.jersey.exchange.JerseyExchange;
import com.alipay.sofa.registry.server.session.acceptor.WriteDataAcceptor;
import com.alipay.sofa.registry.server.session.acceptor.WriteDataAcceptorImpl;
import com.alipay.sofa.registry.server.session.cache.CacheGenerator;
import com.alipay.sofa.registry.server.session.cache.CacheService;
import com.alipay.sofa.registry.server.session.cache.DatumCacheGenerator;
import com.alipay.sofa.registry.server.session.cache.SessionCacheService;
import com.alipay.sofa.registry.server.session.circuit.breaker.CircuitBreakerService;
import com.alipay.sofa.registry.server.session.circuit.breaker.DefaultCircuitBreakerService;
import com.alipay.sofa.registry.server.session.client.manager.CheckClientManagerService;
import com.alipay.sofa.registry.server.session.connections.ConnectionsService;
import com.alipay.sofa.registry.server.session.filter.IPMatchStrategy;
import com.alipay.sofa.registry.server.session.filter.ProcessFilter;
import com.alipay.sofa.registry.server.session.filter.blacklist.BlacklistMatchProcessFilter;
import com.alipay.sofa.registry.server.session.filter.blacklist.DefaultIPMatchStrategy;
import com.alipay.sofa.registry.server.session.limit.AccessLimitService;
import com.alipay.sofa.registry.server.session.limit.AccessLimitServiceImpl;
import com.alipay.sofa.registry.server.session.mapper.ConnectionMapper;
import com.alipay.sofa.registry.server.session.metadata.AppRevisionCacheRegistry;
import com.alipay.sofa.registry.server.session.metadata.AppRevisionHeartbeatRegistry;
import com.alipay.sofa.registry.server.session.node.service.*;
import com.alipay.sofa.registry.server.session.providedata.*;
import com.alipay.sofa.registry.server.session.push.*;
import com.alipay.sofa.registry.server.session.registry.Registry;
import com.alipay.sofa.registry.server.session.registry.SessionRegistry;
import com.alipay.sofa.registry.server.session.remoting.ClientNodeExchanger;
import com.alipay.sofa.registry.server.session.remoting.DataNodeExchanger;
import com.alipay.sofa.registry.server.session.remoting.DataNodeNotifyExchanger;
import com.alipay.sofa.registry.server.session.remoting.console.SessionConsoleExchanger;
import com.alipay.sofa.registry.server.session.remoting.console.handler.*;
import com.alipay.sofa.registry.server.session.remoting.handler.*;
import com.alipay.sofa.registry.server.session.resource.*;
import com.alipay.sofa.registry.server.session.scheduler.timertask.CacheCountTask;
import com.alipay.sofa.registry.server.session.scheduler.timertask.SessionCacheDigestTask;
import com.alipay.sofa.registry.server.session.scheduler.timertask.SyncClientsHeartbeatTask;
import com.alipay.sofa.registry.server.session.slot.SlotTableCache;
import com.alipay.sofa.registry.server.session.slot.SlotTableCacheImpl;
import com.alipay.sofa.registry.server.session.store.*;
import com.alipay.sofa.registry.server.session.strategy.*;
import com.alipay.sofa.registry.server.session.strategy.impl.*;
import com.alipay.sofa.registry.server.session.wrapper.*;
import com.alipay.sofa.registry.server.shared.client.manager.BaseClientManagerService;
import com.alipay.sofa.registry.server.shared.client.manager.ClientManagerService;
import com.alipay.sofa.registry.server.shared.meta.MetaServerManager;
import com.alipay.sofa.registry.server.shared.meta.MetaServerService;
import com.alipay.sofa.registry.server.shared.providedata.FetchSystemPropertyService;
import com.alipay.sofa.registry.server.shared.providedata.ProvideDataProcessor;
import com.alipay.sofa.registry.server.shared.providedata.SystemPropertyProcessorManager;
import com.alipay.sofa.registry.server.shared.remoting.AbstractClientHandler;
import com.alipay.sofa.registry.server.shared.remoting.AbstractServerHandler;
import com.alipay.sofa.registry.server.shared.remoting.SlotTableChangeEventHandler;
import com.alipay.sofa.registry.server.shared.resource.MetricsResource;
import com.alipay.sofa.registry.server.shared.resource.RegistryOpsResource;
import com.alipay.sofa.registry.server.shared.resource.SlotGenericResource;
import com.alipay.sofa.registry.server.shared.resource.VersionResource;
import com.alipay.sofa.registry.server.shared.slot.DiskSlotTableRecorder;
import com.alipay.sofa.registry.store.api.config.StoreApiConfiguration;
import com.alipay.sofa.registry.task.MetricsableThreadPoolExecutor;
import com.alipay.sofa.registry.util.NamedThreadFactory;
import com.alipay.sofa.registry.util.PropertySplitter;
import java.util.ArrayList;
import java.util.Collection;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import org.glassfish.jersey.jackson.JacksonFeature;
import org.glassfish.jersey.server.ResourceConfig;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Import;
/**
* @author shangyu.wh
* @version $Id: SessionServerConfiguration.java, v 0.1 2017-11-14 11:39 synex Exp $
*/
@Configuration
@Import({
SessionServerInitializer.class,
StoreApiConfiguration.class,
JdbcConfiguration.class,
RaftConfiguration.class
})
@EnableConfigurationProperties
public class SessionServerConfiguration {
  // Entry-point bean that bootstraps the session server; @ConditionalOnMissingBean
  // lets tests or custom setups substitute their own bootstrap.
  @Bean
  @ConditionalOnMissingBean
  public SessionServerBootstrap sessionServerBootstrap() {
    return new SessionServerBootstrap();
  }
  /** Core configuration beans: server config, property splitting, slot-table recording. */
  @Configuration
  public static class SessionServerConfigBeanConfiguration {
    @Bean
    public CommonConfig commonConfig() {
      return new CommonConfig();
    }

    // Overridable so deployments can provide a customized server config.
    @Bean
    @ConditionalOnMissingBean
    public SessionServerConfig sessionServerConfig(CommonConfig commonConfig) {
      return new SessionServerConfigBean(commonConfig);
    }

    @Bean(name = "PropertySplitter")
    public PropertySplitter propertySplitter() {
      return new PropertySplitter();
    }

    @Bean
    public DiskSlotTableRecorder diskSlotTableRecorder() {
      return new DiskSlotTableRecorder();
    }
  }
  /**
   * Remoting wiring: bolt/jersey exchanges, the exchangers toward clients,
   * data nodes, meta servers and the console, plus the named handler
   * collections each server/client endpoint dispatches to.
   */
  @Configuration
  public static class SessionRemotingConfiguration {
    @Bean
    @ConditionalOnMissingBean(name = "boltExchange")
    public Exchange boltExchange() {
      return new BoltExchange();
    }

    @Bean
    public Exchange jerseyExchange() {
      return new JerseyExchange();
    }

    @Bean
    public NodeExchanger clientNodeExchanger() {
      return new ClientNodeExchanger();
    }

    @Bean
    public NodeExchanger sessionConsoleExchanger() {
      return new SessionConsoleExchanger();
    }

    @Bean
    public DataNodeExchanger dataNodeExchanger() {
      return new DataNodeExchanger();
    }

    @Bean
    public DataNodeNotifyExchanger dataNodeNotifyExchanger() {
      return new DataNodeNotifyExchanger();
    }

    @Bean
    public MetaServerManager metaServerManager() {
      return new SessionMetaServerManager();
    }

    @Bean
    public SlotTableCache slotTableCache() {
      return new SlotTableCacheImpl();
    }

    // Handlers registered on the client-facing server port.
    @Bean(name = "serverHandlers")
    public Collection<AbstractServerHandler> serverHandlers() {
      Collection<AbstractServerHandler> list = new ArrayList<>();
      list.add(publisherHandler());
      list.add(subscriberHandler());
      list.add(watcherHandler());
      list.add(clientNodeConnectionHandler());
      list.add(syncConfigHandler());
      list.add(publisherPbHandler());
      list.add(subscriberPbHandler());
      list.add(syncConfigPbHandler());
      list.add(metadataRegisterPbHandler());
      list.add(serviceAppMappingHandler());
      list.add(metaRevisionHeartbeatHandler());
      list.add(getRevisionHandler());
      return list;
    }

    // Handlers for session<->session slot-diff synchronization.
    @Bean(name = "sessionSyncHandlers")
    public Collection<AbstractServerHandler> serverSyncHandlers() {
      Collection<AbstractServerHandler> list = new ArrayList<>();
      list.add(dataSlotDiffDigestRequestHandler());
      list.add(dataSlotDiffPublisherRequestHandler());
      return list;
    }

    // Handlers for the operations/console endpoint.
    @Bean(name = "consoleHandlers")
    public Collection<AbstractServerHandler> consoleHandlers() {
      Collection<AbstractServerHandler> list = new ArrayList<>();
      list.add(querySubscriberRequestHandler());
      list.add(clientOffRequestHandler());
      list.add(clientOnRequestHandler());
      list.add(getClientManagerRequestHandler());
      list.add(checkClientManagerHandler());
      list.add(pubSubDataInfoIdRequestHandler());
      list.add(filterSubscriberIPsHandler());
      list.add(stopPushRequestHandler());
      return list;
    }

    @Bean
    public AbstractServerHandler publisherHandler() {
      return new PublisherHandler();
    }

    @Bean
    public AbstractServerHandler syncConfigHandler() {
      return new SyncConfigHandler();
    }

    @Bean
    public AbstractServerHandler subscriberHandler() {
      return new SubscriberHandler();
    }

    @Bean
    public AbstractServerHandler watcherHandler() {
      return new WatcherHandler();
    }

    @Bean
    public AbstractServerHandler clientNodeConnectionHandler() {
      return new ClientNodeConnectionHandler();
    }

    @Bean
    public AbstractServerHandler querySubscriberRequestHandler() {
      return new QuerySubscriberRequestHandler();
    }

    @Bean
    public AbstractServerHandler filterSubscriberIPsHandler() {
      return new FilterSubscriberIPsHandler();
    }

    @Bean
    public AbstractServerHandler clientOffRequestHandler() {
      return new ClientOffRequestHandler();
    }

    @Bean
    public AbstractServerHandler clientOnRequestHandler() {
      return new ClientOnRequestHandler();
    }

    @Bean
    public AbstractServerHandler getClientManagerRequestHandler() {
      return new GetClientManagerRequestHandler();
    }

    @Bean
    public AbstractServerHandler checkClientManagerHandler() {
      return new CheckClientManagerHandler();
    }

    @Bean
    public AbstractServerHandler stopPushRequestHandler() {
      return new StopPushRequestHandler();
    }

    @Bean
    public AbstractServerHandler pubSubDataInfoIdRequestHandler() {
      return new PubSubDataInfoIdRequestHandler();
    }

    @Bean
    public AbstractServerHandler dataSlotDiffDigestRequestHandler() {
      return new DataSlotDiffDigestRequestHandler();
    }

    @Bean
    public AbstractServerHandler dataSlotDiffPublisherRequestHandler() {
      return new DataSlotDiffPublisherRequestHandler();
    }

    // Protobuf variants of the registration handlers.
    @Bean
    public AbstractServerHandler publisherPbHandler() {
      return new PublisherPbHandler();
    }

    @Bean
    public AbstractServerHandler metadataRegisterPbHandler() {
      return new MetadataRegisterPbHandler();
    }

    @Bean
    public AbstractServerHandler serviceAppMappingHandler() {
      return new ServiceAppMappingPbHandler();
    }

    @Bean
    public AbstractServerHandler getRevisionHandler() {
      return new GetRevisionPbHandler();
    }

    @Bean
    public AbstractServerHandler metaRevisionHeartbeatHandler() {
      return new MetaRevisionHeartbeatPbHandler();
    }

    @Bean
    public AbstractServerHandler subscriberPbHandler() {
      return new SubscriberPbHandler();
    }

    @Bean
    public AbstractServerHandler syncConfigPbHandler() {
      return new SyncConfigPbHandler();
    }

    @Bean
    @ConditionalOnMissingBean(name = "circuitBreakerService")
    public CircuitBreakerService circuitBreakerService() {
      return new DefaultCircuitBreakerService();
    }

    // Handlers for notifications pushed from data nodes.
    @Bean(name = "dataNotifyClientHandlers")
    public Collection<AbstractClientHandler> dataClientHandlers() {
      Collection<AbstractClientHandler> list = new ArrayList<>();
      list.add(dataChangeRequestHandler());
      list.add(dataPushRequestHandler());
      return list;
    }

    // Handlers for notifications pushed from meta servers.
    @Bean(name = "metaClientHandlers")
    public Collection<AbstractClientHandler> metaClientHandlers() {
      Collection<AbstractClientHandler> list = new ArrayList<>();
      list.add(notifyProvideDataChangeHandler());
      list.add(slotTableChangeEventHandler());
      list.add(appRevisionSliceHandler());
      return list;
    }

    @Bean
    public AbstractClientHandler dataChangeRequestHandler() {
      return new DataChangeRequestHandler();
    }

    @Bean
    public AbstractClientHandler dataPushRequestHandler() {
      return new DataPushRequestHandler();
    }

    @Bean
    public AbstractClientHandler notifyProvideDataChangeHandler() {
      return new NotifyProvideDataChangeHandler();
    }

    @Bean
    public AbstractClientHandler appRevisionSliceHandler() {
      return new AppRevisionSliceHandler();
    }

    @Bean
    public SlotTableChangeEventHandler slotTableChangeEventHandler() {
      return new SlotTableChangeEventHandler();
    }
  }
  /** Jersey REST resources exposed by the session server (ops, digest, health, metrics...). */
  @Configuration
  public static class ResourceConfiguration {
    @Bean
    public ResourceConfig jerseyResourceConfig() {
      ResourceConfig resourceConfig = new ResourceConfig();
      // JSON (de)serialization for all resources.
      resourceConfig.register(JacksonFeature.class);
      return resourceConfig;
    }

    @Bean
    @ConditionalOnMissingBean(name = "sessionOpenResource")
    public SessionOpenResource sessionOpenResource() {
      return new SessionOpenResource();
    }

    @Bean
    public SessionDigestResource sessionDigestResource() {
      return new SessionDigestResource();
    }

    @Bean
    @ConditionalOnMissingBean(name = "healthResource")
    public HealthResource healthResource() {
      return new HealthResource();
    }

    @Bean
    public CompressResource compressResource() {
      return new CompressResource();
    }

    @Bean
    public ClientsOpenResource clientsOpenResource() {
      return new ClientsOpenResource();
    }

    @Bean
    public ConnectionsResource connectionsResource() {
      return new ConnectionsResource();
    }

    @Bean
    public SlotGenericResource slotGenericResource() {
      return new SlotGenericResource();
    }

    @Bean
    public MetricsResource metricsResource() {
      return new MetricsResource();
    }

    @Bean
    @ConditionalOnMissingBean
    public VersionResource versionResource() {
      return new VersionResource();
    }

    @Bean
    public RegistryOpsResource opsResource() {
      return new RegistryOpsResource();
    }

    @Bean
    public ClientManagerResource clientManagerResource() {
      return new ClientManagerResource();
    }

    @Bean
    public PersistenceClientManagerResource persistenceClientManagerResource() {
      return new PersistenceClientManagerResource();
    }

    @Bean
    public SlotTableStatusResource slotTableStatusResource() {
      return new SlotTableStatusResource();
    }

    @Bean
    public EmergencyApiResource emergencyApiResource() {
      return new EmergencyApiResource();
    }
  }
  /** Registry core: the session registry and its interest/watcher/data stores. */
  @Configuration
  public static class SessionRegistryConfiguration {
    @Bean
    @ConditionalOnMissingBean(name = "sessionRegistry")
    public Registry sessionRegistry() {
      return new SessionRegistry();
    }

    // Subscriber interests store.
    @Bean
    @ConditionalOnMissingBean
    public Interests sessionInterests() {
      return new SessionInterests();
    }

    // Watcher store.
    @Bean
    @ConditionalOnMissingBean
    public Watchers sessionWatchers() {
      return new SessionWatchers();
    }

    // Publisher data store.
    @Bean
    @ConditionalOnMissingBean
    public DataStore sessionDataStore() {
      return new SessionDataStore();
    }
  }
  /** Node-facing services and the push pipeline (fire -> change -> push processors). */
  @Configuration
  public static class SessionNodeConfiguration {
    @Bean
    @ConditionalOnMissingBean
    public DataNodeService dataNodeService() {
      return new DataNodeServiceImpl();
    }

    @Bean
    @ConditionalOnMissingBean
    public MetaServerService metaServerService() {
      return new MetaServerServiceImpl();
    }

    @Bean
    @ConditionalOnMissingBean
    public ClientNodeService clientNodeService() {
      return new ClientNodeServiceImpl();
    }

    @Bean
    @ConditionalOnMissingBean
    public FirePushService firePushService() {
      return new FirePushService();
    }

    @Bean
    @ConditionalOnMissingBean
    public PushProcessor pushProcessor() {
      return new PushProcessor();
    }

    @Bean
    @ConditionalOnMissingBean
    public WatchProcessor watchProcessor() {
      return new WatchProcessor();
    }

    @Bean
    @ConditionalOnMissingBean
    public ChangeProcessor changeProcessor() {
      return new ChangeProcessor();
    }

    @Bean
    @ConditionalOnMissingBean
    public PushDataGenerator pushDataGenerator() {
      return new PushDataGenerator();
    }

    @Bean
    public PushSwitchService pushSwitchService() {
      return new PushSwitchService();
    }

    @Bean
    @ConditionalOnMissingBean
    public FetchPubSubDataInfoIdService fetchPubSubDataInfoIdService() {
      return new FetchPubSubDataInfoIdService();
    }

    @Bean
    public ClientManagerService clientManagerService() {
      return new BaseClientManagerService();
    }

    @Bean
    public CheckClientManagerService checkClientManagerService() {
      return new CheckClientManagerService();
    }
  }
  /** Caches: session cache service, datum cache generator, app-revision registries. */
  @Configuration
  public static class SessionCacheConfiguration {
    @Bean
    public CacheService sessionCacheService() {
      return new SessionCacheService();
    }

    // Bean name deliberately matches the DatumKey class name; the cache service
    // looks generators up by key type.
    @Bean(name = "com.alipay.sofa.registry.server.session.cache.DatumKey")
    public CacheGenerator datumCacheGenerator() {
      return new DatumCacheGenerator();
    }

    @Bean
    public AppRevisionCacheRegistry appRevisionCacheRegistry() {
      return new AppRevisionCacheRegistry();
    }

    @Bean
    public AppRevisionHeartbeatRegistry appRevisionHeartbeatRegistry() {
      return new AppRevisionHeartbeatRegistry();
    }
  }
  /** Thread-pool wiring for session-server work. */
  @Configuration
  public static class ExecutorConfiguration {
    // Fixed-size (core == max) metrics-instrumented pool with a bounded queue
    // for requests arriving from meta nodes; daemon threads.
    @Bean(name = "metaNodeExecutor")
    public ThreadPoolExecutor metaNodeExecutor(SessionServerConfig sessionServerConfig) {
      return new MetricsableThreadPoolExecutor(
          "metaNodeInSessionExecutor",
          sessionServerConfig.getMetaNodeWorkerSize(),
          sessionServerConfig.getMetaNodeWorkerSize(),
          300,
          TimeUnit.SECONDS,
          new ArrayBlockingQueue<>(sessionServerConfig.getMetaNodeBufferSize()),
          new NamedThreadFactory("metaNodeInSessionExecutor", true));
    }

    @Bean
    public ExecutorManager executorManager(SessionServerConfig sessionServerConfig) {
      return new ExecutorManager(sessionServerConfig);
    }
  }
  /** Periodic maintenance tasks: heartbeat sync, cache digest, cache counting. */
  @Configuration
  public static class SessionTimerTaskConfiguration {
    @Bean
    public SyncClientsHeartbeatTask syncClientsHeartbeatTask() {
      return new SyncClientsHeartbeatTask();
    }

    @Bean
    public SessionCacheDigestTask sessionCacheDigestTask() {
      return new SessionCacheDigestTask();
    }

    @Bean
    public CacheCountTask cacheCountTask() {
      return new CacheCountTask();
    }
  }
  /** Default strategy implementations, each replaceable via @ConditionalOnMissingBean. */
  @Configuration
  public static class SessionStrategyConfiguration {
    @Bean
    @ConditionalOnMissingBean
    public SessionRegistryStrategy sessionRegistryStrategy() {
      return new DefaultSessionRegistryStrategy();
    }

    @Bean
    @ConditionalOnMissingBean
    public SyncConfigHandlerStrategy syncConfigHandlerStrategy() {
      return new DefaultSyncConfigHandlerStrategy();
    }

    @Bean
    @ConditionalOnMissingBean
    public PublisherHandlerStrategy publisherHandlerStrategy() {
      return new DefaultPublisherHandlerStrategy();
    }

    @Bean
    @ConditionalOnMissingBean
    public SubscriberHandlerStrategy subscriberHandlerStrategy() {
      return new DefaultSubscriberHandlerStrategy();
    }

    @Bean
    @ConditionalOnMissingBean
    public WatcherHandlerStrategy watcherHandlerStrategy() {
      return new DefaultWatcherHandlerStrategy();
    }

    @Bean
    @ConditionalOnMissingBean
    public AppRevisionHandlerStrategy appRevisionHandlerStrategy() {
      return new DefaultAppRevisionHandlerStrategy();
    }
  }
  /** Rate limiting for client access, parameterized by the server config. */
  @Configuration
  public static class AccessLimitServiceConfiguration {
    @Bean
    public AccessLimitService accessLimitService(SessionServerConfig sessionServerConfig) {
      return new AccessLimitServiceImpl(sessionServerConfig);
    }
  }
  /** Request filtering: blacklist matching and the wrapper-interceptor chain. */
  @Configuration
  public static class SessionFilterConfiguration {
    @Bean
    public IPMatchStrategy ipMatchStrategy() {
      return new DefaultIPMatchStrategy();
    }

    @Bean
    @ConditionalOnMissingBean
    public ProcessFilter blacklistMatchProcessFilter() {
      return new BlacklistMatchProcessFilter();
    }

    // Interceptor chain; registration order here is the execution order:
    // client check -> blacklist -> access limit -> client off.
    @Bean
    public WrapperInterceptorManager wrapperInterceptorManager() {
      WrapperInterceptorManager mgr = new WrapperInterceptorManager();
      mgr.addInterceptor(clientCheckWrapperInterceptor());
      mgr.addInterceptor(blacklistWrapperInterceptor());
      mgr.addInterceptor(accessLimitWrapperInterceptor());
      mgr.addInterceptor(clientOffWrapperInterceptor());
      return mgr;
    }

    @Bean
    public WrapperInterceptor clientCheckWrapperInterceptor() {
      return new ClientCheckWrapperInterceptor();
    }

    @Bean
    public WrapperInterceptor blacklistWrapperInterceptor() {
      return new BlacklistWrapperInterceptor();
    }

    @Bean
    public WrapperInterceptor clientOffWrapperInterceptor() {
      return new ClientOffWrapperInterceptor();
    }

    @Bean
    public WrapperInterceptor accessLimitWrapperInterceptor() {
      return new AccessLimitWrapperInterceptor();
    }
  }
  /** Write-path acceptor for datum renew/write requests. */
  @Configuration
  public static class SessionRenewDatumConfiguration {
    @Bean
    public WriteDataAcceptor writeDataAcceptor() {
      return new WriteDataAcceptorImpl();
    }
  }
  /** Connection bookkeeping: service plus the address-to-connection mapper. */
  @Configuration
  public static class SessionConnectionsConfiguration {
    @Bean
    public ConnectionsService connectionsService() {
      return new ConnectionsService();
    }

    @Bean
    public ConnectionMapper connectionMapper() {
      return new ConnectionMapper();
    }
  }
@Configuration
public static class SessionProvideDataConfiguration {
    // Registry that fans provide-data/system-property updates out to processors.
    @Bean
    public SystemPropertyProcessorManager systemPropertyProcessorManager() {
      return new SystemPropertyProcessorManager();
    }
    // Aggregate processor for provide-data changes pushed from meta servers.
    @Bean
    public ProvideDataProcessor provideDataProcessorManager() {
      return new ProvideDataProcessorManager();
    }
@Bean
public FetchSystemPropertyService fetchBlackListService(
SystemPropertyProcessorManager systemPropertyProcessorManager) {
FetchBlackListService fetchBlackListService = new FetchBlackListService();
systemPropertyProcessorManager.addSystemDataProcessor(fetchBlackListService);
return fetchBlackListService;
}
@Bean
public FetchSystemPropertyService fetchStopPushService(
SystemPropertyProcessorManager systemPropertyProcessorManager) {
FetchStopPushService fetchStopPushService = new FetchStopPushService();
systemPropertyProcessorManager.addSystemDataPersistenceProcessor(fetchStopPushService);
return fetchStopPushService;
}
@Bean
public FetchSystemPropertyService fetchClientOffAddressService(
SystemPropertyProcessorManager systemPropertyProcessorManager) {
FetchClientOffAddressService fetchClientOffAddressService =
new FetchClientOffAddressService();
systemPropertyProcessorManager.addSystemDataPersistenceProcessor(
fetchClientOffAddressService);
return fetchClientOffAddressService;
}
@Bean
public FetchSystemPropertyService fetchGrayPushSwitchService(
SystemPropertyProcessorManager systemPropertyProcessorManager) {
FetchGrayPushSwitchService fetchGrayPushSwitchService = new FetchGrayPushSwitchService();
systemPropertyProcessorManager.addSystemDataProcessor(fetchGrayPushSwitchService);
return fetchGrayPushSwitchService;
}
@Bean
public FetchSystemPropertyService compressPushService(
SystemPropertyProcessorManager systemPropertyProcessorManager) {
CompressPushService compressPushService = new CompressPushService();
systemPropertyProcessorManager.addSystemDataProcessor(compressPushService);
return compressPushService;
}
@Bean
public FetchSystemPropertyService fetchShutdownService(
SystemPropertyProcessorManager systemPropertyProcessorManager) {
FetchShutdownService fetchShutdownService = new FetchShutdownService();
systemPropertyProcessorManager.addSystemDataPersistenceProcessor(fetchShutdownService);
return fetchShutdownService;
}
@Bean
public FetchCircuitBreakerService fetchCircuitBreakerService(
SystemPropertyProcessorManager systemPropertyProcessorManager) {
FetchCircuitBreakerService fetchCircuitBreakerService = new FetchCircuitBreakerService();
systemPropertyProcessorManager.addSystemDataPersistenceProcessor(fetchCircuitBreakerService);
return fetchCircuitBreakerService;
}
@Bean
public ConfigProvideDataWatcher configProvideDataWatcher() {
return new ConfigProvideDataWatcher();
}
}
}
|
package com.yr.common.xss;

import com.yr.common.exception.RRException;
import org.apache.commons.lang.StringUtils;

import java.util.Locale;

/**
 * SQL injection filter: strips quoting/terminator characters and rejects
 * strings containing blacklisted SQL keywords.
 */
public class SQLFilter {

    /**
     * Lowercase SQL keywords rejected anywhere in the sanitized input.
     * Fix: the original listed "alert", which is not a SQL keyword — the
     * intended word is "alter" (ALTER TABLE/DATABASE), so ALTER statements
     * previously slipped through the filter.
     */
    private static final String[] SQL_KEYWORDS = {
            "master", "truncate", "insert", "select", "delete",
            "update", "declare", "alter", "drop"
    };

    /**
     * Sanitizes a string against SQL injection.
     *
     * @param str the string to validate; may be {@code null} or blank
     * @return the sanitized, lower-cased string, or {@code null} if the input was blank
     * @throws RRException if the sanitized string contains a blacklisted SQL keyword
     */
    public static String sqlInject(String str) {
        if (StringUtils.isBlank(str)) {
            return null;
        }
        // Remove characters used to break out of SQL literals: ' " ; \
        str = StringUtils.replace(str, "'", "");
        str = StringUtils.replace(str, "\"", "");
        str = StringUtils.replace(str, ";", "");
        str = StringUtils.replace(str, "\\", "");
        // Locale.ROOT keeps keyword matching locale-independent; the default
        // locale's toLowerCase() maps "I" to a dotless "ı" under Turkish
        // locales, letting e.g. "INSERT" evade the "insert" check.
        str = str.toLowerCase(Locale.ROOT);
        // Reject the string outright if any blacklisted keyword appears.
        for (String keyword : SQL_KEYWORDS) {
            if (str.contains(keyword)) {
                throw new RRException("包含非法字符");
            }
        }
        return str;
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.