code stringlengths 3 1.05M | repo_name stringlengths 4 116 | path stringlengths 4 991 | language stringclasses 9 values | license stringclasses 15 values | size int32 3 1.05M |
|---|---|---|---|---|---|
/*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.example.android.sunshine.app.data;
import android.annotation.TargetApi;
import android.content.ContentProvider;
import android.content.ContentValues;
import android.content.UriMatcher;
import android.database.Cursor;
import android.database.sqlite.SQLiteDatabase;
import android.database.sqlite.SQLiteQueryBuilder;
import android.net.Uri;
public class WeatherProvider extends ContentProvider {
// The URI Matcher used by this content provider.
private static final UriMatcher sUriMatcher = buildUriMatcher();
private WeatherDbHelper mOpenHelper;
static final int WEATHER = 100;
static final int WEATHER_WITH_LOCATION = 101;
static final int WEATHER_WITH_LOCATION_AND_DATE = 102;
static final int LOCATION = 300;
private static final SQLiteQueryBuilder sWeatherByLocationSettingQueryBuilder;
static {
sWeatherByLocationSettingQueryBuilder = new SQLiteQueryBuilder();
//This is an inner join which looks like
//weather INNER JOIN location ON weather.location_id = location._id
sWeatherByLocationSettingQueryBuilder.setTables(
WeatherContract.WeatherEntry.TABLE_NAME + " INNER JOIN " +
WeatherContract.LocationEntry.TABLE_NAME +
" ON " + WeatherContract.WeatherEntry.TABLE_NAME +
"." + WeatherContract.WeatherEntry.COLUMN_LOC_KEY +
" = " + WeatherContract.LocationEntry.TABLE_NAME +
"." + WeatherContract.LocationEntry._ID);
}
//location.location_setting = ?
private static final String sLocationSettingSelection =
WeatherContract.LocationEntry.TABLE_NAME +
"." + WeatherContract.LocationEntry.COLUMN_LOCATION_SETTING + " = ? ";
//location.location_setting = ? AND date >= ?
private static final String sLocationSettingWithStartDateSelection =
WeatherContract.LocationEntry.TABLE_NAME +
"." + WeatherContract.LocationEntry.COLUMN_LOCATION_SETTING + " = ? AND " +
WeatherContract.WeatherEntry.COLUMN_DATE + " >= ? ";
//location.location_setting = ? AND date = ?
private static final String sLocationSettingAndDaySelection =
WeatherContract.LocationEntry.TABLE_NAME +
"." + WeatherContract.LocationEntry.COLUMN_LOCATION_SETTING + " = ? AND " +
WeatherContract.WeatherEntry.COLUMN_DATE + " = ? ";
private Cursor getWeatherByLocationSetting(Uri uri, String[] projection, String sortOrder) {
String locationSetting = WeatherContract.WeatherEntry.getLocationSettingFromUri(uri);
long startDate = WeatherContract.WeatherEntry.getStartDateFromUri(uri);
String[] selectionArgs;
String selection;
if (startDate == 0) {
selection = sLocationSettingSelection;
selectionArgs = new String[]{locationSetting};
} else {
selectionArgs = new String[]{locationSetting, Long.toString(startDate)};
selection = sLocationSettingWithStartDateSelection;
}
return sWeatherByLocationSettingQueryBuilder.query(mOpenHelper.getReadableDatabase(),
projection,
selection,
selectionArgs,
null,
null,
sortOrder
);
}
private Cursor getWeatherByLocationSettingAndDate(
Uri uri, String[] projection, String sortOrder) {
String locationSetting = WeatherContract.WeatherEntry.getLocationSettingFromUri(uri);
long date = WeatherContract.WeatherEntry.getDateFromUri(uri);
return sWeatherByLocationSettingQueryBuilder.query(mOpenHelper.getReadableDatabase(),
projection,
sLocationSettingAndDaySelection,
new String[]{locationSetting, Long.toString(date)},
null,
null,
sortOrder
);
}
/*
Students: Here is where you need to create the UriMatcher. This UriMatcher will
match each URI to the WEATHER, WEATHER_WITH_LOCATION, WEATHER_WITH_LOCATION_AND_DATE,
and LOCATION integer constants defined above. You can test this by uncommenting the
testUriMatcher test within TestUriMatcher.
*/
static UriMatcher buildUriMatcher() {
// I know what you're thinking. Why create a UriMatcher when you can use regular
// expressions instead? Because you're not crazy, that's why.
// All paths added to the UriMatcher have a corresponding code to return when a match is
// found. The code passed into the constructor represents the code to return for the root
// URI. It's common to use NO_MATCH as the code for this case.
final UriMatcher matcher = new UriMatcher(UriMatcher.NO_MATCH);
final String authority = WeatherContract.CONTENT_AUTHORITY;
// For each type of URI you want to add, create a corresponding code.
matcher.addURI(authority, WeatherContract.PATH_WEATHER, WEATHER);
matcher.addURI(authority, WeatherContract.PATH_WEATHER + "/*", WEATHER_WITH_LOCATION);
matcher.addURI(authority, WeatherContract.PATH_WEATHER + "/*/#", WEATHER_WITH_LOCATION_AND_DATE);
matcher.addURI(authority, WeatherContract.PATH_LOCATION, LOCATION);
return matcher;
}
/*
Students: We've coded this for you. We just create a new WeatherDbHelper for later use
here.
*/
@Override
public boolean onCreate() {
mOpenHelper = new WeatherDbHelper(getContext());
return true;
}
/*
Students: Here's where you'll code the getType function that uses the UriMatcher. You can
test this by uncommenting testGetType in TestProvider.
*/
@Override
public String getType(Uri uri) {
// Use the Uri Matcher to determine what kind of URI this is.
final int match = sUriMatcher.match(uri);
switch (match) {
// Student: Uncomment and fill out these two cases
case WEATHER_WITH_LOCATION_AND_DATE:
return WeatherContract.WeatherEntry.CONTENT_ITEM_TYPE;
case WEATHER_WITH_LOCATION:
return WeatherContract.WeatherEntry.CONTENT_TYPE;
case WEATHER:
return WeatherContract.WeatherEntry.CONTENT_TYPE;
case LOCATION:
return WeatherContract.LocationEntry.CONTENT_TYPE;
default:
throw new UnsupportedOperationException("Unknown uri: " + uri);
}
}
@Override
public Cursor query(Uri uri, String[] projection, String selection, String[] selectionArgs,
String sortOrder) {
// Here's the switch statement that, given a URI, will determine what kind of request it is,
// and query the database accordingly.
Cursor retCursor;
switch (sUriMatcher.match(uri)) {
// "weather/*/*"
case WEATHER_WITH_LOCATION_AND_DATE:
retCursor = getWeatherByLocationSettingAndDate(uri, projection, sortOrder);
break;
// "weather/*"
case WEATHER_WITH_LOCATION:
retCursor = getWeatherByLocationSetting(uri, projection, sortOrder);
break;
// "weather"
case WEATHER:
retCursor = mOpenHelper.getReadableDatabase().query(
WeatherContract.WeatherEntry.TABLE_NAME,
projection,
selection,
selectionArgs,
null,
null,
sortOrder
);
break;
// "location"
case LOCATION:
retCursor = mOpenHelper.getReadableDatabase().query(
WeatherContract.LocationEntry.TABLE_NAME,
projection,
selection,
selectionArgs,
null,
null,
sortOrder
);
break;
default:
throw new UnsupportedOperationException("Unknown uri: " + uri);
}
retCursor.setNotificationUri(getContext().getContentResolver(), uri);
return retCursor;
}
/*
Student: Add the ability to insert Locations to the implementation of this function.
*/
@Override
public Uri insert(Uri uri, ContentValues values) {
final SQLiteDatabase db = mOpenHelper.getWritableDatabase();
final int match = sUriMatcher.match(uri);
Uri returnUri;
switch (match) {
case WEATHER: {
normalizeDate(values);
long _id = db.insert(WeatherContract.WeatherEntry.TABLE_NAME, null, values);
if (_id > 0)
returnUri = WeatherContract.WeatherEntry.buildWeatherUri(_id);
else
throw new android.database.SQLException("Failed to insert row into " + uri);
break;
}
case LOCATION: {
long _id = db.insert(WeatherContract.LocationEntry.TABLE_NAME, null, values);
if (_id > 0)
returnUri = WeatherContract.LocationEntry.buildLocationUri(_id);
else
throw new android.database.SQLException("Failed to insert row into " + uri);
break;
}
default:
throw new UnsupportedOperationException("Unknown uri: " + uri);
}
getContext().getContentResolver().notifyChange(uri, null);
db.close();
return returnUri;
}
@Override
public int delete(Uri uri, String selection, String[] selectionArgs) {
final SQLiteDatabase db = mOpenHelper.getWritableDatabase();
final int match = sUriMatcher.match(uri);
int rowsDeleted;
// this makes delete all rows return the number of rows deleted
if (null == selection) selection = "1";
switch (match) {
case WEATHER:
rowsDeleted = db.delete(
WeatherContract.WeatherEntry.TABLE_NAME, selection, selectionArgs);
break;
case LOCATION:
rowsDeleted = db.delete(
WeatherContract.LocationEntry.TABLE_NAME, selection, selectionArgs);
break;
default:
throw new UnsupportedOperationException("Unknown uri: " + uri);
}
// Because a null deletes all rows
if (rowsDeleted != 0) {
getContext().getContentResolver().notifyChange(uri, null);
}
db.close();
return rowsDeleted;
}
private void normalizeDate(ContentValues values) {
// normalize the date value
if (values.containsKey(WeatherContract.WeatherEntry.COLUMN_DATE)) {
long dateValue = values.getAsLong(WeatherContract.WeatherEntry.COLUMN_DATE);
values.put(WeatherContract.WeatherEntry.COLUMN_DATE, WeatherContract.normalizeDate(dateValue));
}
}
@Override
public int update(
Uri uri, ContentValues values, String selection, String[] selectionArgs) {
final SQLiteDatabase db = mOpenHelper.getWritableDatabase();
final int match = sUriMatcher.match(uri);
int rowsUpdated;
switch (match) {
case WEATHER:
normalizeDate(values);
rowsUpdated = db.update(WeatherContract.WeatherEntry.TABLE_NAME, values, selection,
selectionArgs);
break;
case LOCATION:
rowsUpdated = db.update(WeatherContract.LocationEntry.TABLE_NAME, values, selection,
selectionArgs);
break;
default:
throw new UnsupportedOperationException("Unknown uri: " + uri);
}
if (rowsUpdated != 0) {
getContext().getContentResolver().notifyChange(uri, null);
}
db.close();
return rowsUpdated;
}
@Override
public int bulkInsert(Uri uri, ContentValues[] values) {
final SQLiteDatabase db = mOpenHelper.getWritableDatabase();
final int match = sUriMatcher.match(uri);
switch (match) {
case WEATHER:
db.beginTransaction();
int returnCount = 0;
try {
for (ContentValues value : values) {
normalizeDate(value);
long _id = db.insert(WeatherContract.WeatherEntry.TABLE_NAME, null, value);
if (_id != -1) {
returnCount++;
}
}
db.setTransactionSuccessful();
} finally {
db.endTransaction();
}
getContext().getContentResolver().notifyChange(uri, null);
return returnCount;
default:
return super.bulkInsert(uri, values);
}
}
// You do not need to call this method. This is a method specifically to assist the testing
// framework in running smoothly. You can read more at:
// http://developer.android.com/reference/android/content/ContentProvider.html#shutdown()
@Override
@TargetApi(11)
public void shutdown() {
mOpenHelper.close();
super.shutdown();
}
} | jhpx/Sunshine-Version-2 | app/src/main/java/com/example/android/sunshine/app/data/WeatherProvider.java | Java | apache-2.0 | 14,322 |
/*
* Autopsy Forensic Browser
*
* Copyright 2011-2017 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.sleuthkit.autopsy.datamodel;
import java.beans.PropertyChangeEvent;
import java.beans.PropertyChangeListener;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.logging.Level;
import java.util.stream.Collectors;
import javax.swing.Action;
import org.apache.commons.lang3.StringUtils;
import org.openide.nodes.Children;
import org.openide.nodes.Sheet;
import org.openide.util.Lookup;
import org.openide.util.NbBundle;
import org.openide.util.lookup.Lookups;
import org.sleuthkit.autopsy.casemodule.Case;
import org.sleuthkit.autopsy.coreutils.Logger;
import org.sleuthkit.autopsy.coreutils.MessageNotifyUtil;
import org.sleuthkit.autopsy.casemodule.events.BlackBoardArtifactTagAddedEvent;
import org.sleuthkit.autopsy.casemodule.events.BlackBoardArtifactTagDeletedEvent;
import org.sleuthkit.autopsy.casemodule.events.ContentTagAddedEvent;
import org.sleuthkit.autopsy.casemodule.events.ContentTagDeletedEvent;
import static org.sleuthkit.autopsy.datamodel.DisplayableItemNode.findLinked;
import org.sleuthkit.autopsy.timeline.actions.ViewArtifactInTimelineAction;
import org.sleuthkit.autopsy.timeline.actions.ViewFileInTimelineAction;
import org.sleuthkit.datamodel.AbstractFile;
import org.sleuthkit.datamodel.BlackboardArtifact;
import org.sleuthkit.datamodel.BlackboardArtifact.ARTIFACT_TYPE;
import org.sleuthkit.datamodel.BlackboardAttribute;
import org.sleuthkit.datamodel.BlackboardAttribute.ATTRIBUTE_TYPE;
import org.sleuthkit.datamodel.Content;
import org.sleuthkit.datamodel.Tag;
import org.sleuthkit.datamodel.TskCoreException;
/**
* Node wrapping a blackboard artifact object. This is generated from several
* places in the tree.
*/
public class BlackboardArtifactNode extends DisplayableItemNode {
private final BlackboardArtifact artifact;
private final Content associated;
private List<NodeProperty<? extends Object>> customProperties;
private static final Logger LOGGER = Logger.getLogger(BlackboardArtifactNode.class.getName());
/*
* Artifact types which should have the full unique path of the associated
* content as a property.
*/
private static final Integer[] SHOW_UNIQUE_PATH = new Integer[]{
BlackboardArtifact.ARTIFACT_TYPE.TSK_HASHSET_HIT.getTypeID(),
BlackboardArtifact.ARTIFACT_TYPE.TSK_KEYWORD_HIT.getTypeID(),
BlackboardArtifact.ARTIFACT_TYPE.TSK_INTERESTING_FILE_HIT.getTypeID(),
BlackboardArtifact.ARTIFACT_TYPE.TSK_INTERESTING_ARTIFACT_HIT.getTypeID(),};
// TODO (RC): This is an unattractive alternative to subclassing BlackboardArtifactNode,
// cut from the same cloth as the equally unattractive SHOW_UNIQUE_PATH array
// above. It should be removed when and if the subclassing is implemented.
private static final Integer[] SHOW_FILE_METADATA = new Integer[]{
BlackboardArtifact.ARTIFACT_TYPE.TSK_INTERESTING_FILE_HIT.getTypeID(),};
private final PropertyChangeListener pcl = new PropertyChangeListener() {
@Override
public void propertyChange(PropertyChangeEvent evt) {
String eventType = evt.getPropertyName();
if (eventType.equals(Case.Events.BLACKBOARD_ARTIFACT_TAG_ADDED.toString())) {
BlackBoardArtifactTagAddedEvent event = (BlackBoardArtifactTagAddedEvent) evt;
if (event.getAddedTag().getArtifact().equals(artifact)) {
updateSheet();
}
} else if (eventType.equals(Case.Events.BLACKBOARD_ARTIFACT_TAG_DELETED.toString())) {
BlackBoardArtifactTagDeletedEvent event = (BlackBoardArtifactTagDeletedEvent) evt;
if (event.getDeletedTagInfo().getArtifactID() == artifact.getArtifactID()) {
updateSheet();
}
} else if (eventType.equals(Case.Events.CONTENT_TAG_ADDED.toString())) {
ContentTagAddedEvent event = (ContentTagAddedEvent) evt;
if (event.getAddedTag().getContent().equals(associated)) {
updateSheet();
}
} else if (eventType.equals(Case.Events.CONTENT_TAG_DELETED.toString())) {
ContentTagDeletedEvent event = (ContentTagDeletedEvent) evt;
if (event.getDeletedTagInfo().getContentID() == associated.getId()) {
updateSheet();
}
} else if (eventType.equals(Case.Events.CURRENT_CASE.toString())) {
if (evt.getNewValue() == null) {
// case was closed. Remove listeners so that we don't get called with a stale case handle
removeListeners();
}
}
}
};
/**
* Construct blackboard artifact node from an artifact and using provided
* icon
*
* @param artifact artifact to encapsulate
* @param iconPath icon to use for the artifact
*/
public BlackboardArtifactNode(BlackboardArtifact artifact, String iconPath) {
super(Children.LEAF, createLookup(artifact));
this.artifact = artifact;
//this.associated = getAssociatedContent(artifact);
this.associated = this.getLookup().lookup(Content.class);
this.setName(Long.toString(artifact.getArtifactID()));
this.setDisplayName();
this.setIconBaseWithExtension(iconPath);
Case.addPropertyChangeListener(pcl);
}
/**
* Construct blackboard artifact node from an artifact and using default
* icon for artifact type
*
* @param artifact artifact to encapsulate
*/
public BlackboardArtifactNode(BlackboardArtifact artifact) {
super(Children.LEAF, createLookup(artifact));
this.artifact = artifact;
//this.associated = getAssociatedContent(artifact);
this.associated = this.getLookup().lookup(Content.class);
this.setName(Long.toString(artifact.getArtifactID()));
this.setDisplayName();
this.setIconBaseWithExtension(ExtractedContent.getIconFilePath(artifact.getArtifactTypeID())); //NON-NLS
Case.addPropertyChangeListener(pcl);
}
private void removeListeners() {
Case.removePropertyChangeListener(pcl);
}
@Override
@NbBundle.Messages({
"BlackboardArtifactNode.getAction.errorTitle=Error getting actions",
"BlackboardArtifactNode.getAction.resultErrorMessage=There was a problem getting actions for the selected result."
+ " The 'View Result in Timeline' action will not be available.",
"BlackboardArtifactNode.getAction.linkedFileMessage=There was a problem getting actions for the selected result. "
+ " The 'View File in Timeline' action will not be available."})
public Action[] getActions(boolean context) {
List<Action> actionsList = new ArrayList<>();
actionsList.addAll(Arrays.asList(super.getActions(context)));
//if this artifact has a time stamp add the action to view it in the timeline
try {
if (ViewArtifactInTimelineAction.hasSupportedTimeStamp(artifact)) {
actionsList.add(new ViewArtifactInTimelineAction(artifact));
}
} catch (TskCoreException ex) {
LOGGER.log(Level.SEVERE, MessageFormat.format("Error getting arttribute(s) from blackboard artifact{0}.", artifact.getArtifactID()), ex); //NON-NLS
MessageNotifyUtil.Notify.error(Bundle.BlackboardArtifactNode_getAction_errorTitle(), Bundle.BlackboardArtifactNode_getAction_resultErrorMessage());
}
// if the artifact links to another file, add an action to go to that file
try {
AbstractFile c = findLinked(artifact);
if (c != null) {
actionsList.add(ViewFileInTimelineAction.createViewFileAction(c));
}
} catch (TskCoreException ex) {
LOGGER.log(Level.SEVERE, MessageFormat.format("Error getting linked file from blackboard artifact{0}.", artifact.getArtifactID()), ex); //NON-NLS
MessageNotifyUtil.Notify.error(Bundle.BlackboardArtifactNode_getAction_errorTitle(), Bundle.BlackboardArtifactNode_getAction_linkedFileMessage());
}
//if this artifact has associated content, add the action to view the content in the timeline
AbstractFile file = getLookup().lookup(AbstractFile.class);
if (null != file) {
actionsList.add(ViewFileInTimelineAction.createViewSourceFileAction(file));
}
return actionsList.toArray(new Action[actionsList.size()]);
}
@NbBundle.Messages({"# {0} - artifactDisplayName", "BlackboardArtifactNode.displayName.artifact={0} Artifact"})
/**
* Set the filter node display name. The value will either be the file name
* or something along the lines of e.g. "Messages Artifact" for keyword hits
* on artifacts.
*/
private void setDisplayName() {
String displayName = ""; //NON-NLS
if (associated != null) {
displayName = associated.getName();
}
// If this is a node for a keyword hit on an artifact, we set the
// display name to be the artifact type name followed by " Artifact"
// e.g. "Messages Artifact".
if (artifact != null
&& (artifact.getArtifactTypeID() == ARTIFACT_TYPE.TSK_KEYWORD_HIT.getTypeID()
|| artifact.getArtifactTypeID() == ARTIFACT_TYPE.TSK_INTERESTING_ARTIFACT_HIT.getTypeID())) {
try {
for (BlackboardAttribute attribute : artifact.getAttributes()) {
if (attribute.getAttributeType().getTypeID() == ATTRIBUTE_TYPE.TSK_ASSOCIATED_ARTIFACT.getTypeID()) {
BlackboardArtifact associatedArtifact = Case.getCurrentCase().getSleuthkitCase().getBlackboardArtifact(attribute.getValueLong());
if (associatedArtifact != null) {
if (artifact.getArtifactTypeID() == ARTIFACT_TYPE.TSK_INTERESTING_ARTIFACT_HIT.getTypeID()) {
artifact.getDisplayName();
} else {
displayName = NbBundle.getMessage(BlackboardArtifactNode.class, "BlackboardArtifactNode.displayName.artifact", associatedArtifact.getDisplayName());
}
}
}
}
} catch (TskCoreException ex) {
// Do nothing since the display name will be set to the file name.
}
}
this.setDisplayName(displayName);
}
@NbBundle.Messages({
"BlackboardArtifactNode.createSheet.artifactType.displayName=Artifact Type",
"BlackboardArtifactNode.createSheet.artifactType.name=Artifact Type",
"BlackboardArtifactNode.createSheet.artifactDetails.displayName=Artifact Details",
"BlackboardArtifactNode.createSheet.artifactDetails.name=Artifact Details",
"BlackboardArtifactNode.artifact.displayName=Artifact"})
@Override
protected Sheet createSheet() {
Sheet s = super.createSheet();
Sheet.Set ss = s.get(Sheet.PROPERTIES);
if (ss == null) {
ss = Sheet.createPropertiesSet();
s.put(ss);
}
final String NO_DESCR = NbBundle.getMessage(BlackboardArtifactNode.class, "BlackboardArtifactNode.noDesc.text");
Map<String, Object> map = new LinkedHashMap<>();
fillPropertyMap(map, artifact);
ss.put(new NodeProperty<>(NbBundle.getMessage(BlackboardArtifactNode.class, "BlackboardArtifactNode.createSheet.srcFile.name"),
NbBundle.getMessage(BlackboardArtifactNode.class, "BlackboardArtifactNode.createSheet.srcFile.displayName"),
NO_DESCR,
this.getDisplayName()));
if (artifact.getArtifactTypeID() == ARTIFACT_TYPE.TSK_INTERESTING_ARTIFACT_HIT.getTypeID()) {
try {
BlackboardAttribute attribute = artifact.getAttribute(new BlackboardAttribute.Type(ATTRIBUTE_TYPE.TSK_ASSOCIATED_ARTIFACT));
if (attribute != null) {
BlackboardArtifact associatedArtifact = Case.getCurrentCase().getSleuthkitCase().getBlackboardArtifact(attribute.getValueLong());
ss.put(new NodeProperty<>(NbBundle.getMessage(BlackboardArtifactNode.class, "BlackboardArtifactNode.createSheet.artifactType.name"),
NbBundle.getMessage(BlackboardArtifactNode.class, "BlackboardArtifactNode.createSheet.artifactType.displayName"),
NO_DESCR,
associatedArtifact.getDisplayName() + " " + NbBundle.getMessage(BlackboardArtifactNode.class, "BlackboardArtifactNode.artifact.displayName")));
ss.put(new NodeProperty<>(NbBundle.getMessage(BlackboardArtifactNode.class, "BlackboardArtifactNode.createSheet.artifactDetails.name"),
NbBundle.getMessage(BlackboardArtifactNode.class, "BlackboardArtifactNode.createSheet.artifactDetails.displayName"),
NO_DESCR,
associatedArtifact.getShortDescription()));
}
} catch (TskCoreException ex) {
// Do nothing since the display name will be set to the file name.
}
}
for (Map.Entry<String, Object> entry : map.entrySet()) {
ss.put(new NodeProperty<>(entry.getKey(),
entry.getKey(),
NO_DESCR,
entry.getValue()));
}
//append custom node properties
if (customProperties != null) {
for (NodeProperty<? extends Object> np : customProperties) {
ss.put(np);
}
}
final int artifactTypeId = artifact.getArtifactTypeID();
// If mismatch, add props for extension and file type
if (artifactTypeId == BlackboardArtifact.ARTIFACT_TYPE.TSK_EXT_MISMATCH_DETECTED.getTypeID()) {
String ext = ""; //NON-NLS
String actualMimeType = ""; //NON-NLS
if (associated instanceof AbstractFile) {
AbstractFile af = (AbstractFile) associated;
ext = af.getNameExtension();
actualMimeType = af.getMIMEType();
if (actualMimeType == null) {
actualMimeType = ""; //NON-NLS
}
}
ss.put(new NodeProperty<>(NbBundle.getMessage(BlackboardArtifactNode.class, "BlackboardArtifactNode.createSheet.ext.name"),
NbBundle.getMessage(BlackboardArtifactNode.class, "BlackboardArtifactNode.createSheet.ext.displayName"),
NO_DESCR,
ext));
ss.put(new NodeProperty<>(
NbBundle.getMessage(BlackboardArtifactNode.class, "BlackboardArtifactNode.createSheet.mimeType.name"),
NbBundle.getMessage(BlackboardArtifactNode.class, "BlackboardArtifactNode.createSheet.mimeType.displayName"),
NO_DESCR,
actualMimeType));
}
if (Arrays.asList(SHOW_UNIQUE_PATH).contains(artifactTypeId)) {
String sourcePath = ""; //NON-NLS
try {
sourcePath = associated.getUniquePath();
} catch (TskCoreException ex) {
LOGGER.log(Level.WARNING, "Failed to get unique path from: {0}", associated.getName()); //NON-NLS
}
if (sourcePath.isEmpty() == false) {
ss.put(new NodeProperty<>(
NbBundle.getMessage(BlackboardArtifactNode.class, "BlackboardArtifactNode.createSheet.filePath.name"),
NbBundle.getMessage(BlackboardArtifactNode.class, "BlackboardArtifactNode.createSheet.filePath.displayName"),
NO_DESCR,
sourcePath));
}
if (Arrays.asList(SHOW_FILE_METADATA).contains(artifactTypeId)) {
AbstractFile file = associated instanceof AbstractFile ? (AbstractFile) associated : null;
ss.put(new NodeProperty<>(NbBundle.getMessage(BlackboardArtifactNode.class, "ContentTagNode.createSheet.fileModifiedTime.name"),
NbBundle.getMessage(BlackboardArtifactNode.class, "ContentTagNode.createSheet.fileModifiedTime.displayName"),
"",
file != null ? ContentUtils.getStringTime(file.getMtime(), file) : ""));
ss.put(new NodeProperty<>(NbBundle.getMessage(BlackboardArtifactNode.class, "ContentTagNode.createSheet.fileChangedTime.name"),
NbBundle.getMessage(BlackboardArtifactNode.class, "ContentTagNode.createSheet.fileChangedTime.displayName"),
"",
file != null ? ContentUtils.getStringTime(file.getCtime(), file) : ""));
ss.put(new NodeProperty<>(NbBundle.getMessage(BlackboardArtifactNode.class, "ContentTagNode.createSheet.fileAccessedTime.name"),
NbBundle.getMessage(BlackboardArtifactNode.class, "ContentTagNode.createSheet.fileAccessedTime.displayName"),
"",
file != null ? ContentUtils.getStringTime(file.getAtime(), file) : ""));
ss.put(new NodeProperty<>(NbBundle.getMessage(BlackboardArtifactNode.class, "ContentTagNode.createSheet.fileCreatedTime.name"),
NbBundle.getMessage(BlackboardArtifactNode.class, "ContentTagNode.createSheet.fileCreatedTime.displayName"),
"",
file != null ? ContentUtils.getStringTime(file.getCrtime(), file) : ""));
ss.put(new NodeProperty<>(NbBundle.getMessage(BlackboardArtifactNode.class, "ContentTagNode.createSheet.fileSize.name"),
NbBundle.getMessage(BlackboardArtifactNode.class, "ContentTagNode.createSheet.fileSize.displayName"),
"",
associated.getSize()));
}
} else {
String dataSourceStr = "";
try {
Content dataSource = associated.getDataSource();
if (dataSource != null) {
dataSourceStr = dataSource.getName();
} else {
dataSourceStr = getRootParentName();
}
} catch (TskCoreException ex) {
LOGGER.log(Level.WARNING, "Failed to get image name from {0}", associated.getName()); //NON-NLS
}
if (dataSourceStr.isEmpty() == false) {
ss.put(new NodeProperty<>(
NbBundle.getMessage(BlackboardArtifactNode.class, "BlackboardArtifactNode.createSheet.dataSrc.name"),
NbBundle.getMessage(BlackboardArtifactNode.class, "BlackboardArtifactNode.createSheet.dataSrc.displayName"),
NO_DESCR,
dataSourceStr));
}
}
// add properties for tags
List<Tag> tags = new ArrayList<>();
try {
tags.addAll(Case.getCurrentCase().getServices().getTagsManager().getBlackboardArtifactTagsByArtifact(artifact));
tags.addAll(Case.getCurrentCase().getServices().getTagsManager().getContentTagsByContent(associated));
} catch (TskCoreException ex) {
LOGGER.log(Level.SEVERE, "Failed to get tags for artifact " + artifact.getDisplayName(), ex);
}
ss.put(new NodeProperty<>("Tags", NbBundle.getMessage(AbstractAbstractFileNode.class, "BlackboardArtifactNode.createSheet.tags.displayName"),
NO_DESCR, tags.stream().map(t -> t.getName().getDisplayName()).collect(Collectors.joining(", "))));
return s;
}
private void updateSheet() {
this.setSheet(createSheet());
}
private String getRootParentName() {
String parentName = associated.getName();
Content parent = associated;
try {
while ((parent = parent.getParent()) != null) {
parentName = parent.getName();
}
} catch (TskCoreException ex) {
LOGGER.log(Level.WARNING, "Failed to get parent name from {0}", associated.getName()); //NON-NLS
return "";
}
return parentName;
}
/**
* Add an additional custom node property to that node before it is
* displayed
*
* @param np NodeProperty to add
*/
public void addNodeProperty(NodeProperty<?> np) {
if (null == customProperties) {
//lazy create the list
customProperties = new ArrayList<>();
}
customProperties.add(np);
}
/**
* Fill map with Artifact properties
*
* @param map map with preserved ordering, where property names/values
* are put
* @param artifact to extract properties from
*/
@SuppressWarnings("deprecation")
private void fillPropertyMap(Map<String, Object> map, BlackboardArtifact artifact) {
try {
for (BlackboardAttribute attribute : artifact.getAttributes()) {
final int attributeTypeID = attribute.getAttributeType().getTypeID();
//skip some internal attributes that user shouldn't see
if (attributeTypeID == ATTRIBUTE_TYPE.TSK_PATH_ID.getTypeID()
|| attributeTypeID == ATTRIBUTE_TYPE.TSK_TAGGED_ARTIFACT.getTypeID()
|| attributeTypeID == ATTRIBUTE_TYPE.TSK_ASSOCIATED_ARTIFACT.getTypeID()
|| attributeTypeID == ATTRIBUTE_TYPE.TSK_SET_NAME.getTypeID()
|| attributeTypeID == ATTRIBUTE_TYPE.TSK_KEYWORD_SEARCH_TYPE.getTypeID()) {
}
else if (artifact.getArtifactTypeID() == BlackboardArtifact.ARTIFACT_TYPE.TSK_EMAIL_MSG.getTypeID()) {
addEmailMsgProperty (map, attribute);
}
else if (attribute.getAttributeType().getValueType() == BlackboardAttribute.TSK_BLACKBOARD_ATTRIBUTE_VALUE_TYPE.DATETIME) {
map.put(attribute.getAttributeType().getDisplayName(), ContentUtils.getStringTime(attribute.getValueLong(), associated));
} else if (artifact.getArtifactTypeID() == ARTIFACT_TYPE.TSK_TOOL_OUTPUT.getTypeID()
&& attributeTypeID == ATTRIBUTE_TYPE.TSK_TEXT.getTypeID()) {
/*
* This was added because the RegRipper output would often
* cause the UI to get a black line accross it and hang if
* you hovered over large output or selected it. This
* reduces the amount of data in the table. Could consider
* doing this for all fields in the UI.
*/
String value = attribute.getDisplayString();
if (value.length() > 512) {
value = value.substring(0, 512);
}
map.put(attribute.getAttributeType().getDisplayName(), value);
}
else {
map.put(attribute.getAttributeType().getDisplayName(), attribute.getDisplayString());
}
}
} catch (TskCoreException ex) {
LOGGER.log(Level.SEVERE, "Getting attributes failed", ex); //NON-NLS
}
}
/**
* Fill map with EmailMsg properties, not all attributes are filled
*
* @param map map with preserved ordering, where property names/values
* are put
* @param attribute attribute to check/fill as property
*/
private void addEmailMsgProperty(Map<String, Object> map, BlackboardAttribute attribute ) {
final int attributeTypeID = attribute.getAttributeType().getTypeID();
// Skip certain Email msg attributes
if (attributeTypeID == ATTRIBUTE_TYPE.TSK_DATETIME_SENT.getTypeID()
|| attributeTypeID == ATTRIBUTE_TYPE.TSK_EMAIL_CONTENT_HTML.getTypeID()
|| attributeTypeID == ATTRIBUTE_TYPE.TSK_EMAIL_CONTENT_RTF.getTypeID()
|| attributeTypeID == ATTRIBUTE_TYPE.TSK_EMAIL_BCC.getTypeID()
|| attributeTypeID == ATTRIBUTE_TYPE.TSK_EMAIL_CC.getTypeID()
|| attributeTypeID == ATTRIBUTE_TYPE.TSK_HEADERS.getTypeID()
) {
// do nothing
}
else if (attributeTypeID == ATTRIBUTE_TYPE.TSK_EMAIL_CONTENT_PLAIN.getTypeID()) {
String value = attribute.getDisplayString();
if (value.length() > 160) {
value = value.substring(0, 160) + "...";
}
map.put(attribute.getAttributeType().getDisplayName(), value);
}
else if (attribute.getAttributeType().getValueType() == BlackboardAttribute.TSK_BLACKBOARD_ATTRIBUTE_VALUE_TYPE.DATETIME) {
map.put(attribute.getAttributeType().getDisplayName(), ContentUtils.getStringTime(attribute.getValueLong(), associated));
}
else {
map.put(attribute.getAttributeType().getDisplayName(), attribute.getDisplayString());
}
}
@Override
public <T> T accept(DisplayableItemNodeVisitor<T> v) {
return v.visit(this);
}
/**
* Create a Lookup based on what is in the passed in artifact.
*
* @param artifact
*
* @return
*/
private static Lookup createLookup(BlackboardArtifact artifact) {
List<Object> forLookup = new ArrayList<>();
forLookup.add(artifact);
// Add the content the artifact is associated with
Content content = getAssociatedContent(artifact);
if (content != null) {
forLookup.add(content);
}
return Lookups.fixed(forLookup.toArray(new Object[forLookup.size()]));
}
private static Content getAssociatedContent(BlackboardArtifact artifact) {
try {
return artifact.getSleuthkitCase().getContentById(artifact.getObjectID());
} catch (TskCoreException ex) {
LOGGER.log(Level.WARNING, "Getting file failed", ex); //NON-NLS
}
throw new IllegalArgumentException(
NbBundle.getMessage(BlackboardArtifactNode.class, "BlackboardArtifactNode.getAssocCont.exception.msg"));
}
@Override
public boolean isLeafTypeNode() {
return true;
}
@Override
public String getItemType() {
return getClass().getName();
}
}
| narfindustries/autopsy | Core/src/org/sleuthkit/autopsy/datamodel/BlackboardArtifactNode.java | Java | apache-2.0 | 27,314 |
package com.bazaarvoice.emodb.databus.repl;
import com.bazaarvoice.emodb.databus.core.UpdateRefSerializer;
import com.bazaarvoice.emodb.event.api.EventData;
import com.bazaarvoice.emodb.event.api.EventStore;
import com.bazaarvoice.emodb.sor.core.UpdateRef;
import com.google.common.base.Function;
import com.google.common.collect.Lists;
import com.google.inject.Inject;
import java.util.Collection;
import java.util.List;
import static com.google.common.base.Preconditions.checkArgument;
import static java.util.Objects.requireNonNull;
public class DefaultReplicationSource implements ReplicationSource {
private final EventStore _eventStore;
@Inject
public DefaultReplicationSource(EventStore eventStore) {
_eventStore = eventStore;
}
@Override
public List<ReplicationEvent> get(String channel, int limit) {
requireNonNull(channel, "channel");
checkArgument(limit > 0, "Limit must be >0");
List<EventData> rawEvents = _eventStore.peek(channel, limit);
return Lists.transform(rawEvents, new Function<EventData, ReplicationEvent>() {
@Override
public ReplicationEvent apply(EventData rawEvent) {
UpdateRef ref = UpdateRefSerializer.fromByteBuffer(rawEvent.getData());
return new ReplicationEvent(rawEvent.getId(), ref);
}
});
}
@Override
public void delete(String channel, Collection<String> eventIds) {
requireNonNull(channel, "channel");
requireNonNull(eventIds, "eventIds");
_eventStore.delete(channel, eventIds, false);
}
}
| bazaarvoice/emodb | databus/src/main/java/com/bazaarvoice/emodb/databus/repl/DefaultReplicationSource.java | Java | apache-2.0 | 1,617 |
<?php
namespace CHStudio\LaravelTransclude\Exceptions;
class MissingTranscludeDirective extends \RuntimeException
{
}
| CHStudio/laravel-transclude | src/Exceptions/MissingTranscludeDirective.php | PHP | apache-2.0 | 121 |
package com.boot.service;
import com.boot.model.User;
import org.springframework.stereotype.Component;
/**
* Created by Admin on 2017/6/29.
* 熔断机制
*/
@Component
public class HelloServiceFallback implements HelloService {
@Override
public String hello() {
return "error";
}
@Override
public String hello(String name) {
return "error";
}
@Override
public User hello(String name, Integer age) {
return new User("未知", 0);
}
@Override
public String hello(User user) {
return "error";
}
}
| TianYunZi/15springcloud | 6.1.1.spring-cloud-feign/src/main/java/com/boot/service/HelloServiceFallback.java | Java | apache-2.0 | 583 |
package nl.jqno.equalsverifier.internal.reflection;
import static nl.jqno.equalsverifier.internal.util.Rethrow.rethrow;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.util.Set;
import java.util.function.Predicate;
import nl.jqno.equalsverifier.internal.prefabvalues.PrefabValues;
import nl.jqno.equalsverifier.internal.prefabvalues.TypeTag;
import nl.jqno.equalsverifier.internal.reflection.annotations.AnnotationCache;
import nl.jqno.equalsverifier.internal.reflection.annotations.NonnullAnnotationVerifier;
/**
* Instantiates and populates objects of a given class. {@link ClassAccessor} can create two
* different instances of T, which are guaranteed not to be equal to each other, and which contain
* no null values.
*
* @param <T> A class.
*/
public class ClassAccessor<T> {
private final Class<T> type;
private final PrefabValues prefabValues;
/** Private constructor. Call {@link #of(Class, PrefabValues)} instead. */
ClassAccessor(Class<T> type, PrefabValues prefabValues) {
this.type = type;
this.prefabValues = prefabValues;
}
/**
* Factory method.
*
* @param <T> The class on which {@link ClassAccessor} operates.
* @param type The class on which {@link ClassAccessor} operates. Should be the same as T.
* @param prefabValues Prefabricated values with which to fill instantiated objects.
* @return A {@link ClassAccessor} for T.
*/
public static <T> ClassAccessor<T> of(Class<T> type, PrefabValues prefabValues) {
return new ClassAccessor<>(type, prefabValues);
}
/** @return The class on which {@link ClassAccessor} operates. */
public Class<T> getType() {
return type;
}
/**
* Determines whether T is a Java Record.
*
* @return true if T is a Java Record.
*/
public boolean isRecord() {
return RecordsHelper.isRecord(type);
}
/**
* Determines whether T is a sealed class.
*
* @return true if T is a sealed class
*/
public boolean isSealed() {
return SealedClassesHelper.isSealed(type);
}
/**
* Determines whether T declares a field. This does not include inherited fields.
*
* @param field The field that we want to detect.
* @return True if T declares the field.
*/
public boolean declaresField(Field field) {
try {
type.getDeclaredField(field.getName());
return true;
} catch (NoSuchFieldException e) {
return false;
}
}
/**
* Determines whether T has an {@code equals} method.
*
* @return True if T has an {@code equals} method.
*/
public boolean declaresEquals() {
return declaresMethod("equals", Object.class);
}
/**
* Determines whether T has an {@code hashCode} method.
*
* @return True if T has an {@code hashCode} method.
*/
public boolean declaresHashCode() {
return declaresMethod("hashCode");
}
private boolean declaresMethod(String name, Class<?>... parameterTypes) {
try {
type.getDeclaredMethod(name, parameterTypes);
return true;
} catch (NoSuchMethodException e) {
return false;
}
}
/**
* Determines whether T's {@code equals} method is abstract.
*
* @return True if T's {@code equals} method is abstract.
*/
public boolean isEqualsAbstract() {
return isMethodAbstract("equals", Object.class);
}
/**
* Determines whether T's {@code hashCode} method is abstract.
*
* @return True if T's {@code hashCode} method is abstract.
*/
public boolean isHashCodeAbstract() {
return isMethodAbstract("hashCode");
}
private boolean isMethodAbstract(String name, Class<?>... parameterTypes) {
return rethrow(() ->
Modifier.isAbstract(type.getMethod(name, parameterTypes).getModifiers())
);
}
/**
* Determines whether T's {@code equals} method is inherited from {@link Object}.
*
* @return true if T's {@code equals} method is inherited from {@link Object}; false if it is
* overridden in T or in any of its superclasses (except {@link Object}).
*/
public boolean isEqualsInheritedFromObject() {
ClassAccessor<? super T> i = this;
while (i.getType() != Object.class) {
if (i.declaresEquals() && !i.isEqualsAbstract()) {
return false;
}
i = i.getSuperAccessor();
}
return true;
}
/**
* Returns an accessor for T's superclass.
*
* @return An accessor for T's superclass.
*/
public ClassAccessor<? super T> getSuperAccessor() {
return ClassAccessor.of(type.getSuperclass(), prefabValues);
}
/**
* Returns an instance of T that is not equal to the instance of T returned by {@link
* #getBlueObject(TypeTag)}.
*
* @param enclosingType Describes the type that contains this object as a field, to determine
* any generic parameters it may contain.
* @return An instance of T.
*/
public T getRedObject(TypeTag enclosingType) {
return getRedAccessor(enclosingType).get();
}
/**
* Returns an {@link ObjectAccessor} for {@link #getRedObject(TypeTag)}.
*
* @param enclosingType Describes the type that contains this object as a field, to determine
* any generic parameters it may contain.
* @return An {@link ObjectAccessor} for {@link #getRedObject(TypeTag)}.
*/
public ObjectAccessor<T> getRedAccessor(TypeTag enclosingType) {
return buildObjectAccessor().scramble(prefabValues, enclosingType);
}
/**
* Returns an instance of T that is not equal to the instance of T returned by {@link
* #getRedObject(TypeTag)}.
*
* @param enclosingType Describes the type that contains this object as a field, to determine
* any generic parameters it may contain.
* @return An instance of T.
*/
public T getBlueObject(TypeTag enclosingType) {
return getBlueAccessor(enclosingType).get();
}
/**
* Returns an {@link ObjectAccessor} for {@link #getBlueObject(TypeTag)}.
*
* @param enclosingType Describes the type that contains this object as a field, to determine
* any generic parameters it may contain.
* @return An {@link ObjectAccessor} for {@link #getBlueObject(TypeTag)}.
*/
public ObjectAccessor<T> getBlueAccessor(TypeTag enclosingType) {
return buildObjectAccessor()
.scramble(prefabValues, enclosingType)
.scramble(prefabValues, enclosingType);
}
/**
* Returns an {@link ObjectAccessor} for an instance of T where all the fields are initialized
* to their default values. I.e., 0 for ints, and null for objects (except when the field is
* marked with a NonNull annotation).
*
* @param enclosingType Describes the type that contains this object as a field, to determine
* any generic parameters it may contain.
* @param nonnullFields Fields which are not allowed to be set to null.
* @param annotationCache To check for any NonNull annotations.
* @return An {@link ObjectAccessor} for an instance of T where all the fields are initialized
* to their default values.
*/
public ObjectAccessor<T> getDefaultValuesAccessor(
TypeTag enclosingType,
Set<String> nonnullFields,
AnnotationCache annotationCache
) {
Predicate<Field> canBeDefault = f ->
!NonnullAnnotationVerifier.fieldIsNonnull(f, annotationCache) &&
!nonnullFields.contains(f.getName());
return buildObjectAccessor().clear(canBeDefault, prefabValues, enclosingType);
}
private ObjectAccessor<T> buildObjectAccessor() {
T object = Instantiator.of(type).instantiate();
return ObjectAccessor.of(object);
}
}
| jqno/equalsverifier | equalsverifier-core/src/main/java/nl/jqno/equalsverifier/internal/reflection/ClassAccessor.java | Java | apache-2.0 | 8,056 |
package org.nd4j.linalg.api.ndarray;
import org.nd4j.linalg.api.buffer.DataBuffer;
import org.nd4j.linalg.api.shape.Shape;
/**
* @author raver119@gmail.com
*/
public abstract class BaseShapeInfoProvider implements ShapeInfoProvider {
@Override
public DataBuffer createShapeInformation(int[] shape, int[] stride, int offset, int elementWiseStride, char order) {
return Shape.createShapeInformation(shape, stride, offset, elementWiseStride, order);
}
}
| drlebedev/nd4j | nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/linalg/api/ndarray/BaseShapeInfoProvider.java | Java | apache-2.0 | 475 |
var clientSettings = require('../lib/plugins/client-settings');
var express = require('express');
var supertest = require('supertest');
var assert = require('assert');
describe('logout()', function() {
var server;
var clientConfigOptions;
beforeEach(function() {
server = express();
server.use(function(req, res, next) {
req.ext = {clientConfig: {}};
next();
});
server.use(function(req, res, next) {
clientSettings(clientConfigOptions)(req, res, next);
});
server.get('/', function(req, res, next) {
res.json(req.ext.clientConfig);
});
});
it('should redirect to index page', function(done) {
clientConfigOptions = {
option1: 'foo',
option2: { name: 'joe'}
};
supertest(server)
.get('/')
.expect(200)
.expect(function(res) {
assert.deepEqual(res.body.settings, clientConfigOptions);
})
.end(done);
});
});
| 4front/apphost | test/plugin.client-settings.js | JavaScript | apache-2.0 | 940 |
'use strict';
var express = require('express');
var app = express();
app.use('/components/gh-issues', express.static( __dirname));
app.use('/components', express.static(__dirname + '/bower_components'));
app.get('/', function(req, res){
res.redirect('/components/gh-issues/');
});
app.get('/hello', function (req, res) {
res.status(200).send('Hello, world!');
});
var server = app.listen(process.env.PORT || '8080', function () {
console.log('App listening on port %s', server.address().port);
});
| koopaworks/polymer-gh-issues | index.js | JavaScript | apache-2.0 | 510 |
/*-
* #%L
* Bobcat
* %%
* Copyright (C) 2018 Cognifide Ltd.
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
package com.cognifide.qa.bb.junit5;
import org.junit.jupiter.api.extension.ExtensionContext.Namespace;
/**
* Contains common constants for the whole JUnit 5 module.
*/
public final class JUnit5Constants {
public static final Namespace NAMESPACE =
Namespace.create("com", "cognifide", "qa", "bb", "junit", "guice");
private JUnit5Constants() {
//util
}
}
| Cognifide/bobcat | bb-junit5/src/main/java/com/cognifide/qa/bb/junit5/JUnit5Constants.java | Java | apache-2.0 | 1,013 |
<?php
namespace DCarbone\PHPFHIRGenerated\R4\PHPFHIRTests\FHIRCodePrimitive;
/*!
* This class was generated with the PHPFHIR library (https://github.com/dcarbone/php-fhir) using
* class definitions from HL7 FHIR (https://www.hl7.org/fhir/)
*
* Class creation date: December 26th, 2019 15:44+0000
*
* PHPFHIR Copyright:
*
* Copyright 2016-2019 Daniel Carbone (daniel.p.carbone@gmail.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
* FHIR Copyright Notice:
*
* Copyright (c) 2011+, HL7, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* * Neither the name of HL7 nor the names of its contributors may be used to
* endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*
* Generated on Fri, Nov 1, 2019 09:29+1100 for FHIR v4.0.1
*
* Note: the schemas & schematrons do not contain all of the rules about what makes resources
* valid. Implementers will still need to be familiar with the content of the specification and with
* any profiles that apply to the resources in order to make a conformant implementation.
*
*/
use PHPUnit\Framework\TestCase;
use DCarbone\PHPFHIRGenerated\R4\FHIRCodePrimitive\FHIRCarePlanActivityStatusList;
/**
* Class FHIRCarePlanActivityStatusListTest
* @package \DCarbone\PHPFHIRGenerated\R4\PHPFHIRTests\FHIRCodePrimitive
*/
class FHIRCarePlanActivityStatusListTest extends TestCase
{
public function testCanConstructTypeNoArgs()
{
$type = new FHIRCarePlanActivityStatusList();
$this->assertInstanceOf('\DCarbone\PHPFHIRGenerated\R4\FHIRCodePrimitive\FHIRCarePlanActivityStatusList', $type);
}
}
| dcarbone/php-fhir-generated | src/DCarbone/PHPFHIRGenerated/R4/PHPFHIRTests/FHIRCodePrimitive/FHIRCarePlanActivityStatusListTest.php | PHP | apache-2.0 | 3,509 |
const CLI = require('CLI');
describe('CLI', () => {
function args(...arr) {
return [ 'node', 'polymer-lint.js', ...arr ];
}
let Options, Linter;
const filenames = [
'./spec/integration/good-component.html',
'./spec/integration/bad-component.html',
];
beforeEach(() => {
Options = require('Options');
spyOn(console, 'log');
});
describe('execute', () => {
describe('with no arguments', () => {
it('displays help', () => {
spyOn(Options, 'generateHelp').and.returnValue('Help');
CLI.execute(args('--help'));
expect(Options.generateHelp).toHaveBeenCalled();
expect(console.log).toHaveBeenCalledWith('Help');
});
});
describe('with filename arguments', () => {
let mockPromise;
beforeEach(() => {
mockPromise = jasmine.createSpyObj('promise', ['then']);
Linter = require('Linter');
spyOn(Linter, 'lintFiles').and.returnValue(mockPromise);
});
it('calls Linter.lintFiles with the given filenames', () => {
CLI.execute(args(...filenames));
expect(Linter.lintFiles).toHaveBeenCalledWith(
filenames, jasmine.objectContaining({ _: filenames }));
expect(mockPromise.then).toHaveBeenCalledWith(jasmine.any(Function));
});
describe('and --rules', () => {
it('calls Linter.lintFiles with the expected `rules` option', () => {
const ruleNames = ['no-missing-import', 'no-unused-import'];
CLI.execute(args('--rules', ruleNames.join(','), ...filenames));
expect(Linter.lintFiles).toHaveBeenCalledTimes(1);
const [ actualFilenames, { rules: actualRules } ] = Linter.lintFiles.calls.argsFor(0);
expect(actualFilenames).toEqual(filenames);
expect(actualRules).toEqual(ruleNames);
expect(mockPromise.then).toHaveBeenCalledWith(jasmine.any(Function));
});
});
});
describe('with --help', () => {
it('displays help', () => {
spyOn(Options, 'generateHelp').and.returnValue('Help');
CLI.execute(args('--help'));
expect(Options.generateHelp).toHaveBeenCalled();
expect(console.log).toHaveBeenCalledWith('Help');
});
});
describe('with --version', () => {
it('prints the version number', () => {
CLI.execute(args('--version'));
const expectedVersion = `v${require('../../package.json').version}`;
expect(console.log).toHaveBeenCalledWith(expectedVersion);
});
});
describe('with --color', () => {});
describe('with --no-color', () => {});
});
});
| Banno/polymer-lint | spec/lib/CLISpec.js | JavaScript | apache-2.0 | 2,623 |
package cn.elvea.platform.commons.storage.oss;
import lombok.Data;
import java.io.Serializable;
/**
* 阿里云存储配置参数
*
* @author elvea
* @since 0.0.1
*/
@Data
public class OssStorageConfig implements Serializable {
/**
* Endpoint
*/
private String endpoint = "";
/**
* Access Key Id
*/
private String accessKeyId = "";
/**
* Access Key Secret
*/
private String accessKeySecret = "";
/**
* Bucket Name
*/
private String bucketName = "";
/**
* 自定义域名
*/
private String domain = "";
}
| elveahuang/platform | platform-commons/platform-commons-storage/src/main/java/cn/elvea/platform/commons/storage/oss/OssStorageConfig.java | Java | apache-2.0 | 602 |
<?php
namespace VagueSoftware\Refuel2Bundle\Presenter;
use VagueSoftware\Refuel2Bundle\Exception\Presenter\PresenterNotFoundException;
/**
* Class PresenterFactory
* @package VagueSoftware\Refuel2Bundle\Presenter
*/
class PresenterFactory
{
/**
* @var array
*/
private $presenters = [];
/**
* @param string $class
* @param PresenterInterface $presenter
* @return PresenterFactory
*/
public function registerPresenter(string $class, PresenterInterface $presenter): PresenterFactory
{
$this->presenters[$class] = $presenter;
return $this;
}
/**
* @param string $class
* @return PresenterInterface
*/
public function getPresenter(string $class): PresenterInterface
{
if (!array_key_exists($class, $this->presenters)) {
throw new PresenterNotFoundException($class);
}
return $this->presenters[$class];
}
}
| evilfirefox/refuel2 | src/VagueSoftware/Refuel2Bundle/Presenter/PresenterFactory.php | PHP | apache-2.0 | 949 |
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using Shrtn.Entity;
using Shrtn.Entity.Encoders;
namespace Shrtn
{
/// <summary>
/// Utility class that takes integers such as a primary key id and turns them into short strings using base conversion.
/// </summary>
public static class Shorten
{
/// <summary>
/// Encode an integer using the default encoder
/// </summary>
/// <param name="value">Value to be encoded</param>
/// <returns>An integer encoded to a string</returns>
public static string Encode(ulong value)
{
return Encode(value, EncoderTypes.CrockfordLower);
}
/// <summary>
/// Encode an integer and specify one of the builtin encoders
/// </summary>
/// <param name="value">Value to be encoded</param>
/// <param name="encoderType">The encoder to be used</param>
/// <returns>An integer encoded to a string</returns>
public static string Encode(ulong value, EncoderTypes encoderType)
{
EncoderFactory factory = new EncoderFactory();
BaseEncoder encoder = factory.GetEncoder(encoderType);
return encoder.Encode(value);
}
/// <summary>
/// Encode an integer using a custom encoder
/// </summary>
/// <param name="value">Value to be encoded</param>
/// <param name="encoder">The custom encoder to be used</param>
/// <returns>An integer encoded to a string</returns>
public static string Encode(ulong value, BaseEncoder encoder)
{
return encoder.Encode(value);
}
/// <summary>
/// Decode a string using the default encoder
/// </summary>
/// <param name="encodedValue">The encoded string</param>
/// <returns>A converted integer</returns>
public static ulong Decode(string encodedValue)
{
return Decode(encodedValue, EncoderTypes.CrockfordLower);
}
/// <summary>
/// Decode a string and specify one of the builtin encoders
/// </summary>
/// <param name="encodedValue">The encoded string</param>
/// <param name="encoderType">The encoder used on this string</param>
/// <returns>A converted integer</returns>
public static ulong Decode(string encodedValue, EncoderTypes encoderType)
{
EncoderFactory factory = new EncoderFactory();
BaseEncoder encoder = factory.GetEncoder(encoderType);
return encoder.Decode(encodedValue);
}
/// <summary>
/// Decode a string using a custom encoder
/// </summary>
/// <param name="encodedValue">The encoded string</param>
/// <param name="encoder">The custom encoder to be used</param>
/// <returns>A converted integer</returns>
public static ulong Decode(string encodedValue, BaseEncoder encoder)
{
return encoder.Decode(encodedValue);
}
}
}
| ryan-nauman/Shrtn | Shrtn/Shrtn.cs | C# | apache-2.0 | 3,095 |
package org.schema;
/**
*
* A collection of music tracks.
*
* @fullPath Thing > CreativeWork > MusicPlaylist > MusicAlbum
*
* @author Texelz (by Onhate)
*
*/
public class MusicAlbum extends MusicPlaylist {
private MusicGroup byArtist;
/**
* The artist that performed this album or recording.
*/
public MusicGroup getByArtist() {
return this.byArtist;
}
/**
* The artist that performed this album or recording.
*/
public void setByArtist(MusicGroup byArtist) {
this.byArtist = byArtist;
}
}
| onhate/schemorger | src/main/java/org/schema/MusicAlbum.java | Java | apache-2.0 | 525 |
package cl.puntocontrol.struts.action;
import java.util.ArrayList;
import java.util.List;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.struts.action.Action;
import org.apache.struts.action.ActionForm;
import org.apache.struts.action.ActionForward;
import org.apache.struts.action.ActionMapping;
import cl.puntocontrol.hibernate.dao.DAOTransportista;
import cl.puntocontrol.hibernate.domain.Transportista;
import cl.puntocontrol.hibernate.domain.Usuario;
import cl.puntocontrol.struts.form.TransportistasForm;
public class TransportistasBuscarAction extends Action {
@Override
public ActionForward execute(ActionMapping mapping, ActionForm _form,
HttpServletRequest request, HttpServletResponse response)
throws Exception {
TransportistasForm form = (TransportistasForm)_form;
try{
List<Transportista> transportistas = new ArrayList<Transportista>();
transportistas=DAOTransportista.list("","");
form.setTransportistas(transportistas);
form.setEstado(0);
form.setNombre_transportista("");
form.setRut_transportista("");
form.setSap_transportista("");
String userName = (String)request.getSession().getAttribute("userName");
String password = (String)request.getSession().getAttribute("password");
Usuario usuario = UsuarioUtil.checkUser(userName, password);
form.setUsuario(usuario);
form.setSuccessMessage("");
return mapping.findForward("success");
}catch(Exception ex){
form.setErrorMessage("Ha Ocurrido Un Error Inesperado.");
return mapping.findForward("error");
}finally{
}
}
} | Claudio1986/Punto_control | src/cl/puntocontrol/struts/action/TransportistasBuscarAction.java | Java | apache-2.0 | 1,697 |
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
package org.nd4j.tensorflow.conversion;
import org.nd4j.shade.protobuf.InvalidProtocolBufferException;
import org.bytedeco.javacpp.*;
import org.bytedeco.javacpp.indexer.*;
import org.nd4j.linalg.api.buffer.DataBuffer;
import org.nd4j.linalg.api.buffer.DataType;
import org.nd4j.linalg.api.concurrency.AffinityManager;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.compression.CompressedDataBuffer;
import org.nd4j.linalg.compression.CompressionDescriptor;
import org.nd4j.linalg.factory.Nd4j;
import org.nd4j.common.util.ArrayUtil;
import org.nd4j.tensorflow.conversion.graphrunner.SavedModelConfig;
import org.tensorflow.framework.MetaGraphDef;
import org.tensorflow.framework.SignatureDef;
import org.tensorflow.framework.TensorInfo;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Map;
import org.bytedeco.tensorflow.*;
import static org.bytedeco.tensorflow.global.tensorflow.*;
/**
* Interop between nd4j {@link INDArray}
* and {@link TF_Tensor}
*
* @author Adam Gibson
*/
public class TensorflowConversion {
//used for passing to tensorflow: this dummy de allocator
//allows us to use nd4j buffers for memory management
//rather than having them managed by tensorflow
private static Deallocator_Pointer_long_Pointer calling;
private static TensorflowConversion INSTANCE;
/**
* Get a singleton instance
* @return
*/
public static TensorflowConversion getInstance() {
if(INSTANCE == null)
INSTANCE = new TensorflowConversion();
return INSTANCE;
}
private TensorflowConversion() {
if(calling == null)
calling = DummyDeAllocator.getInstance();
}
/**
* Convert an {@link INDArray}
* to a {@link TF_Tensor}
* with zero copy.
* Uses a direct pointer to the underlying ndarray's
* data
* @param ndArray the ndarray to use
* @return the equivalent {@link TF_Tensor}
*/
public TF_Tensor tensorFromNDArray(INDArray ndArray) {
if(ndArray == null) {
throw new IllegalArgumentException("NDArray must not be null!");
}
//we infer data type from the ndarray.databuffer()
//for now we throw an exception
if(ndArray.data() == null) {
throw new IllegalArgumentException("Unable to infer data type from null databuffer");
}
if(ndArray.isView() || ndArray.ordering() != 'c') {
ndArray = ndArray.dup('c');
}
long[] ndShape = ndArray.shape();
long[] tfShape = new long[ndShape.length];
System.arraycopy(ndShape, 0, tfShape, 0, ndShape.length);
int type;
DataBuffer data = ndArray.data();
DataType dataType = data.dataType();
switch (dataType) {
case DOUBLE: type = DT_DOUBLE; break;
case FLOAT: type = DT_FLOAT; break;
case INT: type = DT_INT32; break;
case HALF: type = DT_HALF; break;
case COMPRESSED:
CompressedDataBuffer compressedData = (CompressedDataBuffer)data;
CompressionDescriptor desc = compressedData.getCompressionDescriptor();
String algo = desc.getCompressionAlgorithm();
switch (algo) {
case "FLOAT16": type = DT_HALF; break;
case "INT8": type = DT_INT8; break;
case "UINT8": type = DT_UINT8; break;
case "INT16": type = DT_INT16; break;
case "UINT16": type = DT_UINT16; break;
default: throw new IllegalArgumentException("Unsupported compression algorithm: " + algo);
}
break;
case SHORT: type = DT_INT16; break;
case LONG: type = DT_INT64; break;
case UTF8: type = DT_STRING; break;
case BYTE: type = DT_INT8; break;
case UBYTE: type = DT_UINT8; break;
case UINT16: type = DT_UINT16; break;
case UINT32: type = DT_UINT32; break;
case UINT64: type = DT_UINT64; break;
case BFLOAT16: type = DT_BFLOAT16; break;
case BOOL: type = DT_BOOL; break;
default: throw new IllegalArgumentException("Unsupported data type: " + dataType);
}
try {
Nd4j.getAffinityManager().ensureLocation(ndArray, AffinityManager.Location.HOST);
} catch (Exception e) {
// ND4J won't let us access compressed data in GPU memory, so we'll leave TensorFlow do the conversion instead
ndArray.getDouble(0); // forces decompression and data copy to host
data = ndArray.data();
dataType = data.dataType();
switch (dataType) {
case DOUBLE: type = DT_DOUBLE; break;
case FLOAT: type = DT_FLOAT; break;
case INT: type = DT_INT32; break;
case LONG: type = DT_INT64; break;
case UTF8: type = DT_STRING; break;
default: throw new IllegalArgumentException("Unsupported data type: " + dataType);
}
}
LongPointer longPointer = new LongPointer(tfShape);
TF_Tensor tf_tensor = null;
if (type == DT_STRING) {
long size = 0;
long length = ndArray.length();
BytePointer[] strings = new BytePointer[(int)length];
for (int i = 0; i < length; i++) {
strings[i] = new BytePointer(ndArray.getString(i));
size += TF_StringEncodedSize(strings[i].capacity());
}
tf_tensor = TF_AllocateTensor(
type,
longPointer,
tfShape.length,
8 * length + size);
long offset = 0;
BytePointer tf_data = new BytePointer(TF_TensorData(tf_tensor)).capacity(TF_TensorByteSize(tf_tensor));
TF_Status status = TF_NewStatus();
for (int i = 0; i < length; i++) {
tf_data.position(8 * i).putLong(offset);
offset += TF_StringEncode(strings[i], strings[i].capacity() - 1, tf_data.position(8 * length + offset), tf_data.capacity() - tf_data.position(), status);
if (TF_GetCode(status) != TF_OK) {
throw new IllegalStateException("ERROR: Unable to convert tensor " + TF_Message(status).getString());
}
}
TF_DeleteStatus(status);
} else {
tf_tensor = TF_NewTensor(
type,
longPointer,
tfShape.length,
data.pointer(),
data.length() * data.getElementSize(),
calling,null);
}
return tf_tensor;
}
/**
 * Convert a {@link TF_Tensor} to an {@link INDArray}.
 * For non-string tensors this is zero copy: the returned array is backed by
 * the tensor's native data pointer. String (DT_STRING) tensors are instead
 * decoded element by element into a new UTF8 array.
 * @param tensor the tensor to convert
 * @return the equivalent INDArray
 */
public INDArray ndArrayFromTensor(TF_Tensor tensor) {
    int rank = TF_NumDims(tensor);
    int[] ndShape;
    if (rank == 0) {
        // scalar: represented as a length-1 array since nd4j shapes are non-empty
        ndShape = new int[] { 1 };
    } else {
        ndShape = new int[rank];
        for (int i = 0; i < ndShape.length; i++) {
            ndShape[i] = (int) TF_Dim(tensor,i);
        }
    }
    int tfType = TF_TensorType(tensor);
    DataType nd4jType = typeFor(tfType);
    int length = ArrayUtil.prod(ndShape);
    INDArray array;
    if (nd4jType == DataType.UTF8) {
        String[] strings = new String[length];
        BytePointer data = new BytePointer(TF_TensorData(tensor)).capacity(TF_TensorByteSize(tensor));
        BytePointer str = new BytePointer((Pointer)null);
        SizeTPointer size = new SizeTPointer(1);
        TF_Status status = TF_NewStatus();
        for (int i = 0; i < length; i++) {
            // TF string tensors begin with a table of 8-byte offsets (one per
            // element) followed by the encoded strings themselves.
            long offset = data.position(8 * i).getLong();
            TF_StringDecode(data.position(8 * length + offset), data.capacity() - data.position(), str, size, status);
            if (TF_GetCode(status) != TF_OK) {
                throw new IllegalStateException("ERROR: Unable to convert tensor " + TF_Message(status).getString());
            }
            strings[i] = str.position(0).capacity(size.get()).getString();
        }
        TF_DeleteStatus(status);
        array = Nd4j.create(strings);
    } else {
        // Zero copy: wrap the tensor's native memory in an nd4j DataBuffer via
        // an indexer of the matching primitive type.
        Pointer pointer = TF_TensorData(tensor).capacity(length);
        Indexer indexer = indexerForType(nd4jType,pointer);
        DataBuffer d = Nd4j.createBuffer(indexer.pointer(),nd4jType,length,indexer);
        array = Nd4j.create(d,ndShape);
    }
    // we don't need this in this case. Device memory will be updated right in the constructor
    //Nd4j.getAffinityManager().tagLocation(array, AffinityManager.Location.HOST);
    return array;
}
/**
 * Creates a JavaCPP {@link Indexer} view over {@code pointer} matching the
 * given ND4J {@link DataType}.
 *
 * @param type the ND4J data type of the underlying native buffer
 * @param pointer the raw native data pointer
 * @return an indexer of the matching primitive type
 * @throws IllegalArgumentException if no indexer is known for {@code type}
 */
private Indexer indexerForType(DataType type,Pointer pointer) {
    switch(type) {
        case DOUBLE: return DoubleIndexer.create(new DoublePointer(pointer));
        case FLOAT: return FloatIndexer.create(new FloatPointer(pointer));
        case INT: return IntIndexer.create(new IntPointer(pointer));
        case LONG: return LongIndexer.create(new LongPointer(pointer));
        case SHORT: return ShortIndexer.create(new ShortPointer(pointer));
        case BYTE: return ByteIndexer.create(new BytePointer(pointer));
        case UBYTE: return UByteIndexer.create(new BytePointer(pointer));
        case UINT16: return UShortIndexer.create(new ShortPointer(pointer));
        case UINT32: return UIntIndexer.create(new IntPointer(pointer));
        case UINT64: return ULongIndexer.create(new LongPointer(pointer));
        case BFLOAT16: return Bfloat16Indexer.create(new ShortPointer(pointer));
        case HALF: return HalfIndexer.create(new ShortPointer(pointer));
        case BOOL: return BooleanIndexer.create(new BooleanPointer(pointer));
        default: throw new IllegalArgumentException("Illegal type " + type);
    }
}
/**
 * Maps a TensorFlow C API tensor type constant (DT_*) to the equivalent
 * ND4J {@link DataType}.
 *
 * @param tensorflowType the TF_DataType constant (e.g. DT_FLOAT)
 * @return the corresponding ND4J data type
 * @throws IllegalArgumentException for unmapped TensorFlow types
 */
private DataType typeFor(int tensorflowType) {
    switch(tensorflowType) {
        case DT_DOUBLE: return DataType.DOUBLE;
        case DT_FLOAT: return DataType.FLOAT;
        case DT_HALF: return DataType.HALF;
        case DT_INT16: return DataType.SHORT;
        case DT_INT32: return DataType.INT;
        case DT_INT64: return DataType.LONG;
        case DT_STRING: return DataType.UTF8;
        case DT_INT8: return DataType.BYTE;
        case DT_UINT8: return DataType.UBYTE;
        case DT_UINT16: return DataType.UINT16;
        case DT_UINT32: return DataType.UINT32;
        case DT_UINT64: return DataType.UINT64;
        case DT_BFLOAT16: return DataType.BFLOAT16;
        case DT_BOOL: return DataType.BOOL;
        default: throw new IllegalArgumentException("Illegal type " + tensorflowType);
    }
}
/**
* Get an initialized {@link TF_Graph}
* based on the passed in file
* (the file must be a binary protobuf/pb file)
* The graph will be modified to be associated
* with the device associated with this current thread.
*
* Depending on the active {@link Nd4j#getBackend()}
* the device will either be the gpu pinned to the current thread
* or the cpu
* @param filePath the path to the file to read
* @return the initialized graph
* @throws IOException
*/
public TF_Graph loadGraph(String filePath, TF_Status status) throws IOException {
    // Read the whole protobuf file and delegate to the byte[] overload.
    return loadGraph(Files.readAllBytes(Paths.get(filePath)), status);
}
/**
* Infers the device for the given thread
* based on the {@link Nd4j#getAffinityManager()}
* Usually, this will either be a gpu or cpu
* reserved for the current device.
* You can think of the "current thread"
* as a worker. This is mainly useful with multiple gpus
* @return
*/
public static String defaultDeviceForThread() {
    // Device index pinned to the current thread by nd4j's affinity manager.
    Integer deviceForThread = Nd4j.getAffinityManager().getDeviceForCurrentThread();
    // A JCublas backend means we are running on gpu; anything else is cpu.
    boolean gpuBackend = Nd4j.getBackend().getClass().getName().contains("JCublasBackend");
    return (gpuBackend ? "/device:gpu:" : "/device:cpu:") + deviceForThread;
}
/**
 * Get an initialized {@link TF_Graph}
 * based on the passed in byte array content
 * (the content must be a binary protobuf/pb file).
 * The graph will be modified to be associated
 * with the device associated with this current thread.
 *
 * Depending on the active {@link Nd4j#getBackend()}
 * the device will either be the gpu pinned to the current thread
 * or the cpu
 * @param content the raw protobuf (pb) bytes of the graph definition
 * @param status the status object used to surface import errors
 * @return the initialized graph
 */
public TF_Graph loadGraph(byte[] content, TF_Status status) {
    TF_Buffer graph_def = TF_NewBufferFromString(new BytePointer(content), content.length);
    // NOTE(review): graph_def is never freed; if these bindings expose
    // TF_DeleteBuffer, it should be deleted once the import has completed.
    TF_Graph graphC = TF_NewGraph();
    TF_ImportGraphDefOptions opts = TF_NewImportGraphDefOptions();
    try {
        TF_GraphImportGraphDef(graphC, graph_def, opts, status);
        if (TF_GetCode(status) != TF_OK) {
            throw new IllegalStateException("ERROR: Unable to import graph " + TF_Message(status).getString());
        }
    } finally {
        // Previously the options leaked when the import failed; delete them on
        // both the success and the error path.
        TF_DeleteImportGraphDefOptions(opts);
    }
    return graphC;
}
/**
 * Load a session based on the saved model
 * @param savedModelConfig the configuration for the saved model
 * @param options the session options to use
 * @param runOptions the run configuration to use
 * @param graph the tf graph to use
 * @param inputsMap OUT parameter: populated with (input key -> tensor name) from the signature def
 * @param outputsMap OUT parameter: populated with (output key -> tensor name) from the signature def
 * @param status the status object to use for verifying the results
 * @return the loaded session
 */
public TF_Session loadSavedModel(SavedModelConfig savedModelConfig, TF_SessionOptions options, TF_Buffer runOptions, TF_Graph graph, Map<String, String> inputsMap, Map<String, String> outputsMap, TF_Status status) {
    TF_Buffer metaGraph = TF_Buffer.newBuffer();
    TF_Session session = TF_LoadSessionFromSavedModel(options, runOptions, new BytePointer(savedModelConfig.getSavedModelPath()),
            new BytePointer(savedModelConfig.getModelTag()), 1, graph, metaGraph, status);
    if (TF_GetCode(status) != TF_OK) {
        throw new IllegalStateException("ERROR: Unable to import model " + TF_Message(status).getString());
    }
    // Parse the MetaGraphDef protobuf returned by the C API so we can read the
    // signature def (maps logical input/output keys to graph tensor names).
    MetaGraphDef metaGraphDef;
    try {
        metaGraphDef = MetaGraphDef.parseFrom(metaGraph.data().capacity(metaGraph.length()).asByteBuffer());
    } catch (InvalidProtocolBufferException ex) {
        throw new IllegalStateException("ERROR: Unable to import model " + ex);
    }
    Map<String, SignatureDef> signatureDefMap = metaGraphDef.getSignatureDefMap();
    SignatureDef signatureDef = signatureDefMap.get(savedModelConfig.getSignatureKey());
    Map<String, TensorInfo> inputs = signatureDef.getInputsMap();
    for (Map.Entry<String, TensorInfo> e : inputs.entrySet()) {
        inputsMap.put(e.getKey(), e.getValue().getName());
    }
    Map<String, TensorInfo> outputs = signatureDef.getOutputsMap();
    for (Map.Entry<String, TensorInfo> e : outputs.entrySet()) {
        outputsMap.put(e.getKey(), e.getValue().getName());
    }
    return session;
}
}
| deeplearning4j/deeplearning4j | nd4j/nd4j-tensorflow/src/main/java/org/nd4j/tensorflow/conversion/TensorflowConversion.java | Java | apache-2.0 | 16,741 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.spatial4j.core.shape.jts;
import com.spatial4j.core.context.SpatialContext;
import com.spatial4j.core.context.jts.JtsSpatialContext;
import com.spatial4j.core.exception.InvalidShapeException;
import com.spatial4j.core.shape.Circle;
import com.spatial4j.core.shape.Point;
import com.spatial4j.core.shape.Rectangle;
import com.spatial4j.core.shape.Shape;
import com.spatial4j.core.shape.SpatialRelation;
import com.spatial4j.core.shape.impl.BufferedLineString;
import com.spatial4j.core.shape.impl.PointImpl;
import com.spatial4j.core.shape.impl.Range;
import com.spatial4j.core.shape.impl.RectangleImpl;
import com.vividsolutions.jts.geom.Coordinate;
import com.vividsolutions.jts.geom.CoordinateSequence;
import com.vividsolutions.jts.geom.CoordinateSequenceFilter;
import com.vividsolutions.jts.geom.Envelope;
import com.vividsolutions.jts.geom.Geometry;
import com.vividsolutions.jts.geom.GeometryCollection;
import com.vividsolutions.jts.geom.GeometryFilter;
import com.vividsolutions.jts.geom.IntersectionMatrix;
import com.vividsolutions.jts.geom.LineString;
import com.vividsolutions.jts.geom.Lineal;
import com.vividsolutions.jts.geom.LinearRing;
import com.vividsolutions.jts.geom.Polygon;
import com.vividsolutions.jts.geom.Puntal;
import com.vividsolutions.jts.geom.prep.PreparedGeometry;
import com.vividsolutions.jts.geom.prep.PreparedGeometryFactory;
import com.vividsolutions.jts.operation.union.UnaryUnionOp;
import com.vividsolutions.jts.operation.valid.IsValidOp;
import java.util.ArrayList;
import java.util.List;
/**
* Wraps a JTS {@link Geometry} (i.e. may be a polygon or basically anything).
* JTS does a great deal of the hard work, but there is work here in handling
* dateline wrap.
*/
public class JtsGeometry implements Shape {
  /**
   * System property boolean that can disable auto validation in an assert.
   */
  public static final String SYSPROP_ASSERT_VALIDATE = "spatial4j.JtsGeometry.assertValidate";

  private final Geometry geom;//cannot be a direct instance of GeometryCollection as it doesn't support relate()
  private final boolean hasArea;
  private final Rectangle bbox;
  protected final JtsSpatialContext ctx;
  protected PreparedGeometry preparedGeometry;//lazily created by index(); null until then
  protected boolean validated = false;

  /**
   * Wraps {@code geom}, optionally unwrapping/cutting dateline-spanning
   * geometries (geo contexts) and unioning overlapping polygons.
   *
   * @param geom the JTS geometry to wrap; must not be a direct GeometryCollection
   * @param ctx the spatial context
   * @param dateline180Check if true (geo only), detect and handle dateline crossing
   * @param allowMultiOverlap if true, overlapping sub-geometries are unioned into one
   */
  public JtsGeometry(Geometry geom, JtsSpatialContext ctx, boolean dateline180Check, boolean allowMultiOverlap) {
    this.ctx = ctx;
    //GeometryCollection isn't supported in relate()
    if (geom.getClass().equals(GeometryCollection.class))
      throw new IllegalArgumentException("JtsGeometry does not support GeometryCollection but does support its subclasses.");

    //NOTE: All this logic is fairly expensive. There are some short-circuit checks though.
    if (ctx.isGeo()) {
      //Unwraps the geometry across the dateline so it exceeds the standard geo bounds (-180 to +180).
      if (dateline180Check)
        unwrapDateline(geom);//potentially modifies geom
      //If given multiple overlapping polygons, fix it by union
      if (allowMultiOverlap)
        geom = unionGeometryCollection(geom);//returns same or new geom

      //Cuts an unwrapped geometry back into overlaid pages in the standard geo bounds.
      geom = cutUnwrappedGeomInto360(geom);//returns same or new geom
      assert geom.getEnvelopeInternal().getWidth() <= 360;
      assert !geom.getClass().equals(GeometryCollection.class) : "GeometryCollection unsupported";//double check

      //Compute bbox
      bbox = computeGeoBBox(geom);
    } else {//not geo
      //If given multiple overlapping polygons, fix it by union
      if (allowMultiOverlap)
        geom = unionGeometryCollection(geom);//returns same or new geom

      Envelope env = geom.getEnvelopeInternal();
      bbox = new RectangleImpl(env.getMinX(), env.getMaxX(), env.getMinY(), env.getMaxY(), ctx);
    }
    geom.getEnvelopeInternal();//ensure envelope is cached internally, which is lazy evaluated. Keeps this thread-safe.
    this.geom = geom;
    assert assertValidate();//kinda expensive but caches valid state

    this.hasArea = !((geom instanceof Lineal) || (geom instanceof Puntal));
  }

  /**
   * called via assertion
   */
  private boolean assertValidate() {
    String assertValidate = System.getProperty(SYSPROP_ASSERT_VALIDATE);
    if (assertValidate == null || Boolean.parseBoolean(assertValidate))
      validate();
    return true;//always true so it can be used inside an assert statement
  }

  /**
   * Validates the shape, throwing a descriptive error if it isn't valid. Note that this
   * is usually called automatically by default, but that can be disabled.
   *
   * @throws InvalidShapeException with descriptive error if the shape isn't valid
   */
  public void validate() throws InvalidShapeException {
    if (!validated) {
      IsValidOp isValidOp = new IsValidOp(geom);
      if (!isValidOp.isValid())
        throw new InvalidShapeException(isValidOp.getValidationError().toString());
      validated = true;
    }
  }

  /**
   * Adds an index to this class internally to compute spatial relations faster. In JTS this
   * is called a {@link com.vividsolutions.jts.geom.prep.PreparedGeometry}.  This
   * isn't done by default because it takes some time to do the optimization, and it uses more
   * memory.  Calling this method isn't thread-safe so be careful when this is done. If it was
   * already indexed then nothing happens.
   */
  public void index() {
    if (preparedGeometry == null)
      preparedGeometry = PreparedGeometryFactory.prepare(geom);
  }

  @Override
  public boolean isEmpty() {
    return geom.isEmpty();
  }

  /**
   * Given {@code geoms} which has already been checked for being in world
   * bounds, return the minimal longitude range of the bounding box.
   */
  protected Rectangle computeGeoBBox(Geometry geoms) {
    if (geoms.isEmpty())
      return new RectangleImpl(Double.NaN, Double.NaN, Double.NaN, Double.NaN, ctx);
    final Envelope env = geoms.getEnvelopeInternal();//for minY & maxY (simple)
    if (env.getWidth() > 180 && geoms.getNumGeometries() > 1) {
      // This is ShapeCollection's bbox algorithm
      // Grow a longitude range over each sub-geometry's envelope; a plain
      // envelope union would be wrong for dateline-wrapping shapes.
      Range xRange = null;
      for (int i = 0; i < geoms.getNumGeometries(); i++) {
        Envelope envI = geoms.getGeometryN(i).getEnvelopeInternal();
        Range xRange2 = new Range.LongitudeRange(envI.getMinX(), envI.getMaxX());
        if (xRange == null) {
          xRange = xRange2;
        } else {
          xRange = xRange.expandTo(xRange2);
        }
        if (xRange == Range.LongitudeRange.WORLD_180E180W)
          break; // can't grow any bigger
      }
      return new RectangleImpl(xRange.getMin(), xRange.getMax(), env.getMinY(), env.getMaxY(), ctx);
    } else {
      return new RectangleImpl(env.getMinX(), env.getMaxX(), env.getMinY(), env.getMaxY(), ctx);
    }
  }

  @Override
  public JtsGeometry getBuffered(double distance, SpatialContext ctx) {
    //TODO doesn't work correctly across the dateline. The buffering needs to happen
    // when it's transiently unrolled, prior to being sliced.
    return this.ctx.makeShape(geom.buffer(distance), true, true);
  }

  @Override
  public boolean hasArea() {
    return hasArea;
  }

  @Override
  public double getArea(SpatialContext ctx) {
    double geomArea = geom.getArea();
    if (ctx == null || geomArea == 0)
      return geomArea;
    //Use the area proportional to how filled the bbox is.
    double bboxArea = getBoundingBox().getArea(null);//plain 2d area
    assert bboxArea >= geomArea;
    double filledRatio = geomArea / bboxArea;
    return getBoundingBox().getArea(ctx) * filledRatio;
    // (Future: if we know we use an equal-area projection then we don't need to
    //  estimate)
  }

  @Override
  public Rectangle getBoundingBox() {
    return bbox;
  }

  @Override
  public JtsPoint getCenter() {
    if (isEmpty()) //geom.getCentroid == null
      return new JtsPoint(ctx.getGeometryFactory().createPoint((Coordinate) null), ctx);
    return new JtsPoint(geom.getCentroid(), ctx);
  }

  @Override
  public SpatialRelation relate(Shape other) {
    // Double-dispatch on the concrete shape type; unknown shapes are asked to
    // relate to us and the answer is transposed.
    if (other instanceof Point)
      return relate((Point) other);
    else if (other instanceof Rectangle)
      return relate((Rectangle) other);
    else if (other instanceof Circle)
      return relate((Circle) other);
    else if (other instanceof JtsGeometry)
      return relate((JtsGeometry) other);
    else if (other instanceof BufferedLineString)
      throw new UnsupportedOperationException("Can't use BufferedLineString with JtsGeometry");
    return other.relate(this).transpose();
  }

  /** Relates this geometry to a point (bbox short-circuit, then JTS). */
  public SpatialRelation relate(Point pt) {
    if (!getBoundingBox().relate(pt).intersects())
      return SpatialRelation.DISJOINT;
    Geometry ptGeom;
    if (pt instanceof JtsPoint)
      ptGeom = ((JtsPoint) pt).getGeom();
    else
      ptGeom = ctx.getGeometryFactory().createPoint(new Coordinate(pt.getX(), pt.getY()));
    return relate(ptGeom);//is point-optimized
  }

  /** Relates this geometry to a rectangle (bbox short-circuit, then JTS). */
  public SpatialRelation relate(Rectangle rectangle) {
    SpatialRelation bboxR = bbox.relate(rectangle);
    if (bboxR == SpatialRelation.WITHIN || bboxR == SpatialRelation.DISJOINT)
      return bboxR;
    // FYI, the right answer could still be DISJOINT or WITHIN, but we don't know yet.
    return relate(ctx.getGeometryFrom(rectangle));
  }

  /**
   * Relates this geometry to a circle by testing each vertex coordinate
   * against the circle (bbox short-circuit first).
   */
  public SpatialRelation relate(Circle circle) {
    SpatialRelation bboxR = bbox.relate(circle);
    if (bboxR == SpatialRelation.WITHIN || bboxR == SpatialRelation.DISJOINT)
      return bboxR;

    //Test each point to see how many of them are outside of the circle.
    //TODO consider instead using geom.apply(CoordinateSequenceFilter) -- maybe faster since avoids Coordinate[] allocation
    Coordinate[] coords = geom.getCoordinates();
    int outside = 0;
    int i = 0;
    for (Coordinate coord : coords) {
      i++;
      SpatialRelation sect = circle.relate(new PointImpl(coord.x, coord.y, ctx));
      if (sect == SpatialRelation.DISJOINT)
        outside++;
      if (i != outside && outside != 0)//short circuit: partially outside, partially inside
        return SpatialRelation.INTERSECTS;
    }
    if (i == outside) {
      // all vertices outside: either fully disjoint, or we contain the circle
      return (relate(circle.getCenter()) == SpatialRelation.DISJOINT)
          ? SpatialRelation.DISJOINT : SpatialRelation.CONTAINS;
    }
    assert outside == 0;
    return SpatialRelation.WITHIN;
  }

  /** Relates this geometry to another wrapped JTS geometry. */
  public SpatialRelation relate(JtsGeometry jtsGeometry) {
    //don't bother checking bbox since geom.relate() does this already
    return relate(jtsGeometry.geom);
  }

  /** Relates this geometry to a raw JTS geometry, using the prepared index when present. */
  protected SpatialRelation relate(Geometry oGeom) {
    //see http://docs.geotools.org/latest/userguide/library/jts/dim9.html#preparedgeometry
    if (oGeom instanceof com.vividsolutions.jts.geom.Point) {
      // a point can only be DISJOINT from us or CONTAINed by us
      if (preparedGeometry != null)
        return preparedGeometry.disjoint(oGeom) ? SpatialRelation.DISJOINT : SpatialRelation.CONTAINS;
      return geom.disjoint(oGeom) ? SpatialRelation.DISJOINT : SpatialRelation.CONTAINS;
    }
    if (preparedGeometry == null)
      return intersectionMatrixToSpatialRelation(geom.relate(oGeom));
    else if (preparedGeometry.covers(oGeom))
      return SpatialRelation.CONTAINS;
    else if (preparedGeometry.coveredBy(oGeom))
      return SpatialRelation.WITHIN;
    else if (preparedGeometry.intersects(oGeom))
      return SpatialRelation.INTERSECTS;
    return SpatialRelation.DISJOINT;
  }

  /** Translates a JTS DE-9IM {@link IntersectionMatrix} into a {@link SpatialRelation}. */
  public static SpatialRelation intersectionMatrixToSpatialRelation(IntersectionMatrix matrix) {
    //As indicated in SpatialRelation javadocs, Spatial4j CONTAINS & WITHIN are
    // OGC's COVERS & COVEREDBY
    if (matrix.isCovers())
      return SpatialRelation.CONTAINS;
    else if (matrix.isCoveredBy())
      return SpatialRelation.WITHIN;
    else if (matrix.isDisjoint())
      return SpatialRelation.DISJOINT;
    return SpatialRelation.INTERSECTS;
  }

  @Override
  public String toString() {
    return geom.toString();
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) return true;
    if (o == null || getClass() != o.getClass()) return false;
    JtsGeometry that = (JtsGeometry) o;
    return geom.equalsExact(that.geom);//fast equality for normalized geometries
  }

  @Override
  public int hashCode() {
    //FYI if geometry.equalsExact(that.geometry), then their envelopes are the same.
    return geom.getEnvelopeInternal().hashCode();
  }

  /** The underlying JTS geometry. Treat as read-only; mutation would corrupt cached state. */
  public Geometry getGeom() {
    return geom;
  }

  /**
   * If <code>geom</code> spans the dateline, then this modifies it to be a
   * valid JTS geometry that extends to the right of the standard -180 to +180
   * width such that some points are greater than +180 but some remain less.
   * Takes care to invoke {@link com.vividsolutions.jts.geom.Geometry#geometryChanged()}
   * if needed.
   *
   * @return The number of times the geometry spans the dateline.  >= 0
   */
  private static int unwrapDateline(Geometry geom) {
    if (geom.getEnvelopeInternal().getWidth() < 180)
      return 0;//can't possibly cross the dateline
    final int[] crossings = {0};//an array so that an inner class can modify it.
    geom.apply(new GeometryFilter() {
      @Override
      public void filter(Geometry geom) {
        int cross = 0;
        if (geom instanceof LineString) {//note: LinearRing extends LineString
          if (geom.getEnvelopeInternal().getWidth() < 180)
            return;//can't possibly cross the dateline
          cross = unwrapDateline((LineString) geom);
        } else if (geom instanceof Polygon) {
          if (geom.getEnvelopeInternal().getWidth() < 180)
            return;//can't possibly cross the dateline
          cross = unwrapDateline((Polygon) geom);
        } else
          return;
        crossings[0] = Math.max(crossings[0], cross);
      }
    });//geom.apply()

    return crossings[0];
  }

  /**
   * See {@link #unwrapDateline(Geometry)}.
   */
  private static int unwrapDateline(Polygon poly) {
    LineString exteriorRing = poly.getExteriorRing();
    int cross = unwrapDateline(exteriorRing);
    if (cross > 0) {
      //TODO TEST THIS! Maybe bug if doesn't cross but is in another page?
      // Shift each interior ring (in 360° steps) until it sits inside the
      // unwrapped exterior ring.
      for (int i = 0; i < poly.getNumInteriorRing(); i++) {
        LineString innerLineString = poly.getInteriorRingN(i);
        unwrapDateline(innerLineString);
        for (int shiftCount = 0; !exteriorRing.contains(innerLineString); shiftCount++) {
          if (shiftCount > cross)
            throw new IllegalArgumentException("The inner ring doesn't appear to be within the exterior: "
                + exteriorRing + " inner: " + innerLineString);
          shiftGeomByX(innerLineString, 360);
        }
      }
      poly.geometryChanged();
    }
    return cross;
  }

  /**
   * See {@link #unwrapDateline(Geometry)}.
   */
  private static int unwrapDateline(LineString lineString) {
    CoordinateSequence cseq = lineString.getCoordinateSequence();
    int size = cseq.size();
    if (size <= 1)
      return 0;

    int shiftX = 0;//invariant: == shiftXPage*360
    int shiftXPage = 0;
    int shiftXPageMin = 0/* <= 0 */, shiftXPageMax = 0; /* >= 0 */
    double prevX = cseq.getX(0);
    for (int i = 1; i < size; i++) {
      double thisX_orig = cseq.getX(i);
      assert thisX_orig >= -180 && thisX_orig <= 180 : "X not in geo bounds";
      double thisX = thisX_orig + shiftX;
      // A jump of more than 180° between consecutive points is interpreted as
      // a dateline crossing; shift subsequent points by ±360° to compensate.
      if (prevX - thisX > 180) {//cross dateline from left to right
        thisX += 360;
        shiftX += 360;
        shiftXPage += 1;
        shiftXPageMax = Math.max(shiftXPageMax, shiftXPage);
      } else if (thisX - prevX > 180) {//cross dateline from right to left
        thisX -= 360;
        shiftX -= 360;
        shiftXPage -= 1;
        shiftXPageMin = Math.min(shiftXPageMin, shiftXPage);
      }
      if (shiftXPage != 0)
        cseq.setOrdinate(i, CoordinateSequence.X, thisX);
      prevX = thisX;
    }
    if (lineString instanceof LinearRing) {
      assert cseq.getCoordinate(0).equals(cseq.getCoordinate(size - 1));
      assert shiftXPage == 0;//starts and ends at 0
    }
    assert shiftXPageMax >= 0 && shiftXPageMin <= 0;
    //Unfortunately we are shifting again; it'd be nice to be smarter and shift once
    shiftGeomByX(lineString, shiftXPageMin * -360);
    int crossings = shiftXPageMax - shiftXPageMin;
    if (crossings > 0)
      lineString.geometryChanged();
    return crossings;
  }

  /** Shifts every X ordinate of {@code geom} by {@code xShift} degrees, in place. */
  private static void shiftGeomByX(Geometry geom, final int xShift) {
    if (xShift == 0)
      return;
    geom.apply(new CoordinateSequenceFilter() {
      @Override
      public void filter(CoordinateSequence seq, int i) {
        seq.setOrdinate(i, CoordinateSequence.X, seq.getX(i) + xShift);
      }

      @Override
      public boolean isDone() {
        return false;
      }

      @Override
      public boolean isGeometryChanged() {
        return true;
      }
    });
  }

  /** Unions a GeometryCollection into a single geometry; other geometries pass through. */
  private static Geometry unionGeometryCollection(Geometry geom) {
    if (geom instanceof GeometryCollection) {
      return geom.union();
    }
    return geom;
  }

  /**
   * This "pages" through standard geo boundaries offset by multiples of 360
   * longitudinally that intersect geom, and the intersecting results of a page
   * and the geom are shifted into the standard -180 to +180 and added to a new
   * geometry that is returned.
   */
  private static Geometry cutUnwrappedGeomInto360(Geometry geom) {
    Envelope geomEnv = geom.getEnvelopeInternal();
    if (geomEnv.getMinX() >= -180 && geomEnv.getMaxX() <= 180)
      return geom;
    assert geom.isValid() : "geom";

    //TODO opt: support geom's that start at negative pages --
    // ... will avoid need to previously shift in unwrapDateline(geom).
    List<Geometry> geomList = new ArrayList<Geometry>();
    //page 0 is the standard -180 to 180 range
    for (int page = 0; true; page++) {
      double minX = -180 + page * 360;
      if (geomEnv.getMaxX() <= minX)
        break;
      Geometry rect = geom.getFactory().toGeometry(new Envelope(minX, minX + 360, -90, 90));
      assert rect.isValid() : "rect";
      Geometry pageGeom = rect.intersection(geom);//JTS is doing some hard work
      assert pageGeom.isValid() : "pageGeom";
      shiftGeomByX(pageGeom, page * -360);
      geomList.add(pageGeom);
    }
    return UnaryUnionOp.union(geomList);
  }

//  private static Geometry removePolyHoles(Geometry geom) {
//    //TODO this does a deep copy of geom even if no changes needed; be smarter
//    GeometryTransformer gTrans = new GeometryTransformer() {
//      @Override
//      protected Geometry transformPolygon(Polygon geom, Geometry parent) {
//        if (geom.getNumInteriorRing() == 0)
//          return geom;
//        return factory.createPolygon((LinearRing) geom.getExteriorRing(),null);
//      }
//    };
//    return gTrans.transform(geom);
//  }
//
//  private static Geometry snapAndClean(Geometry geom) {
//    return new GeometrySnapper(geom).snapToSelf(GeometrySnapper.computeOverlaySnapTolerance(geom), true);
//  }
}
| yipen9/spatial4j | src/main/java/com/spatial4j/core/shape/jts/JtsGeometry.java | Java | apache-2.0 | 21,456 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
# Registers this module's messages with the proto-plus runtime under the
# google.ads.googleads.v9 marshal. This file is generated code; the manifest
# must list every message class defined below.
__protobuf__ = proto.module(
    package="google.ads.googleads.v9.enums",
    marshal="google.ads.googleads.v9",
    manifest={"SimulationTypeEnum",},
)
class SimulationTypeEnum(proto.Message):
    r"""Container for enum describing the field a simulation
    modifies.
    """

    class SimulationType(proto.Enum):
        r"""Enum describing the field a simulation modifies.

        The numeric values mirror the wire-format enum in the Google Ads
        proto definition and must not be changed.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        CPC_BID = 2
        CPV_BID = 3
        TARGET_CPA = 4
        BID_MODIFIER = 5
        TARGET_ROAS = 6
        PERCENT_CPC_BID = 7
        TARGET_IMPRESSION_SHARE = 8
        BUDGET = 9
__all__ = tuple(sorted(__protobuf__.manifest))
| googleads/google-ads-python | google/ads/googleads/v9/enums/types/simulation_type.py | Python | apache-2.0 | 1,302 |
package com.topie.asset;
import com.topie.core.dbmigrate.ModuleSpecification;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;
@Component
public class AssetModuleSpecification implements ModuleSpecification {

    /** Module name used to derive schema-version table names and migration locations. */
    private static final String MODULE_NAME = "asset";

    // Locale.ROOT keeps the derived table name stable regardless of the JVM's
    // default locale (avoids e.g. the Turkish dotless-i casing problem).
    private static final String MODULE_NAME_UPPER = MODULE_NAME.toUpperCase(java.util.Locale.ROOT);

    /** Database type (injected from configuration), used to pick the migration script path. */
    private String type;

    /** Whether schema migration is enabled for this module. */
    private boolean enabled;

    /** Whether initial data should be migrated for this module. */
    private boolean initData;

    public boolean isEnabled() {
        return enabled;
    }

    public String getSchemaTable() {
        return "SCHEMA_VERSION_" + MODULE_NAME_UPPER;
    }

    public String getSchemaLocation() {
        return "dbmigrate." + type + "." + MODULE_NAME;
    }

    public boolean isInitData() {
        return initData;
    }

    public String getDataTable() {
        return "SCHEMA_VERSION_DATA_" + MODULE_NAME_UPPER;
    }

    public String getDataLocation() {
        return "dbmigrate." + type + ".data_" + MODULE_NAME;
    }

    @Value("${application.database.type}")
    public void setType(String type) {
        this.type = type;
    }

    @Value("${" + MODULE_NAME + ".dbmigrate.enabled}")
    public void setEnabled(boolean enabled) {
        this.enabled = enabled;
    }

    @Value("${" + MODULE_NAME + ".dbmigrate.initData}")
    public void setInitData(boolean initData) {
        this.initData = initData;
    }
}
| topie/topie-oa | src/main/java/com/topie/asset/AssetModuleSpecification.java | Java | apache-2.0 | 1,430 |
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.IO;
using System.Linq;
using System.Net;
using System.Threading;
using System.Threading.Tasks;
using AutoMapper;
using RegTesting.Contracts;
using RegTesting.Contracts.Domain;
using RegTesting.Contracts.DTO;
using RegTesting.Tests.Core;
using Browser = RegTesting.Tests.Core.Browser;
namespace RegTesting.Node
{
class NodeLogic
{
/// <summary>
/// The types for a testfile
/// </summary>
public string[] Types { get; set; }

// Address of the central test server and the unique name of this node.
private readonly string _serverAdr;
private readonly string _nodename;

// Supplies ITestable instances loaded from the downloaded test dll.
private TestcaseProvider _testcaseProvider;

// Browsers available on this node (reported to the server on registration).
private readonly List<string> _browsers;

// Milliseconds to sleep between polls for new work.
private int _pollingIntervall;

/// <summary>
/// Creates a new node worker.
/// </summary>
/// <param name="serverAdr">address of the test server</param>
/// <param name="nodeName">unique name of this node</param>
/// <param name="browsers">browsers this node can run tests on</param>
public NodeLogic(string serverAdr, string nodeName, List<string> browsers)
{
    _serverAdr = serverAdr;
    _nodename = nodeName;
    _browsers = browsers;
    _pollingIntervall = NodeConfiguration.PollingIntervall;
}
/// <summary>
/// Main worker loop: register at the server once, then fetch, execute and
/// report work items forever.
/// </summary>
public void Run()
{
    Register();
    while (true)
    {
        EnsureBrowserClosed();
        WorkItemDto dto = WaitForWorkItem();
        WorkItem item = Mapper.Map<WorkItem>(dto);
        Console.WriteLine(@"Loading " + item.Testsystem.Name);
        UpdateTestcases(item.Testsystem);
        Console.WriteLine(@"Received" + item.Testsystem.Name);
        TestResult result = HandleTest(item);
        SendTestResultToServer(result);
        UnloadTestcases();
    }
}
/// <summary>
/// Kills any leftover browser processes so the next test starts clean.
/// </summary>
private void EnsureBrowserClosed()
{
    CloseProcesses("iexplore", "firefox");
}
/// <summary>
/// Kills every running process matching any of the given process names.
/// </summary>
/// <param name="processNames">process names (without .exe) to terminate</param>
private void CloseProcesses(params string[] processNames)
{
    foreach (string processName in processNames)
    {
        KillAll(Process.GetProcessesByName(processName));
    }
}
/// <summary>
/// Best-effort kill of the given processes; failures to terminate a single
/// process are swallowed so the remaining ones are still attempted.
/// </summary>
/// <param name="processes">the processes to terminate</param>
private void KillAll(IEnumerable<Process> processes)
{
    foreach (Process process in processes)
    {
        try
        {
            process.Kill();
            process.WaitForExit(1000 * 15);
        }
        catch (Exception)
        {
            /*Could not close Process - But at least we tried*/
        }
        finally
        {
            // Process wraps an OS handle (IDisposable); previously these
            // objects were never disposed and the handles leaked.
            process.Dispose();
        }
    }
}
/// <summary>
/// Unloads the currently loaded testcases/test dll via the testcase provider.
/// </summary>
private void UnloadTestcases()
{
    _testcaseProvider.Unload();
}
/// <summary>
/// Reports a finished test result back to the server.
/// </summary>
/// <param name="testResult">the result to report</param>
private void SendTestResultToServer(TestResult testResult)
{
    Console.Out.WriteLine("Result: " + testResult.TestState);
    using (WcfClient objWcfClient = new WcfClient(_serverAdr))
    {
        objWcfClient.FinishedWork(_nodename, testResult);
    }
    Console.Out.WriteLine("Finished.");
}
/// <summary>
/// Downloads the test dll for the given test system from the server, writes
/// it to disk and (re)initializes the testcase provider from it.
/// </summary>
/// <param name="testsystem">the test system whose test dll should be fetched</param>
private void UpdateTestcases(Testsystem testsystem)
{
    const string testfile = @"LocalTests.dll";
    byte[] data;
    using (WcfClient wcfClient = new WcfClient(_serverAdr))
    {
        data = wcfClient.FetchDll(_nodename, testsystem.Name);
    }
    // File.WriteAllBytes creates/truncates, writes and closes in one call,
    // replacing the previous manual FileStream handling.
    File.WriteAllBytes(testfile, data);
    _testcaseProvider = new TestcaseProvider(testfile);
    _testcaseProvider.CreateAppDomain();
}
/// <summary>
/// Resolves the ITestable implementation for the work item's testcase type
/// from the currently loaded test dll.
/// </summary>
/// <param name="workItem">the work item whose testcase should be instantiated</param>
private ITestable LoadTestable(WorkItem workItem)
{
    return _testcaseProvider.GetTestableFromTypeName(workItem.Testcase.Type);
}
/// <summary>
/// Polls the server until a work item becomes available, sleeping the
/// configured interval between attempts.
/// </summary>
private WorkItemDto WaitForWorkItem()
{
    Console.Out.WriteLine("Wait for WorkItem");
    while (true)
    {
        WorkItemDto workItem = FetchWork();
        if (workItem != null)
        {
            return workItem;
        }
        Thread.Sleep(_pollingIntervall);
    }
}
/// <summary>
/// Registers this node (name and available browsers) at the server.
/// </summary>
private void Register()
{
    Console.Out.WriteLine("Register at server...");
    using (WcfClient wcfClient = new WcfClient(_serverAdr))
    {
        wcfClient.Register(_nodename, _browsers);
    }
}
/// <summary>
/// Asks the server for the next work item; returns null when none is available
/// (see WaitForWorkItem, which polls on null).
/// </summary>
private WorkItemDto FetchWork()
{
    using (WcfClient wcfClient = new WcfClient(_serverAdr))
    {
        return wcfClient.GetWork(_nodename);
    }
}
/// <summary>
/// Runs a single work item end to end: loads the testcase, waits for the
/// testsystem to answer HTTP requests, sets up the browser, executes the
/// test and maps every failure mode to a TestState. Never throws — all
/// outcomes are reported through the returned TestResult.
/// </summary>
/// <param name="workItem">Describes testcase, browser, testsystem and language.</param>
/// <returns>The populated result (state, error, screenshot and log).</returns>
private TestResult HandleTest(WorkItem workItem)
{
    TestResult testResult = new TestResult();
    ITestable testable = null;
    List<string> log = new List<string>();
    try
    {
        log.Add("Test on " + _nodename);
        /**1: Load Testclass **/
        Console.WriteLine(@"Testing {0} {1} ({2}/{3})", workItem.Testcase.Name, workItem.Browser.Name, workItem.Testsystem.Name, workItem.Language.Languagecode);
        testable = LoadTestable(workItem);
        if (testable == null)
            return new TestResult { TestState = TestState.NotAvailable };
        /**2: Wait for branch get ready **/
        WaitOnWebExceptions(workItem);
        /**3: Prepare Test **/
        Browser browser = new Browser()
        {
            Browserstring = workItem.Browser.Browserstring,
            Versionsstring = workItem.Browser.Versionsstring
        };
        testable.SetupTest(WebDriverInitStrategy.SeleniumLocal, browser, workItem.Testsystem.Url,
            workItem.Language.Languagecode);
        /**4: Run Test **/
        testable.Test();
        testResult.TestState = TestState.Success;
    }
    catch (NotSupportedException notSupportedException)
    {
        // Testcase explicitly signals it does not support this configuration.
        Error error = CreateErrorFromException(notSupportedException);
        testResult.TestState = TestState.NotSupported;
        testResult.Error = error;
    }
    catch (TaskCanceledException taskCanceledException)
    {
        Error error = CreateErrorFromException(taskCanceledException);
        testResult.TestState = TestState.Canceled;
        testResult.Error = error;
    }
    catch (Exception exception)
    {
        // Any other failure: best-effort enrichment with a server-side error
        // reported by the testcase itself, then record a screenshot.
        ServerErrorModel serverException = null;
        try
        {
            if (testable != null)
                serverException = testable.CheckForServerError();
        }
        catch
        {
            //Error catching serverException
        }
        Error error = CreateErrorFromException(exception);
        if (serverException != null)
        {
            error.Type = serverException.Type;
            error.Message = serverException.Message;
            error.InnerException = serverException.InnerException;
            //objError.StackTrace = serverException.StackTrace; Keep error stacktrace.
        }
        testResult.TestState = TestState.Error;
        testResult.Error = error;
        if (testable != null)
            testResult.Screenshot = testable.SaveScreenshot("");
    }
    finally
    {
        // Always tear down and collect the testcase log, whatever the outcome.
        if (testable != null)
        {
            testable.TeardownTest();
            log.AddRange(testable.GetLogLastTime());
        }
        testResult.Log = log;
    }
    return testResult;
}
/// <summary>
/// Converts an exception into the transferable Error DTO, flattening the
/// inner exception to its string form and defaulting a missing stack trace
/// to the empty string.
/// </summary>
/// <param name="exception">The exception to convert.</param>
/// <returns>A new Error describing the exception.</returns>
private Error CreateErrorFromException(Exception exception)
{
    string innerExceptionText = exception.InnerException == null
        ? null
        : exception.InnerException.ToString();
    return new Error
    {
        Type = exception.GetType().ToString(),
        Message = exception.Message,
        StackTrace = exception.StackTrace ?? "",
        InnerException = innerExceptionText,
    };
}
/// <summary>
/// Waits until the work item's testsystem responds to a plain HTTP GET,
/// retrying up to 10 times with a 10 second pause between attempts.
/// Gives up silently after the last attempt.
/// </summary>
/// <param name="workItem">The work item whose testsystem URL is probed.</param>
private void WaitOnWebExceptions(WorkItem workItem)
{
    for (int tryCount = 0; tryCount < 10; tryCount++)
    {
        // Fix: WebClient is IDisposable and was previously never disposed,
        // leaking the underlying connection resources on every retry.
        using (WebClient webClient = new WebClient())
        {
            try
            {
                webClient.DownloadString("http://" + workItem.Testsystem.Url);
                break;
            }
            catch
            {
                // Testsystem not reachable yet. Waiting before retry...
                Thread.Sleep(10000);
            }
        }
    }
}
}
}
| AlexEndris/regtesting | RegTesting.Node/NodeLogic.cs | C# | apache-2.0 | 6,729 |
package com.netflix.governator.lifecycle;
import com.google.inject.Injector;
import com.netflix.governator.LifecycleInjectorBuilderProvider;
import com.netflix.governator.annotations.WarmUp;
import com.netflix.governator.guice.LifecycleInjector;
import com.netflix.governator.guice.LifecycleInjectorBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.annotations.Test;
import javax.annotation.PreDestroy;
import javax.inject.Inject;
import javax.inject.Singleton;
/**
* There is a infinite recursion in InternalLifecycleModule.warmUpIsInDag(InternalLifecycleModule.java:150)
* and InternalLifecycleModule.warmUpIsInDag(InternalLifecycleModule.java:171) that will ultimately lead to
* an StackOverflowError.
*/
public class CircularDAG extends LifecycleInjectorBuilderProvider
{
    /** One half of a circular dependency: A needs B. */
    @Singleton
    public static class A
    {
        @Inject
        private B b;
    }
    /** Other half of the circle: B needs A (field injection makes this resolvable by Guice). */
    @Singleton
    public static class B
    {
        @Inject
        private A a;
    }
    /**
     * Service that participates in the governator lifecycle (@WarmUp/@PreDestroy)
     * while depending on a member of the circular DAG above.
     */
    @Singleton
    public static class Service
    {
        private final Logger log = LoggerFactory.getLogger(getClass());
        @Inject
        private A a;
        @WarmUp
        public void connect()
        {
            log.info("connect");
        }
        @PreDestroy
        public void disconnect()
        {
            log.info("disconnect");
        }
    }
    /**
     * Regression test: instantiating the service and starting the lifecycle
     * manager must not recurse infinitely (previously a StackOverflowError in
     * InternalLifecycleModule.warmUpIsInDag, see class comment).
     */
    @Test(dataProvider = "builders")
    public void circle(LifecycleInjectorBuilder lifecycleInjectorBuilder) throws Exception
    {
        Injector injector = lifecycleInjectorBuilder.createInjector();
        injector.getInstance(Service.class);
        LifecycleManager manager = injector.getInstance(LifecycleManager.class);
        manager.start();
    }
}
| gorcz/governator | governator-core/src/test/java/com/netflix/governator/lifecycle/CircularDAG.java | Java | apache-2.0 | 1,747 |
/*
This file is part of TopPI - see https://github.com/slide-lig/TopPI/
Copyright 2016 Martin Kirchgessner, Vincent Leroy, Alexandre Termier, Sihem Amer-Yahia, Marie-Christine Rousset, Université Grenoble Alpes, LIG, CNRS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
or see the LICENSE.txt file joined with this program.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fr.liglab.mining.mapred;
import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.mapreduce.Mapper;
import fr.liglab.mining.mapred.writables.ItemAndSupportWritable;
import fr.liglab.mining.mapred.writables.SupportAndTransactionWritable;
/**
 * Mapper of the aggregation job: re-keys each (item, pattern) record by
 * item id plus support so the shuffle sorts patterns per item by support.
 */
public class AggregationMapper extends Mapper<IntWritable, SupportAndTransactionWritable, ItemAndSupportWritable, SupportAndTransactionWritable> {
    /** Reused output key (Hadoop idiom: avoid one allocation per record). */
    private final ItemAndSupportWritable outputKey = new ItemAndSupportWritable();

    @Override
    protected void map(IntWritable key, SupportAndTransactionWritable value, Context context)
            throws IOException, InterruptedException {
        this.outputKey.set(key.get(), value.getSupport());
        context.write(this.outputKey, value);
    }
}
| slide-lig/TopPI | src/main/java/fr/liglab/mining/mapred/AggregationMapper.java | Java | apache-2.0 | 1,561 |
package dk.apaq.cordova.geolocationx;
import de.greenrobot.event.EventBus;
import java.util.List;
import java.util.Iterator;
import java.util.Date;
import org.json.JSONException;
import org.json.JSONObject;
import android.annotation.TargetApi;
import android.app.NotificationManager;
import android.app.Notification;
import android.app.PendingIntent;
import android.app.Service;
import android.content.Context;
import android.content.Intent;
import android.content.IntentFilter;
import android.location.Criteria;
import android.location.Location;
import android.location.LocationListener;
import android.location.LocationManager;
import android.os.Build;
import android.os.Bundle;
import android.os.IBinder;
import android.util.Log;
import android.widget.Toast;
import static java.lang.Math.*;
/**
 * Foreground Android service that aggressively requests location updates from
 * every available provider and publishes the best fixes on the EventBus as
 * JSON objects. It is controlled through the ACTION_* intents below.
 *
 * NOTE(review): ACTION_STOP is declared but not handled in onStartCommand —
 * confirm that stopping is driven via stopService() from the plugin.
 */
public class LocationUpdateService extends Service implements LocationListener {
    private static final String TAG = "LocationUpdateService";
    public static final String ACTION_START = "dk.apaq.cordova.geolocationx.START";
    public static final String ACTION_STOP = "dk.apaq.cordova.geolocationx.STOP";
    public static final String ACTION_CONFIGURE = "dk.apaq.cordova.geolocationx.CONFIGURE";
    public static final String ACTION_SET_MINIMUM_DISTANCE = "dk.apaq.cordova.geolocationx.SET_MINIMUM_DISTANCE";
    public static final String ACTION_SET_MINIMUM_INTERVAL = "dk.apaq.cordova.geolocationx.SET_MINIMUM_INTERVAL";
    public static final String ACTION_SET_PRECISION = "dk.apaq.cordova.geolocationx.SET_PRECISION";
    private static final int TWO_MINUTES = 1000 * 60 * 2;
    private Location lastLocation;            // best fix seen so far
    private Boolean isDebugging = false;      // when true, show toasts for each fix
    private String notificationTitle = "";
    private String notificationText = "";
    private Long locationTimeout;
    private String activityType;
    private LocationManager locationManager;
    private NotificationManager notificationManager;
    @Override
    public IBinder onBind(Intent intent) {
        Log.i(TAG, "OnBind" + intent);
        return null;
    }
    @Override
    public void onCreate() {
        super.onCreate();
        Log.i(TAG, "OnCreate");
        locationManager = (LocationManager)this.getSystemService(Context.LOCATION_SERVICE);
        notificationManager = (NotificationManager)this.getSystemService(Context.NOTIFICATION_SERVICE);
    }
    @Override
    public int onStartCommand(Intent intent, int flags, int startId) {
        Log.i(TAG, "Received start id " + startId + ": " + intent);
        if (intent != null) {
            Log.d(TAG, "Action: " + intent.getAction());
            // debug intent values values
            Bundle bundle = intent.getExtras();
            if(bundle != null) {
                for (String key : bundle.keySet()) {
                    Object value = bundle.get(key);
                    Log.d(TAG, String.format("%s %s (%s)", key,
                            value.toString(), value.getClass().getName()));
                }
            }
            if(intent.getAction().equals(ACTION_START)) {
                this.startRecording();
                // Build a Notification required for running service in foreground.
                Intent main = new Intent(this, BackgroundGpsPlugin.class);
                main.setFlags(Intent.FLAG_ACTIVITY_CLEAR_TOP | Intent.FLAG_ACTIVITY_SINGLE_TOP);
                PendingIntent pendingIntent = PendingIntent.getActivity(this, 0, main, PendingIntent.FLAG_UPDATE_CURRENT);
                Notification.Builder builder = new Notification.Builder(this);
                builder.setContentTitle(notificationTitle);
                builder.setContentText(notificationText);
                builder.setSmallIcon(android.R.drawable.ic_menu_mylocation);
                builder.setContentIntent(pendingIntent);
                Notification notification;
                notification = builder.build();
                notification.flags |= Notification.FLAG_ONGOING_EVENT | Notification.FLAG_FOREGROUND_SERVICE | Notification.FLAG_NO_CLEAR;
                startForeground(startId, notification);
            }
            if(intent.getAction().equals(ACTION_CONFIGURE)) {
                locationTimeout = Long.parseLong(intent.getStringExtra("locationTimeout"));
                isDebugging = Boolean.parseBoolean(intent.getStringExtra("isDebugging"));
                notificationTitle = intent.getStringExtra("notificationTitle");
                notificationText = intent.getStringExtra("notificationText");
                activityType = intent.getStringExtra("activityType");
                Log.i(TAG, "- notificationTitle: " + notificationTitle);
                Log.i(TAG, "- notificationText: " + notificationText);
            }
            if(intent.getAction().equals(ACTION_SET_MINIMUM_DISTANCE)) {
                // TODO
                Log.i(TAG, "- minimumDistance: " + intent.getStringExtra("value"));
            }
            if(intent.getAction().equals(ACTION_SET_MINIMUM_INTERVAL)) {
                // TODO
                Log.i(TAG, "- minimumInterval: " + intent.getStringExtra("value"));
            }
            if(intent.getAction().equals(ACTION_SET_PRECISION)) {
                // TODO
                Log.i(TAG, "- precision: " + intent.getStringExtra("value"));
            }
        }
        //We want this service to continue running until it is explicitly stopped
        return START_REDELIVER_INTENT;
    }
    @Override
    public void onDestroy() {
        Log.w(TAG, "------------------------------------------ Destroyed Location update Service");
        cleanUp();
        super.onDestroy();
    }
    @TargetApi(Build.VERSION_CODES.ICE_CREAM_SANDWICH)
    @Override
    public void onTaskRemoved(Intent rootIntent) {
        this.stopSelf();
        super.onTaskRemoved(rootIntent);
    }
    @Override
    public boolean stopService(Intent intent) {
        Log.i(TAG, "- Received stop: " + intent);
        cleanUp();
        if (isDebugging) {
            Toast.makeText(this, "Background location tracking stopped", Toast.LENGTH_SHORT).show();
        }
        return super.stopService(intent);
    }
    /**
     * Start recording aggresively from all found providers
     */
    private void startRecording() {
        Log.i(TAG, "startRecording");
        locationManager.removeUpdates(this);
        // Turn on all providers aggressively, except the passive one.
        List<String> matchingProviders = locationManager.getAllProviders();
        for (String provider: matchingProviders) {
            // Fix: the previous identity comparison (provider !=
            // LocationManager.PASSIVE_PROVIDER) compared String references and
            // only worked by interning luck; compare by value instead.
            if (!LocationManager.PASSIVE_PROVIDER.equals(provider)) {
                locationManager.requestLocationUpdates(provider, 0, 0, this);
            }
        }
    }
    /** Stop listening for updates and leave foreground mode. */
    private void cleanUp() {
        locationManager.removeUpdates(this);
        stopForeground(true);
    }
    /** Determines whether one Location reading is better than the current Location fix
      * @param location  The new Location that you want to evaluate
      * @param currentBestLocation  The current Location fix, to which you want to compare the new one
      */
    protected boolean isBetterLocation(Location location, Location currentBestLocation) {
        if (currentBestLocation == null) {
            // A new location is always better than no location
            return true;
        }
        // Check whether the new location fix is newer or older
        long timeDelta = location.getTime() - currentBestLocation.getTime();
        boolean isSignificantlyNewer = timeDelta > TWO_MINUTES;
        boolean isSignificantlyOlder = timeDelta < -TWO_MINUTES;
        boolean isNewer = timeDelta > 0;
        // If it's been more than two minutes since the current location, use the new location
        // because the user has likely moved
        if (isSignificantlyNewer) {
            return true;
        // If the new location is more than two minutes older, it must be worse
        } else if (isSignificantlyOlder) {
            return false;
        }
        // Check whether the new location fix is more or less accurate
        int accuracyDelta = (int) (location.getAccuracy() - currentBestLocation.getAccuracy());
        boolean isLessAccurate = accuracyDelta > 0;
        boolean isMoreAccurate = accuracyDelta < 0;
        boolean isSignificantlyLessAccurate = accuracyDelta > 200;
        // Check if the old and new location are from the same provider
        boolean isFromSameProvider = isSameProvider(location.getProvider(),
                currentBestLocation.getProvider());
        // Determine location quality using a combination of timeliness and accuracy
        if (isMoreAccurate) {
            return true;
        } else if (isNewer && !isLessAccurate) {
            return true;
        } else if (isNewer && !isSignificantlyLessAccurate && isFromSameProvider) {
            return true;
        }
        return false;
    }
    /** Checks whether two providers are the same */
    private boolean isSameProvider(String provider1, String provider2) {
        if (provider1 == null) {
            return provider2 == null;
        }
        return provider1.equals(provider2);
    }
    // ------------------ LOCATION LISTENER INTERFACE -------------------------
    public void onLocationChanged(Location location) {
        Log.d(TAG, "- onLocationChanged: " + location.getLatitude() + "," + location.getLongitude() + ", accuracy: " + location.getAccuracy() + ", speed: " + location.getSpeed());
        if(isDebugging){
            Toast.makeText(this, "acy:"+location.getAccuracy()+",v:"+location.getSpeed(), Toast.LENGTH_LONG).show();
        }
        if(isBetterLocation(location, lastLocation)){
            Log.d(TAG, "Location is better");
            lastLocation = location;
            // send it via bus to activity
            try{
                JSONObject pos = new JSONObject();
                JSONObject loc = new JSONObject();
                loc.put("latitude", location.getLatitude());
                loc.put("longitude", location.getLongitude());
                loc.put("accuracy", location.getAccuracy());
                loc.put("speed", location.getSpeed());
                loc.put("bearing", location.getBearing());
                loc.put("altitude", location.getAltitude());
                pos.put("coords", loc);
                pos.put("timestamp", new Date().getTime());
                EventBus.getDefault().post(pos);
                Log.d(TAG, "posting location to bus");
            }catch(JSONException e){
                Log.e(TAG, "could not parse location");
            }
        }else{
            Log.d(TAG, "Location is no better than current");
        }
    }
    public void onProviderDisabled(String provider) {
        // TODO Auto-generated method stub
        Log.d(TAG, "- onProviderDisabled: " + provider);
    }
    public void onProviderEnabled(String provider) {
        // TODO Auto-generated method stub
        Log.d(TAG, "- onProviderEnabled: " + provider);
    }
    public void onStatusChanged(String provider, int status, Bundle extras) {
        // TODO Auto-generated method stub
        Log.d(TAG, "- onStatusChanged: " + provider + ", status: " + status);
    }
    // -------------------------- LOCATION LISTENER INTERFACE END -------------------------
}
| michaelkrog/cordova-plugin-geolocation-x | src/android/dk/apaq/cordova/geolocationx/LocationUpdateService.java | Java | apache-2.0 | 11,318 |
package vandy.mooc.provider;
import android.content.ContentUris;
import android.net.Uri;
import android.provider.BaseColumns;
/**
* Defines table and column names for the Acronym database.
*/
public final class VideoContract {
    /**
     * The "Content authority" is a name for the entire content provider,
     * similar to the relationship between a domain name and its website. A
     * convenient string to use for the content authority is the package name
     * for the app, which must be unique on the device.
     */
    public static final String CONTENT_AUTHORITY = "vandy.mooc.video";
    /**
     * Use CONTENT_AUTHORITY to create the base of all URI's that apps will use
     * to contact the content provider.
     */
    public static final Uri BASE_CONTENT_URI = Uri.parse("content://"
            + CONTENT_AUTHORITY);
    /**
     * Possible paths (appended to base content URI for possible URI's), e.g.,
     * content://vandy.mooc.video/video_table/ is a valid path for Video data.
     * However, content://vandy.mooc.video/givemeroot/ will fail since the
     * ContentProvider hasn't been given any information on what to do with
     * "givemeroot".
     */
    public static final String PATH_VIDEO = VideoEntry.TABLE_NAME;
    /**
     * Inner class that defines the contents of the Video table.
     */
    public static final class VideoEntry implements BaseColumns {
        /**
         * Use BASE_CONTENT_URI to create the unique URI for the Video Table
         * that apps will use to contact the content provider.
         */
        public static final Uri CONTENT_URI = BASE_CONTENT_URI.buildUpon()
                .appendPath(PATH_VIDEO).build();
        /**
         * MIME type used when the Cursor returned for a given URI by the
         * ContentProvider contains 0..x items.
         */
        public static final String CONTENT_ITEMS_TYPE = "vnd.android.cursor.dir/"
                + CONTENT_AUTHORITY + "/" + PATH_VIDEO;
        /**
         * MIME type used when the Cursor returned for a given URI by the
         * ContentProvider contains 1 item.
         */
        public static final String CONTENT_ITEM_TYPE = "vnd.android.cursor.item/"
                + CONTENT_AUTHORITY + "/" + PATH_VIDEO;
        /**
         * Name of the database table.
         */
        public static final String TABLE_NAME = "video_table";
        /**
         * Columns storing the metadata of each Video.
         */
        public static final String COLUMN_TITLE = "title";
        public static final String COLUMN_DURATION = "duration";
        public static final String COLUMN_CONTENT_TYPE = "content_type";
        public static final String COLUMN_DATA_URL = "data_url";
        public static final String COLUMN_STAR_RATING = "star_rating";
        /**
         * Return a Uri that points to the row containing a given id.
         *
         * @param id row id of the video
         * @return Uri content URI for that single row
         */
        public static Uri buildVideoUri(Long id) {
            return ContentUris.withAppendedId(CONTENT_URI, id);
        }
    }
}
| dexter-at-git/coursera-android-spring | assignments/assignment3/client/src/vandy/mooc/provider/VideoContract.java | Java | apache-2.0 | 2,701 |
package org.jasig.cas.authentication.principal;
import com.google.common.collect.ImmutableMap;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.jasig.cas.logout.SingleLogoutService;
import org.jasig.cas.validation.ValidationResponseType;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.net.URLDecoder;
import java.util.Map;
/**
* Abstract implementation of a WebApplicationService.
*
* @author Scott Battaglia
* @since 3.1
*/
public abstract class AbstractWebApplicationService implements SingleLogoutService {
    private static final long serialVersionUID = 610105280927740076L;
    // Shared empty attribute map returned by getAttributes() (never mutated).
    private static final Map<String, Object> EMPTY_MAP = ImmutableMap.of();
    /** Logger instance. **/
    protected final Logger logger = LoggerFactory.getLogger(this.getClass());
    /** The id of the service. */
    private final String id;
    /** The original url provided, used to reconstruct the redirect url. */
    private final String originalUrl;
    // Artifact id (e.g. proxy/ticket artifact) associated with this service.
    private final String artifactId;
    // Principal resolved for this service; set after authentication.
    private Principal principal;
    // True once single logout has already been performed for this service.
    private boolean loggedOutAlready;
    // Strategy used by getResponse() to build the redirect/post response.
    private final ResponseBuilder<WebApplicationService> responseBuilder;
    // Requested validation response format; defaults to XML.
    private ValidationResponseType format = ValidationResponseType.XML;
    /**
     * Instantiates a new abstract web application service.
     *
     * @param id the id
     * @param originalUrl the original url
     * @param artifactId the artifact id
     * @param responseBuilder the response builder
     */
    protected AbstractWebApplicationService(final String id, final String originalUrl,
        final String artifactId, final ResponseBuilder<WebApplicationService> responseBuilder) {
        this.id = id;
        this.originalUrl = originalUrl;
        this.artifactId = artifactId;
        this.responseBuilder = responseBuilder;
    }
    @Override
    public final String toString() {
        return this.id;
    }
    @Override
    public final String getId() {
        return this.id;
    }
    @Override
    public final String getArtifactId() {
        return this.artifactId;
    }
    /** Always returns an immutable empty map; subclasses carry no attributes here. */
    @Override
    public final Map<String, Object> getAttributes() {
        return EMPTY_MAP;
    }
    /**
     * Return the original url provided (as {@code service} or {@code targetService} request parameter).
     * Used to reconstruct the redirect url.
     *
     * @return the original url provided.
     */
    @Override
    public final String getOriginalUrl() {
        return this.originalUrl;
    }
    // Equality is based solely on the service id, for any Service implementation.
    @Override
    public boolean equals(final Object object) {
        if (object == null) {
            return false;
        }
        if (object instanceof Service) {
            final Service service = (Service) object;
            return getId().equals(service.getId());
        }
        return false;
    }
    @Override
    public int hashCode() {
        return new HashCodeBuilder()
                .append(this.id)
                .toHashCode();
    }
    public Principal getPrincipal() {
        return this.principal;
    }
    @Override
    public void setPrincipal(final Principal principal) {
        this.principal = principal;
    }
    /**
     * Compares this service's id with the given service's id after URL-decoding
     * both, case-insensitively. Decoding errors are logged and treated as a
     * non-match.
     */
    @Override
    public boolean matches(final Service service) {
        try {
            final String thisUrl = URLDecoder.decode(this.id, "UTF-8");
            final String serviceUrl = URLDecoder.decode(service.getId(), "UTF-8");
            logger.trace("Decoded urls and comparing [{}] with [{}]", thisUrl, serviceUrl);
            return thisUrl.equalsIgnoreCase(serviceUrl);
        } catch (final Exception e) {
            logger.error(e.getMessage(), e);
        }
        return false;
    }
    /**
     * Return if the service is already logged out.
     *
     * @return if the service is already logged out.
     */
    @Override
    public boolean isLoggedOutAlready() {
        return loggedOutAlready;
    }
    /**
     * Set if the service is already logged out.
     *
     * @param loggedOutAlready if the service is already logged out.
     */
    @Override
    public final void setLoggedOutAlready(final boolean loggedOutAlready) {
        this.loggedOutAlready = loggedOutAlready;
    }
    protected ResponseBuilder<? extends WebApplicationService> getResponseBuilder() {
        return responseBuilder;
    }
    @Override
    public ValidationResponseType getFormat() {
        return format;
    }
    public void setFormat(final ValidationResponseType format) {
        this.format = format;
    }
    /** Builds the response for the given ticket using the configured response builder. */
    @Override
    public Response getResponse(final String ticketId) {
        return this.responseBuilder.build(this, ticketId);
    }
}
| PetrGasparik/cas | cas-server-core-services/src/main/java/org/jasig/cas/authentication/principal/AbstractWebApplicationService.java | Java | apache-2.0 | 4,645 |
/*
* Copyright 2008-2011 Wolfgang Keller
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "GuiOpenGL/GuiComponentsBasic.h"
#ifdef _WIN32
#include <windows.h>
#endif
#include <GL/gl.h>
#include <cassert>
/*!
 * Appends one straight (miter-style) border joint at currVertex to the
 * triangle strip: the vertex itself plus the vertex offset inward/outward
 * along the miter direction. The last parameter (curve segment count) is
 * unused for straight borders and therefore unnamed.
 */
void createStraightBorder(Vertex2<float> prevVertex,
	Vertex2<float> currVertex,
	Vertex2<float> nextVertex,
	std::vector<Vertex2<float> >* pBorderTriangleStrip,
	float borderWidth,
	size_t)
{
	// Unit direction vectors of the two edges meeting at currVertex.
	Vector2<float> prevToCurrVect = currVertex - prevVertex;
	Vector2<float> currToNextVect = nextVertex - currVertex;
	normalize(&prevToCurrVect);
	normalize(&currToNextVect);
	// Average edge direction at the corner; its normal is the miter direction.
	Vector2<float> direction = prevToCurrVect+currToNextVect;
	normalize(&direction);
	Vector2<float> rightFromDirection = normal(direction);
	// Scale the miter offset by the projection of the outgoing edge onto the
	// miter normal — presumably to keep the border width constant as the
	// corner angle changes (sign convention: TODO confirm against caller).
	float scaleFactor =
		-borderWidth/(currToNextVect.x*rightFromDirection.x+
		currToNextVect.y*rightFromDirection.y);
	pBorderTriangleStrip->push_back(currVertex);
	pBorderTriangleStrip->push_back(currVertex+rightFromDirection*scaleFactor);
}
/*!
 * Appends a rounded border joint at currVertex to the triangle strip by
 * sweeping the border offset from the incoming edge's normal to the outgoing
 * edge's normal in curveSegmentsCount equal angular steps (so the strip gets
 * curveSegmentsCount+1 vertex pairs).
 */
void createRoundBorder(Vertex2<float> prevVertex,
	Vertex2<float> currVertex,
	Vertex2<float> nextVertex,
	std::vector<Vertex2<float> >* pBorderTriangleStrip,
	float borderWidth,
	size_t curveSegmentsCount)
{
	assert(curveSegmentsCount>=1);
	// Unit direction vectors and their normals for the two adjacent edges.
	Vector2<float> prevToCurrVect = currVertex - prevVertex;
	Vector2<float> currToNextVect = nextVertex - currVertex;
	normalize(&prevToCurrVect);
	normalize(&currToNextVect);
	Vector2<float> prevToCurrNormal = normal(prevToCurrVect);
	Vector2<float> currToNextNormal = normal(currToNextVect);
	/*
	 * The orthogonal matrix that rotates (1, 0) to prevToCurrNormal is
	 *
	 * | prevToCurrNormal.x prevToCurrVect.x |
	 * | prevToCurrNormal.y prevToCurrVect.y |
	 *
	 * Since this is an orthogonal matrix the inverse one is this one transposed
	 */
	Matrix22<float> orth = Matrix22<float>(prevToCurrNormal.x, prevToCurrNormal.y,
		prevToCurrVect.x, prevToCurrVect.y).transpose();
	// Express the outgoing normal in the incoming normal's frame; the angle of
	// that vector is the total sweep angle of the round joint.
	Vector2<float> angleVector = orth * currToNextNormal;
	// The order has to be y, x -- see declaration of atan2f
	float angle = atan2f(angleVector.y, angleVector.x);
	for (size_t i=0; i<=curveSegmentsCount; i++)
	{
		// Rotate the incoming normal by the i-th fraction of the sweep angle
		// and emit the vertex pair (corner, corner + rotated offset).
		float currentAngle = i*angle/curveSegmentsCount;
		Matrix22<float> currentRotation = Matrix22<float>(
			cosf(currentAngle), sinf(currentAngle),
			-sinf(currentAngle), cosf(currentAngle));
		Vector2<float> movement = currentRotation*prevToCurrNormal*borderWidth;
		pBorderTriangleStrip->push_back(currVertex);
		pBorderTriangleStrip->push_back(currVertex+movement);
	}
}
/*!
* vertices output:
* [0]: bottom left
* [1]: bottom right
* [2]: top left
* [3]: top right
*/
/*!
 * Appends the four corners of an axis-aligned box to boxVertices in
 * triangle-strip order. Input (left, top, width, height) uses a top-left,
 * y-down convention; output y is flipped into the GL y-up space via
 * currentHeight (the viewport height).
 *
 * Strip order:
 *   2-3
 *   |\|
 *   0-1
 */
void createBoxVertices(std::vector<Vertex2<float> >* boxVertices,
	float left, float top, float width, float height,
	float currentHeight)
{
	const float right = left+width;
	const float bottomY = currentHeight-top-height;
	const float topY = currentHeight-top;
	boxVertices->push_back(Vertex2<float>(left, bottomY));   // 0: bottom left
	boxVertices->push_back(Vertex2<float>(right, bottomY));  // 1: bottom right
	boxVertices->push_back(Vertex2<float>(left, topY));      // 2: top left
	boxVertices->push_back(Vertex2<float>(right, topY));     // 3: top right
}
/*!
 * Draws the vertices as a 4-vertex GL_TRIANGLE_STRIP with one color per
 * vertex (fixed-function client arrays). Expects exactly 4 vertices/colors;
 * restores the client state it enables before returning.
 */
void drawVertexArray(const std::vector<Vertex2<float> >* vertices, Color4<float> colors[4])
{
	glEnableClientState(GL_VERTEX_ARRAY);
	glEnableClientState(GL_COLOR_ARRAY);
	glVertexPointer(2, GL_FLOAT, 0, &vertices->at(0));
	glColorPointer(4, GL_FLOAT, 0, colors);
	glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
	glDisableClientState(GL_COLOR_ARRAY);
	glDisableClientState(GL_VERTEX_ARRAY);
}
/*!
 * Draws all vertices as a GL_TRIANGLE_STRIP in a single flat color
 * (fixed-function client arrays); restores the client state before returning.
 */
void drawVertexArray(const std::vector<Vertex2<float> >* vertices, Color4<float> color)
{
	glEnableClientState(GL_VERTEX_ARRAY);
	glColor4fv(&color.r);
	glVertexPointer(2, GL_FLOAT, 0, &vertices->at(0));
	glDrawArrays(GL_TRIANGLE_STRIP, 0, (GLsizei) vertices->size());
	glDisableClientState(GL_VERTEX_ARRAY);
}
| dreamsxin/101_browser | src/GuiOpenGL/GuiComponentsBasic.cpp | C++ | apache-2.0 | 4,410 |
/**
* Copyright (c) 2013-2021 Nikita Koksharov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.redisson;
import org.redisson.api.RFuture;
import org.redisson.api.RPatternTopic;
import org.redisson.api.listener.PatternMessageListener;
import org.redisson.api.listener.PatternStatusListener;
import org.redisson.client.ChannelName;
import org.redisson.client.RedisPubSubListener;
import org.redisson.client.RedisTimeoutException;
import org.redisson.client.codec.Codec;
import org.redisson.client.protocol.pubsub.PubSubType;
import org.redisson.command.CommandAsyncExecutor;
import org.redisson.config.MasterSlaveServersConfig;
import org.redisson.misc.CompletableFutureWrapper;
import org.redisson.pubsub.AsyncSemaphore;
import org.redisson.pubsub.PubSubConnectionEntry;
import org.redisson.pubsub.PublishSubscribeService;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.CompletableFuture;
/**
* Distributed topic implementation. Messages are delivered to all message listeners across Redis cluster.
*
* @author Nikita Koksharov
*
*/
public class RedissonPatternTopic implements RPatternTopic {
    final PublishSubscribeService subscribeService;
    final CommandAsyncExecutor commandExecutor;
    // Redis channel pattern name (e.g. "news.*").
    private final String name;
    private final ChannelName channelName;
    private final Codec codec;
    protected RedissonPatternTopic(CommandAsyncExecutor commandExecutor, String name) {
        this(commandExecutor.getConnectionManager().getCodec(), commandExecutor, name);
    }
    protected RedissonPatternTopic(Codec codec, CommandAsyncExecutor commandExecutor, String name) {
        this.commandExecutor = commandExecutor;
        this.name = name;
        this.channelName = new ChannelName(name);
        this.codec = codec;
        this.subscribeService = commandExecutor.getConnectionManager().getSubscribeService();
    }
    @Override
    public int addListener(PatternStatusListener listener) {
        return addListener(new PubSubPatternStatusListener(listener, name));
    };
    @Override
    public <T> int addListener(Class<T> type, PatternMessageListener<T> listener) {
        PubSubPatternMessageListener<T> pubSubListener = new PubSubPatternMessageListener<T>(type, listener, name);
        return addListener(pubSubListener);
    }
    // Blocking psubscribe; the returned id (identity hash of the wrapper) is
    // what removeListener(int) later expects.
    private int addListener(RedisPubSubListener<?> pubSubListener) {
        CompletableFuture<Collection<PubSubConnectionEntry>> future = subscribeService.psubscribe(channelName, codec, pubSubListener);
        commandExecutor.get(future);
        return System.identityHashCode(pubSubListener);
    }
    @Override
    public RFuture<Integer> addListenerAsync(PatternStatusListener listener) {
        PubSubPatternStatusListener pubSubListener = new PubSubPatternStatusListener(listener, name);
        return addListenerAsync(pubSubListener);
    }
    @Override
    public <T> RFuture<Integer> addListenerAsync(Class<T> type, PatternMessageListener<T> listener) {
        PubSubPatternMessageListener<T> pubSubListener = new PubSubPatternMessageListener<T>(type, listener, name);
        return addListenerAsync(pubSubListener);
    }
    // Async variant: resolves to the listener id once psubscribe completes.
    private RFuture<Integer> addListenerAsync(RedisPubSubListener<?> pubSubListener) {
        CompletableFuture<Collection<PubSubConnectionEntry>> future = subscribeService.psubscribe(channelName, codec, pubSubListener);
        CompletableFuture<Integer> f = future.thenApply(res -> {
            return System.identityHashCode(pubSubListener);
        });
        return new CompletableFutureWrapper<>(f);
    }
    // Acquires the per-channel semaphore, failing with a timeout derived from
    // the configured command timeout plus all retry intervals.
    protected void acquire(AsyncSemaphore semaphore) {
        MasterSlaveServersConfig config = commandExecutor.getConnectionManager().getConfig();
        int timeout = config.getTimeout() + config.getRetryInterval() * config.getRetryAttempts();
        if (!semaphore.tryAcquire(timeout)) {
            throw new RedisTimeoutException("Remove listeners operation timeout: (" + timeout + "ms) for " + name + " topic");
        }
    }
    @Override
    public RFuture<Void> removeListenerAsync(int listenerId) {
        CompletableFuture<Void> f = subscribeService.removeListenerAsync(PubSubType.PUNSUBSCRIBE, channelName, listenerId);
        return new CompletableFutureWrapper<>(f);
    }
    @Override
    public void removeListener(int listenerId) {
        commandExecutor.get(removeListenerAsync(listenerId).toCompletableFuture());
    }
    // Removes all listeners for this pattern; guarded by the channel semaphore
    // so it cannot race with concurrent (un)subscribe operations.
    @Override
    public void removeAllListeners() {
        AsyncSemaphore semaphore = subscribeService.getSemaphore(channelName);
        acquire(semaphore);
        PubSubConnectionEntry entry = subscribeService.getPubSubEntry(channelName);
        if (entry == null) {
            semaphore.release();
            return;
        }
        if (entry.hasListeners(channelName)) {
            subscribeService.unsubscribe(PubSubType.PUNSUBSCRIBE, channelName).toCompletableFuture().join();
        }
        semaphore.release();
    }
    @Override
    public void removeListener(PatternMessageListener<?> listener) {
        CompletableFuture<Void> future = subscribeService.removeListenerAsync(PubSubType.PUNSUBSCRIBE, channelName, listener);
        commandExecutor.get(future);
    }
    @Override
    public List<String> getPatternNames() {
        return Collections.singletonList(name);
    }
}
| redisson/redisson | redisson/src/main/java/org/redisson/RedissonPatternTopic.java | Java | apache-2.0 | 5,917 |
#Please read the "useful links" before going on; they are necessary for a better understanding
import StringIO
import json #Imports the json library that decodes json tokens recieved from telegram api
import logging #Imports the library that puts messages in the log info of the google app engine
import random #Library that creates random numbers
import urllib
import urllib2
# for sending images
from PIL import Image
import multipart
# standard app engine imports
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
import webapp2
TOKEN = 'YOUR_BOT_TOKEN_HERE'  # Telegram bot token issued by @BotFather
BASE_URL = 'https://api.telegram.org/bot' + TOKEN + '/'  # Bot API root for this bot
# ================================
class EnableStatus(ndb.Model):
    # Datastore entity keyed by str(chat_id); remembers whether the bot is
    # enabled for that chat.
    enabled = ndb.BooleanProperty(indexed=False, default=False)  # current on/off state
# ================================
def setEnabled(chat_id, yes):
    """Persist the bot's enabled/disabled state for a chat.

    Bug fix: previously, when no entity existed yet, a fresh one was
    stored WITHOUT assigning ``enabled``, so the first ``/start`` in a new
    chat silently saved the default ``False``.  The state is now always
    assigned before ``put()``.

    :param chat_id: Telegram chat identifier (converted to str for the key).
    :param yes: True to enable the bot for this chat, False to disable.
    """
    es = ndb.Key(EnableStatus, str(chat_id)).get()
    if es is None:
        es = EnableStatus(id=str(chat_id))
    es.enabled = yes
    es.put()
def getEnabled(chat_id):
    """Return True if the bot is enabled for this chat.

    When no record exists yet, a default (disabled) record is created and
    False is returned.
    """
    status = ndb.Key(EnableStatus, str(chat_id)).get()
    if status is not None:
        return status.enabled
    # First contact with this chat: create the default record.
    EnableStatus(id=str(chat_id)).put()
    return False
# ================================ This part makes the comunication google-telegram
class MeHandler(webapp2.RequestHandler):
    # GET /me -- proxy Telegram's getMe call and echo the bot's identity as JSON.
    def get(self):
        urlfetch.set_default_fetch_deadline(60)  # allow slow Telegram responses
        self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'getMe'))))
class GetUpdatesHandler(webapp2.RequestHandler):
    # GET /updates -- proxy Telegram's getUpdates call (polling-style debugging aid).
    def get(self):
        urlfetch.set_default_fetch_deadline(60)  # allow slow Telegram responses
        self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'getUpdates'))))
class SetWebhookHandler(webapp2.RequestHandler):
    # GET /set_webhook?url=... -- register the given URL with Telegram so that
    # future updates are POSTed to our /webhook endpoint.
    def get(self):
        urlfetch.set_default_fetch_deadline(60)
        url = self.request.get('url')
        if url:  # silently does nothing when no url parameter was supplied
            self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'setWebhook', urllib.urlencode({'url': url})))))
class WebhookHandler(webapp2.RequestHandler):
    # POST /webhook -- entry point for updates pushed by Telegram.
    def post(self):
        urlfetch.set_default_fetch_deadline(60)
        body = json.loads(self.request.body)
        logging.info('request body:')
        logging.info(body)
        self.response.write(json.dumps(body))
        # Pull the fields we use out of the update; more fields exist in the
        # Telegram Bot API payload if ever needed.
        update_id = body['update_id']
        message = body['message']
        message_id = message.get('message_id')
        date = message.get('date')
        text = message.get('text')  # the message text, may be absent (photos etc.)
        fr = message.get('from')
        chat = message['chat']
        chat_id = chat['id']  # chat identifier used for replies and state
        if not text:
            # Non-text updates (stickers, photos, ...) are ignored.
            logging.info('no text')
            return

        def reply(msg=None, img=None):
            # Send either a text message or a JPEG image back to the chat.
            # Exactly one of `msg` (unicode string) or `img` (JPEG bytes)
            # should be given.
            if msg:
                resp = urllib2.urlopen(BASE_URL + 'sendMessage', urllib.urlencode({
                    'chat_id': str(chat_id),
                    'text': msg.encode('utf-8'),
                    'disable_web_page_preview': 'true',
                    'reply_to_message_id': str(message_id),
                })).read()
            elif img:
                resp = multipart.post_multipart(BASE_URL + 'sendPhoto', [
                    ('chat_id', str(chat_id)),
                    ('reply_to_message_id', str(message_id)),
                ], [
                    ('photo', 'image.jpg', img),
                ])
            else:
                # Neither argument given: log it so the mistake is visible.
                logging.error('no msg or img specified')
                resp = None
            logging.info('send response:')
            logging.info(resp)

        # Commands start with '/'; add new ones as extra elif branches.
        if text.startswith('/'):
            if text == '/start':
                reply('Bot enabled')
                setEnabled(chat_id, True)  # remember the enabled state per chat
            elif text == '/stop':
                reply('Bot disabled')
                setEnabled(chat_id, False)
            elif text == '/image':
                # Generate and send a pseudo-random gradient image.
                img = Image.new('RGB', (512, 512))
                base = random.randint(0, 16777216)  # random base colour (24-bit)
                pixels = [base+i*j for i in range(512) for j in range(512)]  # generate sample image
                img.putdata(pixels)
                output = StringIO.StringIO()
                img.save(output, 'JPEG')
                reply(img=output.getvalue())
                """If you want to send a different image use this piece of code:
                img = Image.open("image.jpg")
                output = StringIO.StringIO()
                img.save(output, 'JPEG')
                reply(img=output.getvalue())"""
            else:
                reply('What command?')
        # Non-command messages: a couple of canned answers, then the chatbot.
        elif 'who are you' in text:
            reply('telebot starter kit, created by yukuku: https://github.com/yukuku/telebot')
        elif 'what time' in text:
            reply('look at the top-right corner of your screen!')
        else:
            if getEnabled(chat_id):  # only chat when the bot is enabled for this chat
                try:
                    # Forward the message to the simsimi chatbot service.
                    resp1 = json.load(urllib2.urlopen('http://www.simsimi.com/requestChat?lc=en&ft=1.0&req=' + urllib.quote_plus(text.encode('utf-8'))))
                    back = resp1.get('res')
                except urllib2.HTTPError, err:
                    logging.error(err)
                    back = str(err)
                if not back:
                    reply('okay...')
                elif 'I HAVE NO RESPONSE' in back:
                    reply('you said something with no meaning')
                else:
                    reply(back)
            else:
                logging.info('not enabled for chat_id {}'.format(chat_id))
#Telegram comunication (dont change)
# WSGI routing table; paths are referenced by the Telegram webhook setup,
# so do not change them.
app = webapp2.WSGIApplication([
    ('/me', MeHandler),
    ('/updates', GetUpdatesHandler),
    ('/set_webhook', SetWebhookHandler),
    ('/webhook', WebhookHandler),
], debug=True)
| 0Cristofer/telebot | main.py | Python | apache-2.0 | 6,516 |
/*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package androidx.test.internal.runner.junit3;
import java.util.Enumeration;
import junit.framework.Test;
import junit.framework.TestResult;
import junit.framework.TestSuite;
import org.junit.Ignore;
/**
 * A {@link TestSuite} that delegates all calls to another {@link TestSuite}.
 *
 * <p>Marked {@link Ignore} so JUnit4 discovery does not treat this wrapper
 * itself as a runnable suite.</p>
 */
@Ignore
class DelegatingTestSuite extends TestSuite {

    /** The suite that receives every delegated call. */
    private TestSuite wrappedSuite;

    /**
     * @param suiteDelegate the suite all calls will be forwarded to
     */
    public DelegatingTestSuite(TestSuite suiteDelegate) {
        super();
        wrappedSuite = suiteDelegate;
    }

    /** Return the suite to delegate to */
    public TestSuite getDelegateSuite() {
        return wrappedSuite;
    }

    /**
     * Replace the suite to delegate to
     *
     * @param newSuiteDelegate the new delegate target
     */
    public void setDelegateSuite(TestSuite newSuiteDelegate) {
        wrappedSuite = newSuiteDelegate;
    }

    @Override
    public void addTest(Test test) {
        wrappedSuite.addTest(test);
    }

    @Override
    public int countTestCases() {
        return wrappedSuite.countTestCases();
    }

    @Override
    public String getName() {
        return wrappedSuite.getName();
    }

    @Override
    public void runTest(Test test, TestResult result) {
        wrappedSuite.runTest(test, result);
    }

    @Override
    public void setName(String name) {
        wrappedSuite.setName(name);
    }

    @Override
    public Test testAt(int index) {
        return wrappedSuite.testAt(index);
    }

    @Override
    public int testCount() {
        return wrappedSuite.testCount();
    }

    @Override
    public Enumeration<Test> tests() {
        return wrappedSuite.tests();
    }

    @Override
    public String toString() {
        return wrappedSuite.toString();
    }

    @Override
    public void run(TestResult result) {
        wrappedSuite.run(result);
    }
}
| android/android-test | runner/android_junit_runner/java/androidx/test/internal/runner/junit3/DelegatingTestSuite.java | Java | apache-2.0 | 2,279 |
from flask import Blueprint, render_template, Response, current_app, send_from_directory
from pyox import ServiceError
from pyox.apps.monitor.api import get_cluster_client
from datetime import datetime
# Blueprint for the HTML cluster-monitoring pages.
cluster_ui = Blueprint('cluster_ui',__name__,template_folder='templates')
@cluster_ui.route('/')
def index():
    """Render the cluster overview page.

    On a service error, return a plain-text response carrying the upstream
    status code; a 401 additionally advertises HTTP Basic authentication.
    """
    client = get_cluster_client()
    try:
        info = client.info()
        scheduler = client.scheduler()
        metrics = client.metrics()
        # The cluster reports startedOn in epoch milliseconds; show ISO-8601.
        started = datetime.fromtimestamp(info['startedOn'] / 1e3)
        info['startedOn'] = started.isoformat()
        return render_template('cluster.html', info=info, scheduler=scheduler, metrics=metrics)
    except ServiceError as err:
        text = err.message if err.status_code != 401 else 'Authentication Required'
        return Response(
            status=err.status_code,
            response=text,
            mimetype="text/plain",
            headers={'WWW-Authenticate': 'Basic realm="Login Required"'})
# Separate blueprint for serving static assets.
assets = Blueprint('assets_ui',__name__)
@assets.route('/assets/<path:path>')
def send_asset(path):
    """Serve a static asset file.

    Uses the configured ASSETS directory when set; otherwise falls back to
    the ``assets/`` directory bundled next to this module.

    Fix: the local variable was named ``dir``, shadowing the builtin; it is
    renamed without changing behavior.
    """
    assets_dir = current_app.config.get('ASSETS')
    if assets_dir is None:
        assets_dir = __file__[:__file__.rfind('/')] + '/assets/'
    return send_from_directory(assets_dir, path)
| alexmilowski/python-hadoop-rest-api | pyox/apps/monitor/views.py | Python | apache-2.0 | 1,129 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import base64
import codecs
import cherrypy
import io
import json
import logging
import os
import shutil
import signal
import six
import sys
import unittest
import uuid
from six import BytesIO
from six.moves import urllib
from girder.utility import model_importer
from girder.utility.server import setup as setupServer
from girder.constants import AccessType, ROOT_DIR, SettingKey
from girder.models import getDbConnection
from . import mock_smtp
from . import mock_s3
from . import mongo_replicaset
# Fake local/remote endpoints used when driving cherrypy's request machinery
# directly (no real sockets are opened during tests).
local = cherrypy.lib.httputil.Host('127.0.0.1', 30000)
remote = cherrypy.lib.httputil.Host('127.0.0.1', 30001)
mockSmtp = mock_smtp.MockSmtpReceiver()  # captures outgoing mail during tests
mockS3Server = None  # started lazily by startServer(mockS3=True)
enabledPlugins = []  # plugin names to enable when the test server starts
def startServer(mock=True, mockS3=False):
    """
    Test cases that communicate with the server should call this
    function in their setUpModule() function.

    :param mock: when True, run cherrypy in embedded (quiet, socketless) mode.
    :param mockS3: when True, also start a local mock S3 server.
    :returns: the configured server object.
    """
    server = setupServer(test=True, plugins=enabledPlugins)

    if mock:
        # Detach the real HTTP server; requests are dispatched in-process.
        cherrypy.server.unsubscribe()

    cherrypy.engine.start()

    # Make server quiet (won't announce start/stop or requests)
    cherrypy.config.update({'environment': 'embedded'})

    # Log all requests if we asked to do so
    if 'cherrypy' in os.environ.get('EXTRADEBUG', '').split():
        cherrypy.config.update({'log.screen': True})
        logHandler = logging.StreamHandler(sys.stdout)
        logHandler.setLevel(logging.DEBUG)
        cherrypy.log.error_log.addHandler(logHandler)

    mockSmtp.start()

    if mockS3:
        global mockS3Server
        mockS3Server = mock_s3.startMockS3Server()

    return server
def stopServer():
    """
    Test cases that communicate with the server should call this
    function in their tearDownModule() function.

    Stops the cherrypy engine and the mock SMTP receiver started by
    :func:`startServer`.
    """
    cherrypy.engine.exit()
    mockSmtp.stop()
def dropTestDatabase(dropModels=True):
    """Clear all contents from the test database.

    Refuses to drop any database whose name does not look like a test
    database.  Optionally forces all models to reload afterwards.

    :param dropModels: when True, reinitialize all models after the drop.
    """
    connection = getDbConnection()
    dbName = cherrypy.config['database']['uri'].split('/')[-1]
    # Safety net: never drop a non-test database by accident.
    if 'girder_test_' not in dbName:
        raise Exception('Expected a testing database name, but got %s' % dbName)
    connection.drop_database(dbName)
    if dropModels:
        model_importer.reinitializeAll()
def dropGridFSDatabase(dbName):
    """Clear all contents from a GridFS database used as an assetstore.

    :param dbName: the name of the database to drop.
    """
    getDbConnection().drop_database(dbName)
def dropFsAssetstore(path):
    """Delete every file in a filesystem assetstore.

    This unlinks the whole directory tree, which is potentially dangerous;
    a non-existent path is silently a no-op.

    :param path: the assetstore root directory to remove.
    """
    if not os.path.isdir(path):
        return
    shutil.rmtree(path)
class TestCase(unittest.TestCase, model_importer.ModelImporter):
    """
    Test case base class for the application. Adds helpful utilities for
    database and HTTP communication, custom assertions, and in-process
    request dispatching against the cherrypy app.
    """
    def setUp(self, assetstoreType=None, dropModels=True):
        """
        We want to start with a clean database each time, so we drop the test
        database before each test. We then add an assetstore so the file model
        can be used without 500 errors.
        :param assetstoreType: if 'gridfs' or 's3', use that assetstore. For
            'gridfsrs' a replica-set-backed GridFS store is used. For
            any other value, use a filesystem assetstore.
        :param dropModels: forwarded to dropTestDatabase.
        """
        self.assetstoreType = assetstoreType
        dropTestDatabase(dropModels=dropModels)
        assetstoreName = os.environ.get('GIRDER_TEST_ASSETSTORE', 'test')
        assetstorePath = os.path.join(
            ROOT_DIR, 'tests', 'assetstore', assetstoreName)
        if assetstoreType == 'gridfs':
            # Name this as '_auto' to prevent conflict with assetstores created
            # within test methods
            gridfsDbName = 'girder_test_%s_assetstore_auto' % assetstoreName
            dropGridFSDatabase(gridfsDbName)
            self.assetstore = self.model('assetstore'). \
                createGridFsAssetstore(name='Test', db=gridfsDbName)
        elif assetstoreType == 'gridfsrs':
            # GridFS backed by a three-member mongo replica set.
            gridfsDbName = 'girder_test_%s_rs_assetstore_auto' % assetstoreName
            mongo_replicaset.startMongoReplicaSet()
            self.assetstore = self.model('assetstore'). \
                createGridFsAssetstore(
                    name='Test', db=gridfsDbName,
                    mongohost='mongodb://127.0.0.1:27070,127.0.0.1:27071,'
                              '127.0.0.1:27072', replicaset='replicaset')
        elif assetstoreType == 's3':
            # Points at the in-process mock S3 server started in startServer.
            self.assetstore = self.model('assetstore'). \
                createS3Assetstore(name='Test', bucket='bucketname',
                                   accessKeyId='test', secret='test',
                                   service=mockS3Server.service)
        else:
            # Default: a throwaway filesystem assetstore under the tests dir.
            dropFsAssetstore(assetstorePath)
            self.assetstore = self.model('assetstore'). \
                createFilesystemAssetstore(name='Test', root=assetstorePath)

        # Point outgoing mail at the mock SMTP receiver and make uploads
        # single-chunk friendly.
        addr = ':'.join(map(str, mockSmtp.address))
        self.model('setting').set(SettingKey.SMTP_HOST, addr)
        self.model('setting').set(SettingKey.UPLOAD_MINIMUM_CHUNK_SIZE, 0)
        self.model('setting').set(SettingKey.PLUGINS_ENABLED, enabledPlugins)
    def tearDown(self):
        """
        Stop any services that we started just for this test.
        """
        # If "self.setUp" is overridden, "self.assetstoreType" may not be set
        if getattr(self, 'assetstoreType', None) == 'gridfsrs':
            mongo_replicaset.stopMongoReplicaSet()
    def assertStatusOk(self, response):
        """
        Call this to assert that the response yielded a 200 OK output_status.

        :param response: The response object.
        """
        self.assertStatus(response, 200)
def assertStatus(self, response, code):
"""
Call this to assert that a given HTTP status code was returned.
:param response: The response object.
:param code: The status code.
:type code: int or str
"""
code = str(code)
if not response.output_status.startswith(code.encode()):
msg = 'Response status was %s, not %s.' % (response.output_status,
code)
if hasattr(response, 'json'):
msg += ' Response body was:\n%s' % json.dumps(
response.json, sort_keys=True, indent=4,
separators=(',', ': '))
self.fail(msg)
def assertHasKeys(self, obj, keys):
"""
Assert that the given object has the given list of keys.
:param obj: The dictionary object.
:param keys: The keys it must contain.
:type keys: list or tuple
"""
for k in keys:
self.assertTrue(k in obj, 'Object does not contain key "%s"' % k)
    def assertRedirect(self, resp, url=None):
        """
        Assert that we were given an HTTP redirect response (303), and
        optionally assert that you were redirected to a specific URL.

        :param resp: The response object.
        :param url: If you know the URL you expect to be redirected to, you
            should pass it here.
        :type url: str
        """
        self.assertStatus(resp, 303)
        self.assertTrue('Location' in resp.headers)

        if url:
            self.assertEqual(url, resp.headers['Location'])
def assertNotHasKeys(self, obj, keys):
"""
Assert that the given object does not have any of the given list of
keys.
:param obj: The dictionary object.
:param keys: The keys it must not contain.
:type keys: list or tuple
"""
for k in keys:
self.assertFalse(k in obj, 'Object contains key "%s"' % k)
    def assertValidationError(self, response, field=None):
        """
        Assert that a ValidationException was thrown with the given field.

        :param response: The response object.
        :param field: The field that threw the validation exception.
        :type field: str
        """
        self.assertStatus(response, 400)
        self.assertEqual(response.json['type'], 'validation')
        self.assertEqual(response.json.get('field', None), field)
def assertAccessDenied(self, response, level, modelName, user=None):
if level == AccessType.READ:
ls = 'Read'
elif level == AccessType.WRITE:
ls = 'Write'
else:
ls = 'Admin'
if user is None:
self.assertStatus(response, 401)
else:
self.assertStatus(response, 403)
self.assertEqual('%s access denied for %s.' % (ls, modelName),
response.json['message'])
def assertMissingParameter(self, response, param):
"""
Assert that the response was a "parameter missing" error response.
:param response: The response object.
:param param: The name of the missing parameter.
:type param: str
"""
self.assertEqual("Parameter '%s' is required." % param,
response.json.get('message', ''))
self.assertStatus(response, 400)
def getSseMessages(self, resp):
messages = self.getBody(resp).strip().split('\n\n')
if not messages or messages == ['']:
return ()
return [json.loads(m.replace('data: ', '')) for m in messages]
    def uploadFile(self, name, contents, user, parent, parentType='folder',
                   mimeType=None):
        """
        Upload a file. This is meant for small testing files, not very large
        files that should be sent in multiple chunks.

        :param name: The name of the file.
        :type name: str
        :param contents: The file contents
        :type contents: str
        :param user: The user performing the upload.
        :type user: dict
        :param parent: The parent document.
        :type parent: dict
        :param parentType: The type of the parent ("folder" or "item")
        :type parentType: str
        :param mimeType: Explicit MIME type to set on the file.
        :type mimeType: str
        :returns: The file that was created.
        :rtype: dict
        """
        mimeType = mimeType or 'application/octet-stream'
        # Step 1: initialize the upload to get an upload id.
        resp = self.request(
            path='/file', method='POST', user=user, params={
                'parentType': parentType,
                'parentId': str(parent['_id']),
                'name': name,
                'size': len(contents),
                'mimeType': mimeType
            })
        self.assertStatusOk(resp)

        # Step 2: send the whole contents as a single chunk.
        fields = [('offset', 0), ('uploadId', resp.json['_id'])]
        files = [('chunk', name, contents)]
        resp = self.multipartRequest(
            path='/file/chunk', user=user, fields=fields, files=files)
        self.assertStatusOk(resp)

        # Sanity-check the created file document before returning it.
        file = resp.json
        self.assertHasKeys(file, ['itemId'])
        self.assertEqual(file['name'], name)
        self.assertEqual(file['size'], len(contents))
        self.assertEqual(file['mimeType'], mimeType)

        return self.model('file').load(file['_id'], force=True)
def ensureRequiredParams(self, path='/', method='GET', required=(),
user=None):
"""
Ensure that a set of parameters is required by the endpoint.
:param path: The endpoint path to test.
:param method: The HTTP method of the endpoint.
:param required: The required parameter set.
:type required: sequence of str
"""
for exclude in required:
params = dict.fromkeys([p for p in required if p != exclude], '')
resp = self.request(path=path, method=method, params=params,
user=user)
self.assertMissingParameter(resp, exclude)
def _genToken(self, user):
"""
Helper method for creating an authentication token for the user.
"""
token = self.model('token').createToken(user)
return str(token['_id'])
def _buildHeaders(self, headers, cookie, user, token, basicAuth,
authHeader):
if cookie is not None:
headers.append(('Cookie', cookie))
if user is not None:
headers.append(('Girder-Token', self._genToken(user)))
elif token is not None:
if isinstance(token, dict):
headers.append(('Girder-Token', token['_id']))
else:
headers.append(('Girder-Token', token))
if basicAuth is not None:
auth = base64.b64encode(basicAuth.encode('utf8'))
headers.append((authHeader, 'Basic %s' % auth.decode()))
    def request(self, path='/', method='GET', params=None, user=None,
                prefix='/api/v1', isJson=True, basicAuth=None, body=None,
                type=None, exception=False, cookie=None, token=None,
                additionalHeaders=None, useHttps=False,
                authHeader='Girder-Authorization'):
        """
        Make an HTTP request by dispatching it directly into the cherrypy
        application (no sockets involved).

        :param path: The path part of the URI.
        :type path: str
        :param method: The HTTP method.
        :type method: str
        :param params: The HTTP parameters.
        :type params: dict
        :param prefix: The prefix to use before the path.
        :param isJson: Whether the response is a JSON object.
        :param basicAuth: A string to pass with the Authorization: Basic header
            of the form 'login:password'
        :param body: Raw request body; overrides form-encoded params.
        :param type: Content-Type for the raw body.
        :param exception: Set this to True if a 500 is expected from this call.
        :param cookie: A custom cookie value to set.
        :param token: If you want to use an existing token to login, pass
            the token ID.
        :type token: str
        :param additionalHeaders: A list of headers to add to the
                                  request. Each item is a tuple of the form
                                  (header-name, header-value).
        :param useHttps: If True, pretend to use HTTPS.
        :param authHeader: The HTTP request header to use for authentication.
        :type authHeader: str
        :returns: The cherrypy response object from the request.
        """
        if not params:
            params = {}

        headers = [('Host', '127.0.0.1'), ('Accept', 'application/json')]
        qs = fd = None

        if additionalHeaders:
            headers.extend(additionalHeaders)

        # For write methods (or an explicit body), send data in the request
        # body; otherwise encode params into the query string.
        if method in ['POST', 'PUT', 'PATCH'] or body:
            if isinstance(body, six.string_types):
                body = body.encode('utf8')

            qs = urllib.parse.urlencode(params).encode('utf8')
            if type is None:
                headers.append(('Content-Type',
                                'application/x-www-form-urlencoded'))
            else:
                headers.append(('Content-Type', type))
                qs = body
            headers.append(('Content-Length', '%d' % len(qs)))
            fd = BytesIO(qs)
            qs = None
        elif params:
            qs = urllib.parse.urlencode(params)

        app = cherrypy.tree.apps['']
        request, response = app.get_serving(
            local, remote, 'http' if not useHttps else 'https', 'HTTP/1.1')
        request.show_tracebacks = True

        self._buildHeaders(headers, cookie, user, token, basicAuth, authHeader)

        # Python2 will not match Unicode URLs
        url = str(prefix + path)
        try:
            response = request.run(method, url, qs, 'HTTP/1.1', headers, fd)
        finally:
            if fd:
                fd.close()

        if isJson:
            body = self.getBody(response)
            try:
                response.json = json.loads(body)
            except Exception:
                print(body)
                raise AssertionError('Did not receive JSON response')

        # Surface unexpected internal errors loudly unless the caller opted in.
        if not exception and response.output_status.startswith(b'500'):
            raise AssertionError("Internal server error: %s" %
                                 self.getBody(response))

        return response
def getBody(self, response, text=True):
"""
Returns the response body as a text type or binary string.
:param response: The response object from the server.
:param text: If true, treat the data as a text string, otherwise, treat
as binary.
"""
data = '' if text else b''
for chunk in response.body:
if text and isinstance(chunk, six.binary_type):
chunk = chunk.decode('utf8')
elif not text and not isinstance(chunk, six.binary_type):
chunk = chunk.encode('utf8')
data += chunk
return data
    def multipartRequest(self, fields, files, path, method='POST', user=None,
                         prefix='/api/v1', isJson=True):
        """
        Make an HTTP request with multipart/form-data encoding. This can be
        used to send files with the request.

        :param fields: List of (name, value) tuples.
        :param files: List of (name, filename, content) tuples.
        :param path: The path part of the URI.
        :type path: str
        :param method: The HTTP method.
        :type method: str
        :param user: The user to authenticate the request as.
        :param prefix: The prefix to use before the path.
        :param isJson: Whether the response is a JSON object.
        :returns: The cherrypy response object from the request.
        """
        contentType, body, size = MultipartFormdataEncoder().encode(
            fields, files)

        headers = [('Host', '127.0.0.1'),
                   ('Accept', 'application/json'),
                   ('Content-Type', contentType),
                   ('Content-Length', str(size))]

        # Dispatch directly into the cherrypy app, same as request().
        app = cherrypy.tree.apps['']
        request, response = app.get_serving(local, remote, 'http', 'HTTP/1.1')
        request.show_tracebacks = True

        if user is not None:
            headers.append(('Girder-Token', self._genToken(user)))

        fd = io.BytesIO(body)
        # Python2 will not match Unicode URLs
        url = str(prefix + path)
        try:
            response = request.run(method, url, None, 'HTTP/1.1', headers, fd)
        finally:
            fd.close()

        if isJson:
            body = self.getBody(response)
            try:
                response.json = json.loads(body)
            except Exception:
                print(body)
                raise AssertionError('Did not receive JSON response')

        if response.output_status.startswith(b'500'):
            raise AssertionError("Internal server error: %s" %
                                 self.getBody(response))

        return response
class MultipartFormdataEncoder(object):
    """
    This class is adapted from http://stackoverflow.com/a/18888633/2550451

    It is used as a helper for creating multipart/form-data requests to
    simulate file uploads.
    """
    def __init__(self):
        # A random boundary keeps field/file payloads unambiguous.
        self.boundary = uuid.uuid4().hex
        self.contentType = \
            'multipart/form-data; boundary=%s' % self.boundary

    @classmethod
    def u(cls, s):
        # Coerce to a text (unicode) string on both Python 2 and 3.
        if sys.hexversion < 0x03000000 and isinstance(s, str):
            s = s.decode('utf-8')
        if sys.hexversion >= 0x03000000 and isinstance(s, bytes):
            s = s.decode('utf-8')
        return s

    def iter(self, fields, files):
        """Yield (bytes, length) pairs forming the multipart body.

        :param fields: iterable of (key, value) form fields.
        :param files: iterable of (key, filename, content) file parts.
        """
        encoder = codecs.getencoder('utf-8')
        for (key, value) in fields:
            key = self.u(key)
            yield encoder('--%s\r\n' % self.boundary)
            yield encoder(self.u('Content-Disposition: form-data; '
                                 'name="%s"\r\n') % key)
            yield encoder('\r\n')
            if isinstance(value, int) or isinstance(value, float):
                value = str(value)
            yield encoder(self.u(value))
            yield encoder('\r\n')
        for (key, filename, content) in files:
            key = self.u(key)
            filename = self.u(filename)
            yield encoder('--%s\r\n' % self.boundary)
            yield encoder(self.u('Content-Disposition: form-data; name="%s";'
                                 ' filename="%s"\r\n' % (key, filename)))
            yield encoder('Content-Type: application/octet-stream\r\n')
            yield encoder('\r\n')
            # File content is passed through unmodified (already bytes).
            yield (content, len(content))
            yield encoder('\r\n')
        # Closing boundary terminates the multipart body.
        yield encoder('--%s--\r\n' % self.boundary)

    def encode(self, fields, files):
        """Return (contentType, body bytes, body size) for the given parts."""
        body = io.BytesIO()
        size = 0
        for chunk, chunkLen in self.iter(fields, files):
            if not isinstance(chunk, six.binary_type):
                chunk = chunk.encode('utf8')
            body.write(chunk)
            size += chunkLen
        return self.contentType, body.getvalue(), size
def _sigintHandler(*args):
    # On Ctrl-C, stop the mock SMTP server thread before exiting so the
    # process does not hang on a non-daemon thread.
    print('Received SIGINT, shutting down mock SMTP server...')
    mockSmtp.stop()
    sys.exit(1)

signal.signal(signal.SIGINT, _sigintHandler)
| salamb/girder | tests/base.py | Python | apache-2.0 | 21,800 |
# -*- coding: utf-8 -*-
'''
Module for listing programs that automatically run on startup
(very alpha...not tested on anything but my Win 7x64)
'''
# Import python libs
import os
# Import salt libs
import salt.utils
# Define a function alias in order not to shadow built-in's
# Expose list_ as `autoruns.list` without shadowing the builtin.
__func_alias__ = {
    'list_': 'list'
}

# Define the module's virtual name
__virtualname__ = 'autoruns'
def __virtual__():
    '''
    Only works on Windows systems
    '''
    return __virtualname__ if salt.utils.is_windows() else False
def list_():
    '''
    Get a list of automatically running programs

    CLI Example:

    .. code-block:: bash

        salt '*' autoruns.list
    '''
    autoruns = {}

    # Find autoruns in the registry (both native and 64-bit views for HKLM).
    keys = ['HKLM\\Software\\Microsoft\\Windows\\CurrentVersion\\Run',
            'HKLM\\Software\\Microsoft\\Windows\\CurrentVersion\\Run /reg:64',
            'HKCU\\Software\\Microsoft\\Windows\\CurrentVersion\\Run'
            ]
    winver = __grains__['osfullname']
    for key in keys:
        autoruns[key] = []
        cmd = 'reg query ' + key
        # Fix: removed a stray debug `print cmd` that polluted module output.
        for line in __salt__['cmd.run'](cmd).splitlines():
            # Skip header ("HKEY...") and error lines emitted by `reg query`.
            if line and line[0:4] != "HKEY" and line[0:5] != "ERROR":
                autoruns[key].append(line)

    # Find autoruns in each user's startup folder.
    # NOTE(review): the substring check matches any version string containing
    # '7' (e.g. "Windows Server 2008 R2" does not, but others might) -- a more
    # robust OS-version grain check would be preferable; behavior kept as-is.
    if '7' in winver:
        user_dir = 'C:\\Users\\'
        startup_dir = '\\AppData\\Roaming\\Microsoft\\Windows\\Start Menu\\Programs\\Startup'
    else:
        user_dir = 'C:\\Documents and Settings\\'
        startup_dir = '\\Start Menu\\Programs\\Startup'
    for user in os.listdir(user_dir):
        try:
            full_dir = user_dir + user + startup_dir
            files = os.listdir(full_dir)
            autoruns[full_dir] = []
            for afile in files:
                autoruns[full_dir].append(afile)
        except Exception:
            # Best effort: skip users whose startup folder is missing or
            # unreadable rather than failing the whole listing.
            pass

    return autoruns
| victorywang80/Maintenance | saltstack/src/salt/modules/win_autoruns.py | Python | apache-2.0 | 1,932 |
package com.github.snailycy.androidhybridlib;
import android.widget.Toast;
import com.github.snailycy.hybridlib.bridge.BaseJSPluginSync;
import org.json.JSONObject;
/**
* Created by ycy on 2017/9/27.
*/
public class JSGetCachePlugin extends BaseJSPluginSync {

    /**
     * Handles a synchronous JS-to-native call: shows the raw request in a
     * toast (debug aid) and returns a JSON payload to the web view.
     *
     * @param requestParams JSON string sent from JavaScript
     * @return a JSON response string, or {@code null} when the input is not
     *         valid JSON or building the response fails
     */
    @Override
    public String jsCallNative(String requestParams) {
        Toast.makeText(getContext(), "jsCallNative , requestParams = " + requestParams, Toast.LENGTH_LONG).show();
        try {
            // Parse only to validate the incoming payload; its content is unused.
            new JSONObject(requestParams);
            JSONObject jsonObject = new JSONObject();
            jsonObject.put("aaa", "hahahahah");
            return jsonObject.toString();
        } catch (Exception e) {
            // Fix: the catch block was empty; at least record the failure
            // instead of silently swallowing it.
            e.printStackTrace();
        }
        return null;
    }
}
| snailycy/AndroidHybridLib | sample/src/main/java/com/github/snailycy/androidhybridlib/JSGetCachePlugin.java | Java | apache-2.0 | 748 |
/**
* Copyright (c) 2016-present, RxJava Contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is
* distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See
* the License for the specific language governing permissions and limitations under the License.
*/
package io.reactivex;
import java.util.NoSuchElementException;
import java.util.concurrent.*;
import org.reactivestreams.*;
import io.reactivex.annotations.*;
import io.reactivex.disposables.Disposable;
import io.reactivex.exceptions.Exceptions;
import io.reactivex.functions.*;
import io.reactivex.internal.functions.*;
import io.reactivex.internal.fuseable.*;
import io.reactivex.internal.observers.BlockingMultiObserver;
import io.reactivex.internal.operators.flowable.*;
import io.reactivex.internal.operators.maybe.*;
import io.reactivex.internal.operators.mixed.*;
import io.reactivex.internal.util.*;
import io.reactivex.observers.TestObserver;
import io.reactivex.plugins.RxJavaPlugins;
import io.reactivex.schedulers.Schedulers;
/**
* The {@code Maybe} class represents a deferred computation and emission of a single value, no value at all or an exception.
* <p>
* The {@code Maybe} class implements the {@link MaybeSource} base interface and the default consumer
* type it interacts with is the {@link MaybeObserver} via the {@link #subscribe(MaybeObserver)} method.
* <p>
* The {@code Maybe} operates with the following sequential protocol:
* <pre><code>
* onSubscribe (onSuccess | onError | onComplete)?
* </code></pre>
* <p>
* Note that {@code onSuccess}, {@code onError} and {@code onComplete} are mutually exclusive events; unlike {@code Observable},
* {@code onSuccess} is never followed by {@code onError} or {@code onComplete}.
* <p>
* Like {@link Observable}, a running {@code Maybe} can be stopped through the {@link Disposable} instance
* provided to consumers through {@link MaybeObserver#onSubscribe}.
* <p>
* Like an {@code Observable}, a {@code Maybe} is lazy, can be either "hot" or "cold", synchronous or
* asynchronous. {@code Maybe} instances returned by the methods of this class are <em>cold</em>
* and there is a standard <em>hot</em> implementation in the form of a subject:
* {@link io.reactivex.subjects.MaybeSubject MaybeSubject}.
* <p>
* The documentation for this class makes use of marble diagrams. The following legend explains these diagrams:
* <p>
* <img width="640" height="370" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/maybe.png" alt="">
* <p>
* See {@link Flowable} or {@link Observable} for the
* implementation of the Reactive Pattern for a stream or vector of values.
* <p>
* Example:
* <pre><code>
* Disposable d = Maybe.just("Hello World")
* .delay(10, TimeUnit.SECONDS, Schedulers.io())
* .subscribeWith(new DisposableMaybeObserver<String>() {
* @Override
* public void onStart() {
* System.out.println("Started");
* }
*
* @Override
* public void onSuccess(String value) {
* System.out.println("Success: " + value);
* }
*
* @Override
* public void onError(Throwable error) {
* error.printStackTrace();
* }
*
* @Override
* public void onComplete() {
* System.out.println("Done!");
* }
* });
*
* Thread.sleep(5000);
*
* d.dispose();
* </code></pre>
* <p>
* Note that by design, subscriptions via {@link #subscribe(MaybeObserver)} can't be disposed
* from the outside (hence the
* {@code void} return of the {@link #subscribe(MaybeObserver)} method) and it is the
* responsibility of the implementor of the {@code MaybeObserver} to allow this to happen.
* RxJava supports such usage with the standard
* {@link io.reactivex.observers.DisposableMaybeObserver DisposableMaybeObserver} instance.
* For convenience, the {@link #subscribeWith(MaybeObserver)} method is provided as well to
* allow working with a {@code MaybeObserver} (or subclass) instance to be applied with in
* a fluent manner (such as in the example above).
*
* @param <T> the value type
* @since 2.0
* @see io.reactivex.observers.DisposableMaybeObserver
*/
public abstract class Maybe<T> implements MaybeSource<T> {
/**
* Runs multiple MaybeSources and signals the events of the first one that signals (disposing
* the rest).
* <p>
* <img width="640" height="519" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.amb.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code amb} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @param <T> the value type
* @param sources the Iterable sequence of sources. A subscription to each source will
* occur in the same order as in the Iterable.
* @return the new Maybe instance
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public static <T> Maybe<T> amb(final Iterable<? extends MaybeSource<? extends T>> sources) {
    ObjectHelper.requireNonNull(sources, "sources is null");
    // MaybeAmb accepts either an array or an Iterable of sources; this overload
    // supplies the Iterable and leaves the array slot null.
    MaybeAmb<T> race = new MaybeAmb<T>(null, sources);
    return RxJavaPlugins.onAssembly(race);
}
/**
* Runs multiple MaybeSources and signals the events of the first one that signals (disposing
* the rest).
* <p>
* <img width="640" height="519" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.ambArray.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code ambArray} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @param <T> the value type
* @param sources the array of sources. A subscription to each source will
* occur in the same order as in the array.
* @return the new Maybe instance
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
@SuppressWarnings("unchecked")
public static <T> Maybe<T> ambArray(final MaybeSource<? extends T>... sources) {
    // Fail fast with a descriptive message, consistent with amb() and concatArray();
    // previously a null array produced a bare NPE from sources.length.
    ObjectHelper.requireNonNull(sources, "sources is null");
    if (sources.length == 0) {
        // No sources: nothing can ever signal, complete immediately.
        return empty();
    }
    if (sources.length == 1) {
        // Single source: no race needed, just wrap it.
        return wrap((MaybeSource<T>)sources[0]);
    }
    return RxJavaPlugins.onAssembly(new MaybeAmb<T>(sources, null));
}
/**
* Concatenate the single values, in a non-overlapping fashion, of the MaybeSource sources provided by
* an Iterable sequence.
* <p>
* <img width="640" height="526" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.concat.i.png" alt="">
* <dl>
* <dt><b>Backpressure:</b></dt>
* <dd>The returned {@code Flowable} honors the backpressure of the downstream consumer.</dd>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code concat} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @param <T> the value type
* @param sources the Iterable sequence of MaybeSource instances
* @return the new Flowable instance
*/
@BackpressureSupport(BackpressureKind.FULL)
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public static <T> Flowable<T> concat(Iterable<? extends MaybeSource<? extends T>> sources) {
    ObjectHelper.requireNonNull(sources, "sources is null");
    // Route through the plugin hook so assembly-time instrumentation can observe the operator.
    MaybeConcatIterable<T> operator = new MaybeConcatIterable<T>(sources);
    return RxJavaPlugins.onAssembly(operator);
}
/**
* Returns a Flowable that emits the items emitted by two MaybeSources, one after the other.
* <p>
* <img width="640" height="422" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.concat.png" alt="">
* <dl>
* <dt><b>Backpressure:</b></dt>
* <dd>The returned {@code Flowable} honors the backpressure of the downstream consumer.</dd>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code concat} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param <T> the common value type
* @param source1
* a MaybeSource to be concatenated
* @param source2
* a MaybeSource to be concatenated
* @return a Flowable that emits items emitted by the two source MaybeSources, one after the other.
* @see <a href="http://reactivex.io/documentation/operators/concat.html">ReactiveX operators documentation: Concat</a>
*/
@BackpressureSupport(BackpressureKind.FULL)
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
@SuppressWarnings("unchecked")
public static <T> Flowable<T> concat(MaybeSource<? extends T> source1, MaybeSource<? extends T> source2) {
// Validate eagerly so a null argument fails at assembly time, not at subscription time.
ObjectHelper.requireNonNull(source1, "source1 is null");
ObjectHelper.requireNonNull(source2, "source2 is null");
// Delegate to the varargs overload which performs the actual concatenation.
return concatArray(source1, source2);
}
/**
* Returns a Flowable that emits the items emitted by three MaybeSources, one after the other.
* <p>
* <img width="640" height="422" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.concat.png" alt="">
* <dl>
* <dt><b>Backpressure:</b></dt>
* <dd>The returned {@code Flowable} honors the backpressure of the downstream consumer.</dd>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code concat} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param <T> the common value type
* @param source1
* a MaybeSource to be concatenated
* @param source2
* a MaybeSource to be concatenated
* @param source3
* a MaybeSource to be concatenated
* @return a Flowable that emits items emitted by the three source MaybeSources, one after the other.
* @see <a href="http://reactivex.io/documentation/operators/concat.html">ReactiveX operators documentation: Concat</a>
*/
@BackpressureSupport(BackpressureKind.FULL)
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
@SuppressWarnings("unchecked")
public static <T> Flowable<T> concat(
MaybeSource<? extends T> source1, MaybeSource<? extends T> source2, MaybeSource<? extends T> source3) {
// Validate each argument up front so the NPE message identifies the offending parameter.
ObjectHelper.requireNonNull(source1, "source1 is null");
ObjectHelper.requireNonNull(source2, "source2 is null");
ObjectHelper.requireNonNull(source3, "source3 is null");
// Delegate to the varargs overload which performs the actual concatenation.
return concatArray(source1, source2, source3);
}
/**
* Returns a Flowable that emits the items emitted by four MaybeSources, one after the other.
* <p>
* <img width="640" height="422" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.concat.png" alt="">
* <dl>
* <dt><b>Backpressure:</b></dt>
* <dd>The returned {@code Flowable} honors the backpressure of the downstream consumer.</dd>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code concat} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param <T> the common value type
* @param source1
* a MaybeSource to be concatenated
* @param source2
* a MaybeSource to be concatenated
* @param source3
* a MaybeSource to be concatenated
* @param source4
* a MaybeSource to be concatenated
* @return a Flowable that emits items emitted by the four source MaybeSources, one after the other.
* @see <a href="http://reactivex.io/documentation/operators/concat.html">ReactiveX operators documentation: Concat</a>
*/
@BackpressureSupport(BackpressureKind.FULL)
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
@SuppressWarnings("unchecked")
public static <T> Flowable<T> concat(
MaybeSource<? extends T> source1, MaybeSource<? extends T> source2, MaybeSource<? extends T> source3, MaybeSource<? extends T> source4) {
// Validate each argument up front so the NPE message identifies the offending parameter.
ObjectHelper.requireNonNull(source1, "source1 is null");
ObjectHelper.requireNonNull(source2, "source2 is null");
ObjectHelper.requireNonNull(source3, "source3 is null");
ObjectHelper.requireNonNull(source4, "source4 is null");
// Delegate to the varargs overload which performs the actual concatenation.
return concatArray(source1, source2, source3, source4);
}
/**
* Concatenate the single values, in a non-overlapping fashion, of the MaybeSource sources provided by
* a Publisher sequence.
* <p>
* <img width="640" height="416" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.concat.p.png" alt="">
* <dl>
* <dt><b>Backpressure:</b></dt>
* <dd>The returned {@code Flowable} honors the backpressure of the downstream consumer and
* expects the {@code Publisher} to honor backpressure as well. If the sources {@code Publisher}
* violates this, a {@link io.reactivex.exceptions.MissingBackpressureException} is signalled.</dd>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code concat} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @param <T> the value type
* @param sources the Publisher of MaybeSource instances
* @return the new Flowable instance
*/
@BackpressureSupport(BackpressureKind.FULL)
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
public static <T> Flowable<T> concat(Publisher<? extends MaybeSource<? extends T>> sources) {
// Default prefetch of 2 (see the (Publisher, int) overload); that overload
// also performs the null check on sources.
return concat(sources, 2);
}
/**
* Concatenate the single values, in a non-overlapping fashion, of the MaybeSource sources provided by
* a Publisher sequence.
* <p>
* <img width="640" height="416" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.concat.pn.png" alt="">
* <dl>
* <dt><b>Backpressure:</b></dt>
* <dd>The returned {@code Flowable} honors the backpressure of the downstream consumer and
* expects the {@code Publisher} to honor backpressure as well. If the sources {@code Publisher}
* violates this, a {@link io.reactivex.exceptions.MissingBackpressureException} is signalled.</dd>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code concat} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @param <T> the value type
* @param sources the Publisher of MaybeSource instances
* @param prefetch the number of MaybeSources to prefetch from the Publisher
* @return the new Flowable instance
*/
@BackpressureSupport(BackpressureKind.FULL)
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
@SuppressWarnings({ "unchecked", "rawtypes" })
public static <T> Flowable<T> concat(Publisher<? extends MaybeSource<? extends T>> sources, int prefetch) {
    ObjectHelper.requireNonNull(sources, "sources is null");
    ObjectHelper.verifyPositive(prefetch, "prefetch");
    // ErrorMode.IMMEDIATE: an inner error terminates the resulting sequence right away.
    FlowableConcatMapPublisher operator = new FlowableConcatMapPublisher(
            sources, MaybeToPublisher.instance(), prefetch, ErrorMode.IMMEDIATE);
    return RxJavaPlugins.onAssembly(operator);
}
/**
* Concatenate the single values, in a non-overlapping fashion, of the MaybeSource sources in the array.
* <p>
* <img width="640" height="526" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.concatArray.png" alt="">
* <dl>
* <dt><b>Backpressure:</b></dt>
* <dd>The returned {@code Flowable} honors the backpressure of the downstream consumer.</dd>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code concatArray} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @param <T> the value type
* @param sources the array of MaybeSource instances
* @return the new Flowable instance
*/
@BackpressureSupport(BackpressureKind.FULL)
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
@SuppressWarnings("unchecked")
public static <T> Flowable<T> concatArray(MaybeSource<? extends T>... sources) {
    ObjectHelper.requireNonNull(sources, "sources is null");
    switch (sources.length) {
    case 0:
        // Nothing to concatenate: empty result sequence.
        return Flowable.empty();
    case 1:
        // Single source: a plain Maybe-to-Flowable conversion suffices.
        return RxJavaPlugins.onAssembly(new MaybeToFlowable<T>((MaybeSource<T>)sources[0]));
    default:
        return RxJavaPlugins.onAssembly(new MaybeConcatArray<T>(sources));
    }
}
/**
* Concatenates a variable number of MaybeSource sources and delays errors from any of them
* till all terminate.
* <p>
* <img width="640" height="425" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.concatArrayDelayError.png" alt="">
* <dl>
* <dt><b>Backpressure:</b></dt>
* <dd>The operator honors backpressure from downstream.</dd>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code concatArrayDelayError} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @param sources the array of sources
* @param <T> the common base value type
* @return the new Flowable instance
* @throws NullPointerException if sources is null
*/
@SuppressWarnings("unchecked")
@BackpressureSupport(BackpressureKind.FULL)
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public static <T> Flowable<T> concatArrayDelayError(MaybeSource<? extends T>... sources) {
    // The javadoc promises a NullPointerException for a null sources array;
    // previously this surfaced as a bare NPE from sources.length. Check explicitly,
    // consistent with concatArray().
    ObjectHelper.requireNonNull(sources, "sources is null");
    if (sources.length == 0) {
        // Nothing to concatenate: empty result sequence.
        return Flowable.empty();
    }
    if (sources.length == 1) {
        // Single source: no error-delaying machinery needed, just convert it.
        return RxJavaPlugins.onAssembly(new MaybeToFlowable<T>((MaybeSource<T>)sources[0]));
    }
    return RxJavaPlugins.onAssembly(new MaybeConcatArrayDelayError<T>(sources));
}
/**
* Concatenates a sequence of MaybeSource eagerly into a single stream of values.
* <p>
* Eager concatenation means that once a subscriber subscribes, this operator subscribes to all of the
* source MaybeSources. The operator buffers the value emitted by these MaybeSources and then drains them
* in order, each one after the previous one completes.
* <p>
* <img width="640" height="489" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.concatArrayEager.png" alt="">
* <dl>
* <dt><b>Backpressure:</b></dt>
* <dd>The operator honors backpressure from downstream.</dd>
* <dt><b>Scheduler:</b></dt>
* <dd>This method does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @param <T> the value type
* @param sources a sequence of MaybeSources that need to be eagerly concatenated
* @return the new Flowable instance with the specified concatenation behavior
*/
@SuppressWarnings({ "rawtypes", "unchecked" })
@BackpressureSupport(BackpressureKind.FULL)
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
public static <T> Flowable<T> concatArrayEager(MaybeSource<? extends T>... sources) {
// Subscribes to all sources at once (eager) but emits their values in array order.
// The raw Function cast is what the rawtypes/unchecked suppressions above cover;
// fromArray performs the null check on sources.
return Flowable.fromArray(sources).concatMapEager((Function)MaybeToPublisher.instance());
}
/**
* Concatenates the Iterable sequence of MaybeSources into a single sequence by subscribing to each MaybeSource,
* one after the other, one at a time and delays any errors till the all inner MaybeSources terminate.
* <p>
* <img width="640" height="469" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.concatDelayError.i.png" alt="">
* <dl>
* <dt><b>Backpressure:</b></dt>
* <dd>The operator honors backpressure from downstream.</dd>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code concatDelayError} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param <T> the common element base type
* @param sources the Iterable sequence of MaybeSources
* @return the new Flowable with the concatenating behavior
*/
@SuppressWarnings({ "unchecked", "rawtypes" })
@BackpressureSupport(BackpressureKind.FULL)
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public static <T> Flowable<T> concatDelayError(Iterable<? extends MaybeSource<? extends T>> sources) {
ObjectHelper.requireNonNull(sources, "sources is null");
// Convert each Maybe to a Publisher and concatenate, holding back errors until
// all inner sources have terminated. The raw cast is covered by the suppressions above.
return Flowable.fromIterable(sources).concatMapDelayError((Function)MaybeToPublisher.instance());
}
/**
* Concatenates the Publisher sequence of Publishers into a single sequence by subscribing to each inner Publisher,
* one after the other, one at a time and delays any errors till the all inner and the outer Publishers terminate.
* <p>
* <img width="640" height="360" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.concatDelayError.p.png" alt="">
* <dl>
* <dt><b>Backpressure:</b></dt>
* <dd>{@code concatDelayError} fully supports backpressure.</dd>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code concatDelayError} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param <T> the common element base type
* @param sources the Publisher sequence of MaybeSources
* @return the new Flowable with the concatenating behavior
*/
@SuppressWarnings({ "unchecked", "rawtypes" })
@BackpressureSupport(BackpressureKind.FULL)
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
public static <T> Flowable<T> concatDelayError(Publisher<? extends MaybeSource<? extends T>> sources) {
// Convert each inner Maybe to a Publisher and concatenate, holding back errors until
// all sources terminate; fromPublisher performs the null check on sources.
return Flowable.fromPublisher(sources).concatMapDelayError((Function)MaybeToPublisher.instance());
}
/**
* Concatenates a sequence of MaybeSources eagerly into a single stream of values.
* <p>
* Eager concatenation means that once a subscriber subscribes, this operator subscribes to all of the
* source MaybeSources. The operator buffers the values emitted by these MaybeSources and then drains them
* in order, each one after the previous one completes.
* <p>
* <img width="640" height="526" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.concatEager.i.png" alt="">
* <dl>
* <dt><b>Backpressure:</b></dt>
* <dd>Backpressure is honored towards the downstream.</dd>
* <dt><b>Scheduler:</b></dt>
* <dd>This method does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @param <T> the value type
* @param sources a sequence of MaybeSource that need to be eagerly concatenated
* @return the new Flowable instance with the specified concatenation behavior
*/
@SuppressWarnings({ "rawtypes", "unchecked" })
@BackpressureSupport(BackpressureKind.FULL)
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
public static <T> Flowable<T> concatEager(Iterable<? extends MaybeSource<? extends T>> sources) {
// Subscribes to all sources at once (eager) but emits values in Iterable order;
// fromIterable performs the null check on sources.
return Flowable.fromIterable(sources).concatMapEager((Function)MaybeToPublisher.instance());
}
/**
* Concatenates a Publisher sequence of MaybeSources eagerly into a single stream of values.
* <p>
* Eager concatenation means that once a subscriber subscribes, this operator subscribes to all of the
* emitted source Publishers as they are observed. The operator buffers the values emitted by these
* Publishers and then drains them in order, each one after the previous one completes.
* <p>
* <img width="640" height="511" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.concatEager.p.png" alt="">
* <dl>
* <dt><b>Backpressure:</b></dt>
* <dd>Backpressure is honored towards the downstream and the outer Publisher is
* expected to support backpressure. Violating this assumption, the operator will
* signal {@link io.reactivex.exceptions.MissingBackpressureException}.</dd>
* <dt><b>Scheduler:</b></dt>
* <dd>This method does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @param <T> the value type
* @param sources a Publisher sequence of MaybeSources that need to be eagerly concatenated
* @return the new Flowable instance with the specified concatenation behavior
*/
@SuppressWarnings({ "rawtypes", "unchecked" })
@BackpressureSupport(BackpressureKind.FULL)
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
public static <T> Flowable<T> concatEager(Publisher<? extends MaybeSource<? extends T>> sources) {
// Eagerly subscribes to inner sources as the outer Publisher emits them, draining
// results in emission order; fromPublisher performs the null check on sources.
return Flowable.fromPublisher(sources).concatMapEager((Function)MaybeToPublisher.instance());
}
/**
* Provides an API (via a cold Maybe) that bridges the reactive world with the callback-style world.
* <p>
* Example:
* <pre><code>
* Maybe.<Event>create(emitter -> {
* Callback listener = new Callback() {
* @Override
* public void onEvent(Event e) {
* if (e.isNothing()) {
* emitter.onComplete();
* } else {
* emitter.onSuccess(e);
* }
* }
*
* @Override
* public void onFailure(Exception e) {
* emitter.onError(e);
* }
* };
*
* AutoCloseable c = api.someMethod(listener);
*
* emitter.setCancellable(c::close);
*
* });
* </code></pre>
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code create} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @param <T> the value type
* @param onSubscribe the emitter that is called when a MaybeObserver subscribes to the returned {@code Maybe}
* @return the new Maybe instance
* @see MaybeOnSubscribe
* @see Cancellable
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public static <T> Maybe<T> create(MaybeOnSubscribe<T> onSubscribe) {
    ObjectHelper.requireNonNull(onSubscribe, "onSubscribe is null");
    // The emitter callback is invoked per subscriber inside MaybeCreate, not here.
    MaybeCreate<T> source = new MaybeCreate<T>(onSubscribe);
    return RxJavaPlugins.onAssembly(source);
}
/**
* Calls a Callable for each individual MaybeObserver to return the actual MaybeSource source to
* be subscribed to.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code defer} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @param <T> the value type
* @param maybeSupplier the Callable that is called for each individual MaybeObserver and
* returns a MaybeSource instance to subscribe to
* @return the new Maybe instance
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public static <T> Maybe<T> defer(final Callable<? extends MaybeSource<? extends T>> maybeSupplier) {
    ObjectHelper.requireNonNull(maybeSupplier, "maybeSupplier is null");
    // The supplier is called per individual MaybeObserver inside MaybeDefer, not at assembly.
    MaybeDefer<T> deferred = new MaybeDefer<T>(maybeSupplier);
    return RxJavaPlugins.onAssembly(deferred);
}
/**
* Returns a (singleton) Maybe instance that calls {@link MaybeObserver#onComplete onComplete}
* immediately.
* <p>
* <img width="640" height="190" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/empty.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code empty} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @param <T> the value type
* @return the new Maybe instance
*/
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
@SuppressWarnings("unchecked")
public static <T> Maybe<T> empty() {
    // A single shared singleton serves every value type; the unchecked cast is safe
    // because an empty Maybe never emits a value.
    Maybe<T> instance = (Maybe<T>) MaybeEmpty.INSTANCE;
    return RxJavaPlugins.onAssembly(instance);
}
/**
* Returns a Maybe that invokes a subscriber's {@link MaybeObserver#onError onError} method when the
* subscriber subscribes to it.
* <p>
* <img width="640" height="447" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.error.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code error} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param exception
* the particular Throwable to pass to {@link MaybeObserver#onError onError}
* @param <T>
* the type of the item (ostensibly) emitted by the Maybe
* @return a Maybe that invokes the subscriber's {@link MaybeObserver#onError onError} method when
* the subscriber subscribes to it
* @see <a href="http://reactivex.io/documentation/operators/empty-never-throw.html">ReactiveX operators documentation: Throw</a>
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public static <T> Maybe<T> error(Throwable exception) {
    ObjectHelper.requireNonNull(exception, "exception is null");
    // The same Throwable instance is delivered to every subscriber via onError.
    MaybeError<T> failed = new MaybeError<T>(exception);
    return RxJavaPlugins.onAssembly(failed);
}
/**
* Returns a Maybe that invokes a {@link MaybeObserver}'s {@link MaybeObserver#onError onError} method when the
* MaybeObserver subscribes to it.
* <p>
* <img width="640" height="190" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/error.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code error} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param supplier
* a Callable factory to return a Throwable for each individual MaybeObserver
* @param <T>
* the type of the items (ostensibly) emitted by the Maybe
* @return a Maybe that invokes the {@link MaybeObserver}'s {@link MaybeObserver#onError onError} method when
* the MaybeObserver subscribes to it
* @see <a href="http://reactivex.io/documentation/operators/empty-never-throw.html">ReactiveX operators documentation: Throw</a>
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public static <T> Maybe<T> error(Callable<? extends Throwable> supplier) {
    // Message fixed to match the actual parameter name ("supplier", not "errorSupplier").
    ObjectHelper.requireNonNull(supplier, "supplier is null");
    // The Callable is invoked per subscriber inside MaybeErrorCallable to produce the Throwable.
    return RxJavaPlugins.onAssembly(new MaybeErrorCallable<T>(supplier));
}
/**
* Returns a Maybe instance that runs the given Action for each subscriber and
* emits either its exception or simply completes.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code fromAction} does not operate by default on a particular {@link Scheduler}.</dd>
* <dt><b>Error handling:</b></dt>
* <dd> If the {@link Action} throws an exception, the respective {@link Throwable} is
* delivered to the downstream via {@link MaybeObserver#onError(Throwable)},
* except when the downstream has disposed this {@code Maybe} source.
* In this latter case, the {@code Throwable} is delivered to the global error handler via
* {@link RxJavaPlugins#onError(Throwable)} as an {@link io.reactivex.exceptions.UndeliverableException UndeliverableException}.
* </dd>
* </dl>
* @param <T> the target type
* @param run the runnable to run for each subscriber
* @return the new Maybe instance
* @throws NullPointerException if run is null
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public static <T> Maybe<T> fromAction(final Action run) {
    ObjectHelper.requireNonNull(run, "run is null");
    // The Action executes lazily, once per subscribing MaybeObserver.
    MaybeFromAction<T> wrapper = new MaybeFromAction<T>(run);
    return RxJavaPlugins.onAssembly(wrapper);
}
/**
* Wraps a CompletableSource into a Maybe.
*
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code fromCompletable} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @param <T> the target type
* @param completableSource the CompletableSource to convert from
* @return the new Maybe instance
* @throws NullPointerException if completableSource is null
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public static <T> Maybe<T> fromCompletable(CompletableSource completableSource) {
    ObjectHelper.requireNonNull(completableSource, "completableSource is null");
    // A Completable has no value, so the resulting Maybe can only complete or error.
    MaybeFromCompletable<T> adapted = new MaybeFromCompletable<T>(completableSource);
    return RxJavaPlugins.onAssembly(adapted);
}
/**
* Wraps a SingleSource into a Maybe.
*
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code fromSingle} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @param <T> the target type
* @param singleSource the SingleSource to convert from
* @return the new Maybe instance
* @throws NullPointerException if singleSource is null
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public static <T> Maybe<T> fromSingle(SingleSource<T> singleSource) {
    ObjectHelper.requireNonNull(singleSource, "singleSource is null");
    MaybeFromSingle<T> adapted = new MaybeFromSingle<T>(singleSource);
    return RxJavaPlugins.onAssembly(adapted);
}
/**
* Returns a {@link Maybe} that invokes the given {@link Callable} for each individual {@link MaybeObserver} that
* subscribes and emits the resulting non-null item via {@code onSuccess} while
* considering a {@code null} result from the {@code Callable} as indication for valueless completion
* via {@code onComplete}.
* <p>
* This operator allows you to defer the execution of the given {@code Callable} until a {@code MaybeObserver}
* subscribes to the returned {@link Maybe}. In other terms, this source operator evaluates the given
* {@code Callable} "lazily".
* <p>
* Note that the {@code null} handling of this operator differs from the similar source operators in the other
* {@link io.reactivex base reactive classes}. Those operators signal a {@code NullPointerException} if the value returned by their
* {@code Callable} is {@code null} while this {@code fromCallable} considers it to indicate the
* returned {@code Maybe} is empty.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code fromCallable} does not operate by default on a particular {@link Scheduler}.</dd>
* <dt><b>Error handling:</b></dt>
* <dd>Any non-fatal exception thrown by {@link Callable#call()} will be forwarded to {@code onError},
* except if the {@code MaybeObserver} disposed the subscription in the meantime. In this latter case,
* the exception is forwarded to the global error handler via
* {@link io.reactivex.plugins.RxJavaPlugins#onError(Throwable)} wrapped into a
* {@link io.reactivex.exceptions.UndeliverableException UndeliverableException}.
* Fatal exceptions are rethrown and usually will end up in the executing thread's
* {@link java.lang.Thread.UncaughtExceptionHandler#uncaughtException(Thread, Throwable)} handler.</dd>
* </dl>
*
* @param callable
* a {@link Callable} instance whose execution should be deferred and performed for each individual
* {@code MaybeObserver} that subscribes to the returned {@link Maybe}.
* @param <T>
* the type of the item emitted by the {@link Maybe}.
* @return a new Maybe instance
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public static <T> Maybe<T> fromCallable(@NonNull final Callable<? extends T> callable) {
    ObjectHelper.requireNonNull(callable, "callable is null");
    // Per this method's contract, a null result from the Callable is treated as
    // valueless completion (onComplete) rather than a NullPointerException.
    MaybeFromCallable<T> source = new MaybeFromCallable<T>(callable);
    return RxJavaPlugins.onAssembly(source);
}
/**
* Converts a {@link Future} into a Maybe, treating a null result as an indication of emptiness.
* <p>
* <img width="640" height="315" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/from.Future.png" alt="">
* <p>
* You can convert any object that supports the {@link Future} interface into a Maybe that emits the
* return value of the {@link Future#get} method of that object, by passing the object into the {@code from}
* method.
* <p>
* <em>Important note:</em> This Maybe is blocking; you cannot dispose it.
* <p>
* Unlike 1.x, disposing the Maybe won't cancel the future. If necessary, one can use composition to achieve the
* cancellation effect: {@code futureMaybe.doOnDispose(() -> future.cancel(true));}.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code fromFuture} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param future
* the source {@link Future}
* @param <T>
* the type of object that the {@link Future} returns, and also the type of item to be emitted by
* the resulting Maybe
* @return a Maybe that emits the item from the source {@link Future}
* @see <a href="http://reactivex.io/documentation/operators/from.html">ReactiveX operators documentation: From</a>
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public static <T> Maybe<T> fromFuture(Future<? extends T> future) {
    ObjectHelper.requireNonNull(future, "future is null");
    // 0L timeout with a null unit selects the untimed-wait variant
    // (NOTE(review): per MaybeFromFuture's handling — confirm there).
    MaybeFromFuture<T> wrapper = new MaybeFromFuture<T>(future, 0L, null);
    return RxJavaPlugins.onAssembly(wrapper);
}
/**
* Converts a {@link Future} into a Maybe, with a timeout on the Future.
* <p>
* <img width="640" height="315" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/from.Future.png" alt="">
* <p>
* You can convert any object that supports the {@link Future} interface into a Maybe that emits the
* return value of the {@link Future#get} method of that object, by passing the object into the {@code fromFuture}
* method.
* <p>
* Unlike 1.x, disposing the Maybe won't cancel the future. If necessary, one can use composition to achieve the
* cancellation effect: {@code futureMaybe.doOnCancel(() -> future.cancel(true));}.
* <p>
* <em>Important note:</em> This Maybe is blocking on the thread it gets subscribed on; you cannot dispose it.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code fromFuture} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param future
* the source {@link Future}
* @param timeout
* the maximum time to wait before calling {@code get}
* @param unit
* the {@link TimeUnit} of the {@code timeout} argument
* @param <T>
* the type of object that the {@link Future} returns, and also the type of item to be emitted by
* the resulting Maybe
* @return a Maybe that emits the item from the source {@link Future}
* @see <a href="http://reactivex.io/documentation/operators/from.html">ReactiveX operators documentation: From</a>
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public static <T> Maybe<T> fromFuture(Future<? extends T> future, long timeout, TimeUnit unit) {
    ObjectHelper.requireNonNull(future, "future is null");
    ObjectHelper.requireNonNull(unit, "unit is null");
    // The timed wait happens at subscription time inside MaybeFromFuture; note the
    // timeout value itself is not range-checked here.
    MaybeFromFuture<T> wrapper = new MaybeFromFuture<T>(future, timeout, unit);
    return RxJavaPlugins.onAssembly(wrapper);
}
/**
* Returns a Maybe instance that runs the given Runnable for each subscriber and
* emits either its exception or simply completes.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code fromRunnable} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @param <T> the target type
* @param run the runnable to run for each subscriber
* @return the new Maybe instance
* @throws NullPointerException if run is null
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public static <T> Maybe<T> fromRunnable(final Runnable run) {
    ObjectHelper.requireNonNull(run, "run is null");
    // The Runnable executes lazily, once per subscribing MaybeObserver.
    MaybeFromRunnable<T> wrapper = new MaybeFromRunnable<T>(run);
    return RxJavaPlugins.onAssembly(wrapper);
}
/**
* Returns a {@code Maybe} that emits a specified item.
* <p>
* <img width="640" height="485" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.just.png" alt="">
* <p>
* To convert any object into a {@code Maybe} that emits that object, pass that object into the
* {@code just} method.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code just} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param item
* the item to emit
* @param <T>
* the type of that item
* @return a {@code Maybe} that emits {@code item}
* @see <a href="http://reactivex.io/documentation/operators/just.html">ReactiveX operators documentation: Just</a>
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public static <T> Maybe<T> just(T item) {
ObjectHelper.requireNonNull(item, "item is null");
return RxJavaPlugins.onAssembly(new MaybeJust<T>(item));
}
/**
* Merges an Iterable sequence of MaybeSource instances into a single Flowable sequence,
* running all MaybeSources at once.
* <dl>
* <dt><b>Backpressure:</b></dt>
* <dd>The operator honors backpressure from downstream.</dd>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code merge} does not operate by default on a particular {@link Scheduler}.</dd>
* <dt><b>Error handling:</b></dt>
* <dd>If any of the source {@code MaybeSource}s signal a {@code Throwable} via {@code onError}, the resulting
* {@code Flowable} terminates with that {@code Throwable} and all other source {@code MaybeSource}s are disposed.
* If more than one {@code MaybeSource} signals an error, the resulting {@code Flowable} may terminate with the
* first one's error or, depending on the concurrency of the sources, may terminate with a
* {@code CompositeException} containing two or more of the various error signals.
* {@code Throwable}s that didn't make into the composite will be sent (individually) to the global error handler via
* {@link RxJavaPlugins#onError(Throwable)} method as {@code UndeliverableException} errors. Similarly, {@code Throwable}s
* signaled by source(s) after the returned {@code Flowable} has been cancelled or terminated with a
* (composite) error will be sent to the same global error handler.
* Use {@link #mergeDelayError(Iterable)} to merge sources and terminate only when all source {@code MaybeSource}s
* have completed or failed with an error.
* </dd>
* </dl>
* @param <T> the common and resulting value type
* @param sources the Iterable sequence of MaybeSource sources
* @return the new Flowable instance
* @see #mergeDelayError(Iterable)
*/
@BackpressureSupport(BackpressureKind.FULL)
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
public static <T> Flowable<T> merge(Iterable<? extends MaybeSource<? extends T>> sources) {
return merge(Flowable.fromIterable(sources));
}
/**
* Merges a Flowable sequence of MaybeSource instances into a single Flowable sequence,
* running all MaybeSources at once.
* <dl>
* <dt><b>Backpressure:</b></dt>
* <dd>The operator honors backpressure from downstream.</dd>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code merge} does not operate by default on a particular {@link Scheduler}.</dd>
* <dt><b>Error handling:</b></dt>
* <dd>If any of the source {@code MaybeSource}s signal a {@code Throwable} via {@code onError}, the resulting
* {@code Flowable} terminates with that {@code Throwable} and all other source {@code MaybeSource}s are disposed.
* If more than one {@code MaybeSource} signals an error, the resulting {@code Flowable} may terminate with the
* first one's error or, depending on the concurrency of the sources, may terminate with a
* {@code CompositeException} containing two or more of the various error signals.
* {@code Throwable}s that didn't make into the composite will be sent (individually) to the global error handler via
* {@link RxJavaPlugins#onError(Throwable)} method as {@code UndeliverableException} errors. Similarly, {@code Throwable}s
* signaled by source(s) after the returned {@code Flowable} has been cancelled or terminated with a
* (composite) error will be sent to the same global error handler.
* Use {@link #mergeDelayError(Publisher)} to merge sources and terminate only when all source {@code MaybeSource}s
* have completed or failed with an error.
* </dd>
* </dl>
* @param <T> the common and resulting value type
* @param sources the Flowable sequence of MaybeSource sources
* @return the new Flowable instance
* @see #mergeDelayError(Publisher)
*/
@BackpressureSupport(BackpressureKind.FULL)
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
public static <T> Flowable<T> merge(Publisher<? extends MaybeSource<? extends T>> sources) {
return merge(sources, Integer.MAX_VALUE);
}
/**
* Merges a Flowable sequence of MaybeSource instances into a single Flowable sequence,
* running at most maxConcurrency MaybeSources at once.
* <dl>
* <dt><b>Backpressure:</b></dt>
* <dd>The operator honors backpressure from downstream.</dd>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code merge} does not operate by default on a particular {@link Scheduler}.</dd>
* <dt><b>Error handling:</b></dt>
* <dd>If any of the source {@code MaybeSource}s signal a {@code Throwable} via {@code onError}, the resulting
* {@code Flowable} terminates with that {@code Throwable} and all other source {@code MaybeSource}s are disposed.
* If more than one {@code MaybeSource} signals an error, the resulting {@code Flowable} may terminate with the
* first one's error or, depending on the concurrency of the sources, may terminate with a
* {@code CompositeException} containing two or more of the various error signals.
* {@code Throwable}s that didn't make into the composite will be sent (individually) to the global error handler via
* {@link RxJavaPlugins#onError(Throwable)} method as {@code UndeliverableException} errors. Similarly, {@code Throwable}s
* signaled by source(s) after the returned {@code Flowable} has been cancelled or terminated with a
* (composite) error will be sent to the same global error handler.
* Use {@link #mergeDelayError(Publisher, int)} to merge sources and terminate only when all source {@code MaybeSource}s
* have completed or failed with an error.
* </dd>
* </dl>
* @param <T> the common and resulting value type
* @param sources the Flowable sequence of MaybeSource sources
* @param maxConcurrency the maximum number of concurrently running MaybeSources
* @return the new Flowable instance
* @see #mergeDelayError(Publisher, int)
*/
@BackpressureSupport(BackpressureKind.FULL)
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
@SuppressWarnings({ "unchecked", "rawtypes" })
public static <T> Flowable<T> merge(Publisher<? extends MaybeSource<? extends T>> sources, int maxConcurrency) {
ObjectHelper.requireNonNull(sources, "source is null");
ObjectHelper.verifyPositive(maxConcurrency, "maxConcurrency");
return RxJavaPlugins.onAssembly(new FlowableFlatMapPublisher(sources, MaybeToPublisher.instance(), false, maxConcurrency, 1));
}
/**
* Flattens a {@code MaybeSource} that emits a {@code MaybeSource} into a single {@code MaybeSource} that emits the item
* emitted by the nested {@code MaybeSource}, without any transformation.
* <p>
* <img width="640" height="393" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.merge.oo.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code merge} does not operate by default on a particular {@link Scheduler}.</dd>
* <dt><b>Error handling:</b></dt>
* <dd>The resulting {@code Maybe} emits the outer source's or the inner {@code MaybeSource}'s {@code Throwable} as is.
* Unlike the other {@code merge()} operators, this operator won't and can't produce a {@code CompositeException} because there is
* only one possibility for the outer or the inner {@code MaybeSource} to emit an {@code onError} signal.
* Therefore, there is no need for a {@code mergeDelayError(MaybeSource<MaybeSource<T>>)} operator.
* </dd>
* </dl>
*
* @param <T> the value type of the sources and the output
* @param source
* a {@code MaybeSource} that emits a {@code MaybeSource}
* @return a {@code Maybe} that emits the item that is the result of flattening the {@code MaybeSource} emitted
* by {@code source}
* @see <a href="http://reactivex.io/documentation/operators/merge.html">ReactiveX operators documentation: Merge</a>
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
@SuppressWarnings({ "unchecked", "rawtypes" })
public static <T> Maybe<T> merge(MaybeSource<? extends MaybeSource<? extends T>> source) {
ObjectHelper.requireNonNull(source, "source is null");
return RxJavaPlugins.onAssembly(new MaybeFlatten(source, Functions.identity()));
}
/**
* Flattens two MaybeSources into a single Flowable, without any transformation.
* <p>
* <img width="640" height="483" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.merge.png" alt="">
* <p>
* You can combine items emitted by multiple MaybeSources so that they appear as a single Flowable, by
* using the {@code merge} method.
* <dl>
* <dt><b>Backpressure:</b></dt>
* <dd>The operator honors backpressure from downstream.</dd>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code merge} does not operate by default on a particular {@link Scheduler}.</dd>
* <dt><b>Error handling:</b></dt>
* <dd>If any of the source {@code MaybeSource}s signal a {@code Throwable} via {@code onError}, the resulting
* {@code Flowable} terminates with that {@code Throwable} and all other source {@code MaybeSource}s are disposed.
* If more than one {@code MaybeSource} signals an error, the resulting {@code Flowable} may terminate with the
* first one's error or, depending on the concurrency of the sources, may terminate with a
* {@code CompositeException} containing two or more of the various error signals.
* {@code Throwable}s that didn't make into the composite will be sent (individually) to the global error handler via
* {@link RxJavaPlugins#onError(Throwable)} method as {@code UndeliverableException} errors. Similarly, {@code Throwable}s
* signaled by source(s) after the returned {@code Flowable} has been cancelled or terminated with a
* (composite) error will be sent to the same global error handler.
* Use {@link #mergeDelayError(MaybeSource, MaybeSource)} to merge sources and terminate only when all source {@code MaybeSource}s
* have completed or failed with an error.
* </dd>
* </dl>
*
* @param <T> the common value type
* @param source1
* a MaybeSource to be merged
* @param source2
* a MaybeSource to be merged
* @return a Flowable that emits all of the items emitted by the source MaybeSources
* @see <a href="http://reactivex.io/documentation/operators/merge.html">ReactiveX operators documentation: Merge</a>
* @see #mergeDelayError(MaybeSource, MaybeSource)
*/
@BackpressureSupport(BackpressureKind.FULL)
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
@SuppressWarnings("unchecked")
public static <T> Flowable<T> merge(
MaybeSource<? extends T> source1, MaybeSource<? extends T> source2
) {
ObjectHelper.requireNonNull(source1, "source1 is null");
ObjectHelper.requireNonNull(source2, "source2 is null");
return mergeArray(source1, source2);
}
/**
* Flattens three MaybeSources into a single Flowable, without any transformation.
* <p>
* <img width="640" height="483" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.merge.png" alt="">
* <p>
* You can combine items emitted by multiple MaybeSources so that they appear as a single Flowable, by using
* the {@code merge} method.
* <dl>
* <dt><b>Backpressure:</b></dt>
* <dd>The operator honors backpressure from downstream.</dd>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code merge} does not operate by default on a particular {@link Scheduler}.</dd>
* <dt><b>Error handling:</b></dt>
* <dd>If any of the source {@code MaybeSource}s signal a {@code Throwable} via {@code onError}, the resulting
* {@code Flowable} terminates with that {@code Throwable} and all other source {@code MaybeSource}s are disposed.
* If more than one {@code MaybeSource} signals an error, the resulting {@code Flowable} may terminate with the
* first one's error or, depending on the concurrency of the sources, may terminate with a
* {@code CompositeException} containing two or more of the various error signals.
* {@code Throwable}s that didn't make into the composite will be sent (individually) to the global error handler via
* {@link RxJavaPlugins#onError(Throwable)} method as {@code UndeliverableException} errors. Similarly, {@code Throwable}s
* signaled by source(s) after the returned {@code Flowable} has been cancelled or terminated with a
* (composite) error will be sent to the same global error handler.
* Use {@link #mergeDelayError(MaybeSource, MaybeSource, MaybeSource)} to merge sources and terminate only when all source {@code MaybeSource}s
* have completed or failed with an error.
* </dd>
* </dl>
*
* @param <T> the common value type
* @param source1
* a MaybeSource to be merged
* @param source2
* a MaybeSource to be merged
* @param source3
* a MaybeSource to be merged
* @return a Flowable that emits all of the items emitted by the source MaybeSources
* @see <a href="http://reactivex.io/documentation/operators/merge.html">ReactiveX operators documentation: Merge</a>
* @see #mergeDelayError(MaybeSource, MaybeSource, MaybeSource)
*/
@BackpressureSupport(BackpressureKind.FULL)
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
@SuppressWarnings("unchecked")
public static <T> Flowable<T> merge(
MaybeSource<? extends T> source1, MaybeSource<? extends T> source2,
MaybeSource<? extends T> source3
) {
ObjectHelper.requireNonNull(source1, "source1 is null");
ObjectHelper.requireNonNull(source2, "source2 is null");
ObjectHelper.requireNonNull(source3, "source3 is null");
return mergeArray(source1, source2, source3);
}
/**
* Flattens four MaybeSources into a single Flowable, without any transformation.
* <p>
* <img width="640" height="483" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.merge.png" alt="">
* <p>
* You can combine items emitted by multiple MaybeSources so that they appear as a single Flowable, by using
* the {@code merge} method.
* <dl>
* <dt><b>Backpressure:</b></dt>
* <dd>The operator honors backpressure from downstream.</dd>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code merge} does not operate by default on a particular {@link Scheduler}.</dd>
* <dt><b>Error handling:</b></dt>
* <dd>If any of the source {@code MaybeSource}s signal a {@code Throwable} via {@code onError}, the resulting
* {@code Flowable} terminates with that {@code Throwable} and all other source {@code MaybeSource}s are disposed.
* If more than one {@code MaybeSource} signals an error, the resulting {@code Flowable} may terminate with the
* first one's error or, depending on the concurrency of the sources, may terminate with a
* {@code CompositeException} containing two or more of the various error signals.
* {@code Throwable}s that didn't make into the composite will be sent (individually) to the global error handler via
* {@link RxJavaPlugins#onError(Throwable)} method as {@code UndeliverableException} errors. Similarly, {@code Throwable}s
* signaled by source(s) after the returned {@code Flowable} has been cancelled or terminated with a
* (composite) error will be sent to the same global error handler.
* Use {@link #mergeDelayError(MaybeSource, MaybeSource, MaybeSource, MaybeSource)} to merge sources and terminate only when all source {@code MaybeSource}s
* have completed or failed with an error.
* </dd>
* </dl>
*
* @param <T> the common value type
* @param source1
* a MaybeSource to be merged
* @param source2
* a MaybeSource to be merged
* @param source3
* a MaybeSource to be merged
* @param source4
* a MaybeSource to be merged
* @return a Flowable that emits all of the items emitted by the source MaybeSources
* @see <a href="http://reactivex.io/documentation/operators/merge.html">ReactiveX operators documentation: Merge</a>
* @see #mergeDelayError(MaybeSource, MaybeSource, MaybeSource, MaybeSource)
*/
@BackpressureSupport(BackpressureKind.FULL)
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
@SuppressWarnings("unchecked")
public static <T> Flowable<T> merge(
MaybeSource<? extends T> source1, MaybeSource<? extends T> source2,
MaybeSource<? extends T> source3, MaybeSource<? extends T> source4
) {
ObjectHelper.requireNonNull(source1, "source1 is null");
ObjectHelper.requireNonNull(source2, "source2 is null");
ObjectHelper.requireNonNull(source3, "source3 is null");
ObjectHelper.requireNonNull(source4, "source4 is null");
return mergeArray(source1, source2, source3, source4);
}
/**
* Merges an array sequence of MaybeSource instances into a single Flowable sequence,
* running all MaybeSources at once.
* <dl>
* <dt><b>Backpressure:</b></dt>
* <dd>The operator honors backpressure from downstream.</dd>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code mergeArray} does not operate by default on a particular {@link Scheduler}.</dd>
* <dt><b>Error handling:</b></dt>
* <dd>If any of the source {@code MaybeSource}s signal a {@code Throwable} via {@code onError}, the resulting
* {@code Flowable} terminates with that {@code Throwable} and all other source {@code MaybeSource}s are disposed.
* If more than one {@code MaybeSource} signals an error, the resulting {@code Flowable} may terminate with the
* first one's error or, depending on the concurrency of the sources, may terminate with a
* {@code CompositeException} containing two or more of the various error signals.
* {@code Throwable}s that didn't make into the composite will be sent (individually) to the global error handler via
* {@link RxJavaPlugins#onError(Throwable)} method as {@code UndeliverableException} errors. Similarly, {@code Throwable}s
* signaled by source(s) after the returned {@code Flowable} has been cancelled or terminated with a
* (composite) error will be sent to the same global error handler.
* Use {@link #mergeArrayDelayError(MaybeSource...)} to merge sources and terminate only when all source {@code MaybeSource}s
* have completed or failed with an error.
* </dd>
* </dl>
* @param <T> the common and resulting value type
* @param sources the array sequence of MaybeSource sources
* @return the new Flowable instance
* @see #mergeArrayDelayError(MaybeSource...)
*/
@BackpressureSupport(BackpressureKind.FULL)
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
@SuppressWarnings("unchecked")
public static <T> Flowable<T> mergeArray(MaybeSource<? extends T>... sources) {
ObjectHelper.requireNonNull(sources, "sources is null");
if (sources.length == 0) {
return Flowable.empty();
}
if (sources.length == 1) {
return RxJavaPlugins.onAssembly(new MaybeToFlowable<T>((MaybeSource<T>)sources[0]));
}
return RxJavaPlugins.onAssembly(new MaybeMergeArray<T>(sources));
}
/**
* Flattens an array of MaybeSources into one Flowable, in a way that allows a Subscriber to receive all
* successfully emitted items from each of the source MaybeSources without being interrupted by an error
* notification from one of them.
* <p>
* This behaves like {@link #merge(Publisher)} except that if any of the merged MaybeSources notify of an
* error via {@link Subscriber#onError onError}, {@code mergeDelayError} will refrain from propagating that
* error notification until all of the merged MaybeSources have finished emitting items.
* <p>
* <img width="640" height="380" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/mergeDelayError.png" alt="">
* <p>
* Even if multiple merged MaybeSources send {@code onError} notifications, {@code mergeDelayError} will only
* invoke the {@code onError} method of its Subscribers once.
* <dl>
* <dt><b>Backpressure:</b></dt>
* <dd>The operator honors backpressure from downstream.</dd>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code mergeArrayDelayError} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param <T> the common element base type
* @param sources
* the Iterable of MaybeSources
* @return a Flowable that emits items that are the result of flattening the items emitted by the
* MaybeSources in the Iterable
* @see <a href="http://reactivex.io/documentation/operators/merge.html">ReactiveX operators documentation: Merge</a>
*/
@SuppressWarnings({ "unchecked", "rawtypes" })
@BackpressureSupport(BackpressureKind.FULL)
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
public static <T> Flowable<T> mergeArrayDelayError(MaybeSource<? extends T>... sources) {
if (sources.length == 0) {
return Flowable.empty();
}
return Flowable.fromArray(sources).flatMap((Function)MaybeToPublisher.instance(), true, sources.length);
}
/**
* Flattens an Iterable of MaybeSources into one Flowable, in a way that allows a Subscriber to receive all
* successfully emitted items from each of the source MaybeSources without being interrupted by an error
* notification from one of them.
* <p>
* This behaves like {@link #merge(Publisher)} except that if any of the merged MaybeSources notify of an
* error via {@link Subscriber#onError onError}, {@code mergeDelayError} will refrain from propagating that
* error notification until all of the merged MaybeSources have finished emitting items.
* <p>
* <img width="640" height="380" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/mergeDelayError.png" alt="">
* <p>
* Even if multiple merged MaybeSources send {@code onError} notifications, {@code mergeDelayError} will only
* invoke the {@code onError} method of its Subscribers once.
* <dl>
* <dt><b>Backpressure:</b></dt>
* <dd>The operator honors backpressure from downstream.</dd>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code mergeDelayError} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param <T> the common element base type
* @param sources
* the Iterable of MaybeSources
* @return a Flowable that emits items that are the result of flattening the items emitted by the
* MaybeSources in the Iterable
* @see <a href="http://reactivex.io/documentation/operators/merge.html">ReactiveX operators documentation: Merge</a>
*/
@SuppressWarnings({ "unchecked", "rawtypes" })
@BackpressureSupport(BackpressureKind.FULL)
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
public static <T> Flowable<T> mergeDelayError(Iterable<? extends MaybeSource<? extends T>> sources) {
return Flowable.fromIterable(sources).flatMap((Function)MaybeToPublisher.instance(), true);
}
/**
* Flattens a Publisher that emits MaybeSources into one Publisher, in a way that allows a Subscriber to
* receive all successfully emitted items from all of the source MaybeSources without being interrupted by
* an error notification from one of them or even the main Publisher.
* <p>
* This behaves like {@link #merge(Publisher)} except that if any of the merged MaybeSources notify of an
* error via {@link Subscriber#onError onError}, {@code mergeDelayError} will refrain from propagating that
* error notification until all of the merged MaybeSources and the main Publisher have finished emitting items.
* <p>
* <img width="640" height="380" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/mergeDelayError.png" alt="">
* <p>
* Even if multiple merged Publishers send {@code onError} notifications, {@code mergeDelayError} will only
* invoke the {@code onError} method of its Subscribers once.
* <dl>
* <dt><b>Backpressure:</b></dt>
* <dd>The operator honors backpressure from downstream. The outer {@code Publisher} is consumed
* in unbounded mode (i.e., no backpressure is applied to it).</dd>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code mergeDelayError} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param <T> the common element base type
* @param sources
* a Publisher that emits MaybeSources
* @return a Flowable that emits all of the items emitted by the Publishers emitted by the
* {@code source} Publisher
* @see <a href="http://reactivex.io/documentation/operators/merge.html">ReactiveX operators documentation: Merge</a>
*/
@BackpressureSupport(BackpressureKind.FULL)
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
public static <T> Flowable<T> mergeDelayError(Publisher<? extends MaybeSource<? extends T>> sources) {
return mergeDelayError(sources, Integer.MAX_VALUE);
}
/**
* Flattens a Publisher that emits MaybeSources into one Publisher, in a way that allows a Subscriber to
* receive all successfully emitted items from all of the source MaybeSources without being interrupted by
* an error notification from one of them or even the main Publisher as well as limiting the total number of active MaybeSources.
* <p>
* This behaves like {@link #merge(Publisher, int)} except that if any of the merged MaybeSources notify of an
* error via {@link Subscriber#onError onError}, {@code mergeDelayError} will refrain from propagating that
* error notification until all of the merged MaybeSources and the main Publisher have finished emitting items.
* <p>
* <img width="640" height="380" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/mergeDelayError.png" alt="">
* <p>
* Even if multiple merged Publishers send {@code onError} notifications, {@code mergeDelayError} will only
* invoke the {@code onError} method of its Subscribers once.
* <dl>
* <dt><b>Backpressure:</b></dt>
* <dd>The operator honors backpressure from downstream. The outer {@code Publisher} is consumed
* in unbounded mode (i.e., no backpressure is applied to it).</dd>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code mergeDelayError} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* <p>History: 2.1.9 - experimental
* @param <T> the common element base type
* @param sources
* a Publisher that emits MaybeSources
* @param maxConcurrency the maximum number of active inner MaybeSources to be merged at a time
* @return a Flowable that emits all of the items emitted by the Publishers emitted by the
* {@code source} Publisher
* @see <a href="http://reactivex.io/documentation/operators/merge.html">ReactiveX operators documentation: Merge</a>
* @since 2.2
*/
@SuppressWarnings({ "unchecked", "rawtypes" })
@BackpressureSupport(BackpressureKind.FULL)
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public static <T> Flowable<T> mergeDelayError(Publisher<? extends MaybeSource<? extends T>> sources, int maxConcurrency) {
ObjectHelper.requireNonNull(sources, "source is null");
ObjectHelper.verifyPositive(maxConcurrency, "maxConcurrency");
return RxJavaPlugins.onAssembly(new FlowableFlatMapPublisher(sources, MaybeToPublisher.instance(), true, maxConcurrency, 1));
}
/**
* Flattens two MaybeSources into one Flowable, in a way that allows a Subscriber to receive all
* successfully emitted items from each of the source MaybeSources without being interrupted by an error
* notification from one of them.
* <p>
* This behaves like {@link #merge(MaybeSource, MaybeSource)} except that if any of the merged MaybeSources
* notify of an error via {@link Subscriber#onError onError}, {@code mergeDelayError} will refrain from
* propagating that error notification until all of the merged MaybeSources have finished emitting items.
* <p>
* <img width="640" height="380" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/mergeDelayError.png" alt="">
* <p>
* Even if both merged MaybeSources send {@code onError} notifications, {@code mergeDelayError} will only
* invoke the {@code onError} method of its Subscribers once.
* <dl>
* <dt><b>Backpressure:</b></dt>
* <dd>The operator honors backpressure from downstream.</dd>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code mergeDelayError} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param <T> the common element base type
* @param source1
* a MaybeSource to be merged
* @param source2
* a MaybeSource to be merged
* @return a Flowable that emits all of the items that are emitted by the two source MaybeSources
* @see <a href="http://reactivex.io/documentation/operators/merge.html">ReactiveX operators documentation: Merge</a>
*/
@SuppressWarnings({ "unchecked" })
@BackpressureSupport(BackpressureKind.FULL)
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public static <T> Flowable<T> mergeDelayError(MaybeSource<? extends T> source1, MaybeSource<? extends T> source2) {
ObjectHelper.requireNonNull(source1, "source1 is null");
ObjectHelper.requireNonNull(source2, "source2 is null");
return mergeArrayDelayError(source1, source2);
}
/**
* Flattens three MaybeSource into one Flowable, in a way that allows a Subscriber to receive all
* successfully emitted items from all of the source MaybeSources without being interrupted by an error
* notification from one of them.
* <p>
* This behaves like {@link #merge(MaybeSource, MaybeSource, MaybeSource)} except that if any of the merged
* MaybeSources notify of an error via {@link Subscriber#onError onError}, {@code mergeDelayError} will refrain
* from propagating that error notification until all of the merged MaybeSources have finished emitting
* items.
* <p>
* <img width="640" height="380" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/mergeDelayError.png" alt="">
* <p>
* Even if multiple merged MaybeSources send {@code onError} notifications, {@code mergeDelayError} will only
* invoke the {@code onError} method of its Subscribers once.
* <dl>
* <dt><b>Backpressure:</b></dt>
* <dd>The operator honors backpressure from downstream.</dd>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code mergeDelayError} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param <T> the common element base type
* @param source1
* a MaybeSource to be merged
* @param source2
* a MaybeSource to be merged
* @param source3
* a MaybeSource to be merged
* @return a Flowable that emits all of the items that are emitted by the source MaybeSources
* @see <a href="http://reactivex.io/documentation/operators/merge.html">ReactiveX operators documentation: Merge</a>
*/
@SuppressWarnings({ "unchecked" })
@BackpressureSupport(BackpressureKind.FULL)
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public static <T> Flowable<T> mergeDelayError(MaybeSource<? extends T> source1,
MaybeSource<? extends T> source2, MaybeSource<? extends T> source3) {
ObjectHelper.requireNonNull(source1, "source1 is null");
ObjectHelper.requireNonNull(source2, "source2 is null");
ObjectHelper.requireNonNull(source3, "source3 is null");
return mergeArrayDelayError(source1, source2, source3);
}
/**
* Flattens four MaybeSources into one Flowable, in a way that allows a Subscriber to receive all
* successfully emitted items from all of the source MaybeSources without being interrupted by an error
* notification from one of them.
* <p>
* This behaves like {@link #merge(MaybeSource, MaybeSource, MaybeSource, MaybeSource)} except that if any of
* the merged MaybeSources notify of an error via {@link Subscriber#onError onError}, {@code mergeDelayError}
* will refrain from propagating that error notification until all of the merged MaybeSources have finished
* emitting items.
* <p>
* <img width="640" height="380" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/mergeDelayError.png" alt="">
* <p>
* Even if multiple merged MaybeSources send {@code onError} notifications, {@code mergeDelayError} will only
* invoke the {@code onError} method of its Subscribers once.
* <dl>
* <dt><b>Backpressure:</b></dt>
* <dd>The operator honors backpressure from downstream.</dd>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code mergeDelayError} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param <T> the common element base type
* @param source1
* a MaybeSource to be merged
* @param source2
* a MaybeSource to be merged
* @param source3
* a MaybeSource to be merged
* @param source4
* a MaybeSource to be merged
* @return a Flowable that emits all of the items that are emitted by the source MaybeSources
* @see <a href="http://reactivex.io/documentation/operators/merge.html">ReactiveX operators documentation: Merge</a>
*/
@SuppressWarnings({ "unchecked" })
@BackpressureSupport(BackpressureKind.FULL)
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public static <T> Flowable<T> mergeDelayError(
MaybeSource<? extends T> source1, MaybeSource<? extends T> source2,
MaybeSource<? extends T> source3, MaybeSource<? extends T> source4) {
ObjectHelper.requireNonNull(source1, "source1 is null");
ObjectHelper.requireNonNull(source2, "source2 is null");
ObjectHelper.requireNonNull(source3, "source3 is null");
ObjectHelper.requireNonNull(source4, "source4 is null");
return mergeArrayDelayError(source1, source2, source3, source4);
}
/**
* Returns a Maybe that never sends any items or notifications to a {@link MaybeObserver}.
* <p>
* <img width="640" height="185" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/never.png" alt="">
* <p>
* This Maybe is useful primarily for testing purposes.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code never} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param <T>
* the type of items (not) emitted by the Maybe
* @return a Maybe that never emits any items or sends any notifications to a {@link MaybeObserver}
* @see <a href="http://reactivex.io/documentation/operators/empty-never-throw.html">ReactiveX operators documentation: Never</a>
*/
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
@SuppressWarnings("unchecked")
public static <T> Maybe<T> never() {
return RxJavaPlugins.onAssembly((Maybe<T>)MaybeNever.INSTANCE);
}
/**
* Returns a Single that emits a Boolean value that indicates whether two MaybeSource sequences are the
* same by comparing the items emitted by each MaybeSource pairwise.
* <p>
* <img width="640" height="385" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/sequenceEqual.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code sequenceEqual} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param source1
* the first MaybeSource to compare
* @param source2
* the second MaybeSource to compare
* @param <T>
* the type of items emitted by each MaybeSource
* @return a Single that emits a Boolean value that indicates whether the two sequences are the same
* @see <a href="http://reactivex.io/documentation/operators/sequenceequal.html">ReactiveX operators documentation: SequenceEqual</a>
*/
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
public static <T> Single<Boolean> sequenceEqual(MaybeSource<? extends T> source1, MaybeSource<? extends T> source2) {
return sequenceEqual(source1, source2, ObjectHelper.equalsPredicate());
}
/**
* Returns a Single that emits a Boolean value that indicates whether two MaybeSources are the
* same by comparing the items emitted by each MaybeSource pairwise based on the results of a specified
* equality function.
* <p>
* <img width="640" height="385" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/sequenceEqual.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code sequenceEqual} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param source1
* the first MaybeSource to compare
* @param source2
* the second MaybeSource to compare
* @param isEqual
* a function used to compare items emitted by each MaybeSource
* @param <T>
* the type of items emitted by each MaybeSource
* @return a Single that emits a Boolean value that indicates whether the two MaybeSource sequences
* are the same according to the specified function
* @see <a href="http://reactivex.io/documentation/operators/sequenceequal.html">ReactiveX operators documentation: SequenceEqual</a>
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public static <T> Single<Boolean> sequenceEqual(MaybeSource<? extends T> source1, MaybeSource<? extends T> source2,
BiPredicate<? super T, ? super T> isEqual) {
ObjectHelper.requireNonNull(source1, "source1 is null");
ObjectHelper.requireNonNull(source2, "source2 is null");
ObjectHelper.requireNonNull(isEqual, "isEqual is null");
return RxJavaPlugins.onAssembly(new MaybeEqualSingle<T>(source1, source2, isEqual));
}
/**
* Returns a Maybe that emits {@code 0L} after a specified delay.
* <p>
* <img width="640" height="200" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/timer.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code timer} operates by default on the {@code computation} {@link Scheduler}.</dd>
* </dl>
*
* @param delay
* the initial delay before emitting a single {@code 0L}
* @param unit
* time units to use for {@code delay}
* @return a Maybe that emits {@code 0L} after a specified delay
* @see <a href="http://reactivex.io/documentation/operators/timer.html">ReactiveX operators documentation: Timer</a>
*/
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.COMPUTATION)
public static Maybe<Long> timer(long delay, TimeUnit unit) {
return timer(delay, unit, Schedulers.computation());
}
/**
* Returns a Maybe that emits {@code 0L} after a specified delay on a specified Scheduler.
* <p>
* <img width="640" height="200" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/timer.s.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>You specify which {@link Scheduler} this operator will use.</dd>
* </dl>
*
* @param delay
* the initial delay before emitting a single 0L
* @param unit
* time units to use for {@code delay}
* @param scheduler
* the {@link Scheduler} to use for scheduling the item
* @return a Maybe that emits {@code 0L} after a specified delay, on a specified Scheduler
* @see <a href="http://reactivex.io/documentation/operators/timer.html">ReactiveX operators documentation: Timer</a>
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.CUSTOM)
public static Maybe<Long> timer(long delay, TimeUnit unit, Scheduler scheduler) {
ObjectHelper.requireNonNull(unit, "unit is null");
ObjectHelper.requireNonNull(scheduler, "scheduler is null");
return RxJavaPlugins.onAssembly(new MaybeTimer(Math.max(0L, delay), unit, scheduler));
}
/**
* <strong>Advanced use only:</strong> creates a Maybe instance without
* any safeguards by using a callback that is called with a MaybeObserver.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code unsafeCreate} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @param <T> the value type
* @param onSubscribe the function that is called with the subscribing MaybeObserver
* @return the new Maybe instance
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public static <T> Maybe<T> unsafeCreate(MaybeSource<T> onSubscribe) {
if (onSubscribe instanceof Maybe) {
throw new IllegalArgumentException("unsafeCreate(Maybe) should be upgraded");
}
ObjectHelper.requireNonNull(onSubscribe, "onSubscribe is null");
return RxJavaPlugins.onAssembly(new MaybeUnsafeCreate<T>(onSubscribe));
}
/**
* Constructs a Maybe that creates a dependent resource object which is disposed of when the
* upstream terminates or the downstream calls dispose().
* <p>
* <img width="640" height="400" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/using.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code using} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param <T> the element type of the generated MaybeSource
* @param <D> the type of the resource associated with the output sequence
* @param resourceSupplier
* the factory function to create a resource object that depends on the Maybe
* @param sourceSupplier
* the factory function to create a MaybeSource
* @param resourceDisposer
* the function that will dispose of the resource
* @return the Maybe whose lifetime controls the lifetime of the dependent resource object
* @see <a href="http://reactivex.io/documentation/operators/using.html">ReactiveX operators documentation: Using</a>
*/
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
public static <T, D> Maybe<T> using(Callable<? extends D> resourceSupplier,
Function<? super D, ? extends MaybeSource<? extends T>> sourceSupplier,
Consumer<? super D> resourceDisposer) {
return using(resourceSupplier, sourceSupplier, resourceDisposer, true);
}
/**
* Constructs a Maybe that creates a dependent resource object which is disposed of just before
* termination if you have set {@code disposeEagerly} to {@code true} and a downstream dispose() does not occur
* before termination. Otherwise resource disposal will occur on call to dispose(). Eager disposal is
* particularly appropriate for a synchronous Maybe that reuses resources. {@code disposeAction} will
* only be called once per subscription.
* <p>
* <img width="640" height="400" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/using.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code using} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param <T> the element type of the generated MaybeSource
* @param <D> the type of the resource associated with the output sequence
* @param resourceSupplier
* the factory function to create a resource object that depends on the Maybe
* @param sourceSupplier
* the factory function to create a MaybeSource
* @param resourceDisposer
* the function that will dispose of the resource
* @param eager
* if {@code true} then disposal will happen either on a dispose() call or just before emission of
* a terminal event ({@code onComplete} or {@code onError}).
* @return the Maybe whose lifetime controls the lifetime of the dependent resource object
* @see <a href="http://reactivex.io/documentation/operators/using.html">ReactiveX operators documentation: Using</a>
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public static <T, D> Maybe<T> using(Callable<? extends D> resourceSupplier,
Function<? super D, ? extends MaybeSource<? extends T>> sourceSupplier,
Consumer<? super D> resourceDisposer, boolean eager) {
ObjectHelper.requireNonNull(resourceSupplier, "resourceSupplier is null");
ObjectHelper.requireNonNull(sourceSupplier, "sourceSupplier is null");
ObjectHelper.requireNonNull(resourceDisposer, "disposer is null");
return RxJavaPlugins.onAssembly(new MaybeUsing<T, D>(resourceSupplier, sourceSupplier, resourceDisposer, eager));
}
/**
* Wraps a MaybeSource instance into a new Maybe instance if not already a Maybe
* instance.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code wrap} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @param <T> the value type
* @param source the source to wrap
* @return the Maybe wrapper or the source cast to Maybe (if possible)
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public static <T> Maybe<T> wrap(MaybeSource<T> source) {
if (source instanceof Maybe) {
return RxJavaPlugins.onAssembly((Maybe<T>)source);
}
ObjectHelper.requireNonNull(source, "onSubscribe is null");
return RxJavaPlugins.onAssembly(new MaybeUnsafeCreate<T>(source));
}
/**
* Returns a Maybe that emits the results of a specified combiner function applied to combinations of
* items emitted, in sequence, by an Iterable of other MaybeSources.
* <p>
* Note on method signature: since Java doesn't allow creating a generic array with {@code new T[]}, the
* implementation of this operator has to create an {@code Object[]} instead. Unfortunately, a
* {@code Function<Integer[], R>} passed to the method would trigger a {@code ClassCastException}.
*
* <p>
* <img width="640" height="380" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/zip.png" alt="">
* <p>This operator terminates eagerly if any of the source MaybeSources signal an onError or onComplete. This
* also means it is possible some sources may not get subscribed to at all.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code zip} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param <T> the common value type
* @param <R> the zipped result type
* @param sources
* an Iterable of source MaybeSources
* @param zipper
* a function that, when applied to an item emitted by each of the source MaybeSources, results in
* an item that will be emitted by the resulting Maybe
* @return a Maybe that emits the zipped results
* @see <a href="http://reactivex.io/documentation/operators/zip.html">ReactiveX operators documentation: Zip</a>
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public static <T, R> Maybe<R> zip(Iterable<? extends MaybeSource<? extends T>> sources, Function<? super Object[], ? extends R> zipper) {
ObjectHelper.requireNonNull(zipper, "zipper is null");
ObjectHelper.requireNonNull(sources, "sources is null");
return RxJavaPlugins.onAssembly(new MaybeZipIterable<T, R>(sources, zipper));
}
/**
* Returns a Maybe that emits the results of a specified combiner function applied to combinations of
* two items emitted, in sequence, by two other MaybeSources.
* <p>
* <img width="640" height="380" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/zip.png" alt="">
* <p>This operator terminates eagerly if any of the source MaybeSources signal an onError or onComplete. This
* also means it is possible some sources may not get subscribed to at all.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code zip} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param <T1> the value type of the first source
* @param <T2> the value type of the second source
* @param <R> the zipped result type
* @param source1
* the first source MaybeSource
* @param source2
* a second source MaybeSource
* @param zipper
* a function that, when applied to an item emitted by each of the source MaybeSources, results
* in an item that will be emitted by the resulting Maybe
* @return a Maybe that emits the zipped results
* @see <a href="http://reactivex.io/documentation/operators/zip.html">ReactiveX operators documentation: Zip</a>
*/
@SuppressWarnings("unchecked")
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public static <T1, T2, R> Maybe<R> zip(
MaybeSource<? extends T1> source1, MaybeSource<? extends T2> source2,
BiFunction<? super T1, ? super T2, ? extends R> zipper) {
ObjectHelper.requireNonNull(source1, "source1 is null");
ObjectHelper.requireNonNull(source2, "source2 is null");
return zipArray(Functions.toFunction(zipper), source1, source2);
}
/**
* Returns a Maybe that emits the results of a specified combiner function applied to combinations of
* three items emitted, in sequence, by three other MaybeSources.
* <p>
* <img width="640" height="380" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/zip.png" alt="">
* <p>This operator terminates eagerly if any of the source MaybeSources signal an onError or onComplete. This
* also means it is possible some sources may not get subscribed to at all.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code zip} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param <T1> the value type of the first source
* @param <T2> the value type of the second source
* @param <T3> the value type of the third source
* @param <R> the zipped result type
* @param source1
* the first source MaybeSource
* @param source2
* a second source MaybeSource
* @param source3
* a third source MaybeSource
* @param zipper
* a function that, when applied to an item emitted by each of the source MaybeSources, results in
* an item that will be emitted by the resulting Maybe
* @return a Maybe that emits the zipped results
* @see <a href="http://reactivex.io/documentation/operators/zip.html">ReactiveX operators documentation: Zip</a>
*/
@SuppressWarnings("unchecked")
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public static <T1, T2, T3, R> Maybe<R> zip(
MaybeSource<? extends T1> source1, MaybeSource<? extends T2> source2, MaybeSource<? extends T3> source3,
Function3<? super T1, ? super T2, ? super T3, ? extends R> zipper) {
ObjectHelper.requireNonNull(source1, "source1 is null");
ObjectHelper.requireNonNull(source2, "source2 is null");
ObjectHelper.requireNonNull(source3, "source3 is null");
return zipArray(Functions.toFunction(zipper), source1, source2, source3);
}
/**
* Returns a Maybe that emits the results of a specified combiner function applied to combinations of
* four items emitted, in sequence, by four other MaybeSources.
* <p>
* <img width="640" height="380" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/zip.png" alt="">
* <p>This operator terminates eagerly if any of the source MaybeSources signal an onError or onComplete. This
* also means it is possible some sources may not get subscribed to at all.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code zip} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param <T1> the value type of the first source
* @param <T2> the value type of the second source
* @param <T3> the value type of the third source
* @param <T4> the value type of the fourth source
* @param <R> the zipped result type
* @param source1
* the first source MaybeSource
* @param source2
* a second source MaybeSource
* @param source3
* a third source MaybeSource
* @param source4
* a fourth source MaybeSource
* @param zipper
* a function that, when applied to an item emitted by each of the source MaybeSources, results in
* an item that will be emitted by the resulting Maybe
* @return a Maybe that emits the zipped results
* @see <a href="http://reactivex.io/documentation/operators/zip.html">ReactiveX operators documentation: Zip</a>
*/
@SuppressWarnings("unchecked")
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public static <T1, T2, T3, T4, R> Maybe<R> zip(
MaybeSource<? extends T1> source1, MaybeSource<? extends T2> source2, MaybeSource<? extends T3> source3,
MaybeSource<? extends T4> source4,
Function4<? super T1, ? super T2, ? super T3, ? super T4, ? extends R> zipper) {
ObjectHelper.requireNonNull(source1, "source1 is null");
ObjectHelper.requireNonNull(source2, "source2 is null");
ObjectHelper.requireNonNull(source3, "source3 is null");
ObjectHelper.requireNonNull(source4, "source4 is null");
return zipArray(Functions.toFunction(zipper), source1, source2, source3, source4);
}
/**
* Returns a Maybe that emits the results of a specified combiner function applied to combinations of
* five items emitted, in sequence, by five other MaybeSources.
* <p>
* <img width="640" height="380" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/zip.png" alt="">
* <p>This operator terminates eagerly if any of the source MaybeSources signal an onError or onComplete. This
* also means it is possible some sources may not get subscribed to at all.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code zip} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param <T1> the value type of the first source
* @param <T2> the value type of the second source
* @param <T3> the value type of the third source
* @param <T4> the value type of the fourth source
* @param <T5> the value type of the fifth source
* @param <R> the zipped result type
* @param source1
* the first source MaybeSource
* @param source2
* a second source MaybeSource
* @param source3
* a third source MaybeSource
* @param source4
* a fourth source MaybeSource
* @param source5
* a fifth source MaybeSource
* @param zipper
* a function that, when applied to an item emitted by each of the source MaybeSources, results in
* an item that will be emitted by the resulting Maybe
* @return a Maybe that emits the zipped results
* @see <a href="http://reactivex.io/documentation/operators/zip.html">ReactiveX operators documentation: Zip</a>
*/
@SuppressWarnings("unchecked")
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public static <T1, T2, T3, T4, T5, R> Maybe<R> zip(
MaybeSource<? extends T1> source1, MaybeSource<? extends T2> source2, MaybeSource<? extends T3> source3,
MaybeSource<? extends T4> source4, MaybeSource<? extends T5> source5,
Function5<? super T1, ? super T2, ? super T3, ? super T4, ? super T5, ? extends R> zipper) {
ObjectHelper.requireNonNull(source1, "source1 is null");
ObjectHelper.requireNonNull(source2, "source2 is null");
ObjectHelper.requireNonNull(source3, "source3 is null");
ObjectHelper.requireNonNull(source4, "source4 is null");
ObjectHelper.requireNonNull(source5, "source5 is null");
return zipArray(Functions.toFunction(zipper), source1, source2, source3, source4, source5);
}
/**
* Returns a Maybe that emits the results of a specified combiner function applied to combinations of
* six items emitted, in sequence, by six other MaybeSources.
* <p>
* <img width="640" height="380" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/zip.png" alt="">
* <p>This operator terminates eagerly if any of the source MaybeSources signal an onError or onComplete. This
* also means it is possible some sources may not get subscribed to at all.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code zip} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param <T1> the value type of the first source
* @param <T2> the value type of the second source
* @param <T3> the value type of the third source
* @param <T4> the value type of the fourth source
* @param <T5> the value type of the fifth source
* @param <T6> the value type of the sixth source
* @param <R> the zipped result type
* @param source1
* the first source MaybeSource
* @param source2
* a second source MaybeSource
* @param source3
* a third source MaybeSource
* @param source4
* a fourth source MaybeSource
* @param source5
* a fifth source MaybeSource
* @param source6
* a sixth source MaybeSource
* @param zipper
* a function that, when applied to an item emitted by each of the source MaybeSources, results in
* an item that will be emitted by the resulting Maybe
* @return a Maybe that emits the zipped results
* @see <a href="http://reactivex.io/documentation/operators/zip.html">ReactiveX operators documentation: Zip</a>
*/
@SuppressWarnings("unchecked")
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public static <T1, T2, T3, T4, T5, T6, R> Maybe<R> zip(
MaybeSource<? extends T1> source1, MaybeSource<? extends T2> source2, MaybeSource<? extends T3> source3,
MaybeSource<? extends T4> source4, MaybeSource<? extends T5> source5, MaybeSource<? extends T6> source6,
Function6<? super T1, ? super T2, ? super T3, ? super T4, ? super T5, ? super T6, ? extends R> zipper) {
ObjectHelper.requireNonNull(source1, "source1 is null");
ObjectHelper.requireNonNull(source2, "source2 is null");
ObjectHelper.requireNonNull(source3, "source3 is null");
ObjectHelper.requireNonNull(source4, "source4 is null");
ObjectHelper.requireNonNull(source5, "source5 is null");
ObjectHelper.requireNonNull(source6, "source6 is null");
return zipArray(Functions.toFunction(zipper), source1, source2, source3, source4, source5, source6);
}
/**
* Returns a Maybe that emits the results of a specified combiner function applied to combinations of
* seven items emitted, in sequence, by seven other MaybeSources.
* <p>
* <img width="640" height="380" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/zip.png" alt="">
* <p>This operator terminates eagerly if any of the source MaybeSources signal an onError or onComplete. This
* also means it is possible some sources may not get subscribed to at all.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code zip} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param <T1> the value type of the first source
* @param <T2> the value type of the second source
* @param <T3> the value type of the third source
* @param <T4> the value type of the fourth source
* @param <T5> the value type of the fifth source
* @param <T6> the value type of the sixth source
* @param <T7> the value type of the seventh source
* @param <R> the zipped result type
* @param source1
* the first source MaybeSource
* @param source2
* a second source MaybeSource
* @param source3
* a third source MaybeSource
* @param source4
* a fourth source MaybeSource
* @param source5
* a fifth source MaybeSource
* @param source6
* a sixth source MaybeSource
* @param source7
* a seventh source MaybeSource
* @param zipper
* a function that, when applied to an item emitted by each of the source MaybeSources, results in
* an item that will be emitted by the resulting Maybe
* @return a Maybe that emits the zipped results
* @see <a href="http://reactivex.io/documentation/operators/zip.html">ReactiveX operators documentation: Zip</a>
*/
@SuppressWarnings("unchecked")
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public static <T1, T2, T3, T4, T5, T6, T7, R> Maybe<R> zip(
MaybeSource<? extends T1> source1, MaybeSource<? extends T2> source2, MaybeSource<? extends T3> source3,
MaybeSource<? extends T4> source4, MaybeSource<? extends T5> source5, MaybeSource<? extends T6> source6,
MaybeSource<? extends T7> source7,
Function7<? super T1, ? super T2, ? super T3, ? super T4, ? super T5, ? super T6, ? super T7, ? extends R> zipper) {
ObjectHelper.requireNonNull(source1, "source1 is null");
ObjectHelper.requireNonNull(source2, "source2 is null");
ObjectHelper.requireNonNull(source3, "source3 is null");
ObjectHelper.requireNonNull(source4, "source4 is null");
ObjectHelper.requireNonNull(source5, "source5 is null");
ObjectHelper.requireNonNull(source6, "source6 is null");
ObjectHelper.requireNonNull(source7, "source7 is null");
return zipArray(Functions.toFunction(zipper), source1, source2, source3, source4, source5, source6, source7);
}
/**
* Returns a Maybe that emits the results of a specified combiner function applied to combinations of
* eight items emitted, in sequence, by eight other MaybeSources.
* <p>
* <img width="640" height="380" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/zip.png" alt="">
* <p>This operator terminates eagerly if any of the source MaybeSources signal an onError or onComplete. This
* also means it is possible some sources may not get subscribed to at all.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code zip} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param <T1> the value type of the first source
* @param <T2> the value type of the second source
* @param <T3> the value type of the third source
* @param <T4> the value type of the fourth source
* @param <T5> the value type of the fifth source
* @param <T6> the value type of the sixth source
* @param <T7> the value type of the seventh source
* @param <T8> the value type of the eighth source
* @param <R> the zipped result type
* @param source1
* the first source MaybeSource
* @param source2
* a second source MaybeSource
* @param source3
* a third source MaybeSource
* @param source4
* a fourth source MaybeSource
* @param source5
* a fifth source MaybeSource
* @param source6
* a sixth source MaybeSource
* @param source7
* a seventh source MaybeSource
* @param source8
* an eighth source MaybeSource
* @param zipper
* a function that, when applied to an item emitted by each of the source MaybeSources, results in
* an item that will be emitted by the resulting Maybe
* @return a Maybe that emits the zipped results
* @see <a href="http://reactivex.io/documentation/operators/zip.html">ReactiveX operators documentation: Zip</a>
*/
@SuppressWarnings("unchecked")
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public static <T1, T2, T3, T4, T5, T6, T7, T8, R> Maybe<R> zip(
MaybeSource<? extends T1> source1, MaybeSource<? extends T2> source2, MaybeSource<? extends T3> source3,
MaybeSource<? extends T4> source4, MaybeSource<? extends T5> source5, MaybeSource<? extends T6> source6,
MaybeSource<? extends T7> source7, MaybeSource<? extends T8> source8,
Function8<? super T1, ? super T2, ? super T3, ? super T4, ? super T5, ? super T6, ? super T7, ? super T8, ? extends R> zipper) {
ObjectHelper.requireNonNull(source1, "source1 is null");
ObjectHelper.requireNonNull(source2, "source2 is null");
ObjectHelper.requireNonNull(source3, "source3 is null");
ObjectHelper.requireNonNull(source4, "source4 is null");
ObjectHelper.requireNonNull(source5, "source5 is null");
ObjectHelper.requireNonNull(source6, "source6 is null");
ObjectHelper.requireNonNull(source7, "source7 is null");
ObjectHelper.requireNonNull(source8, "source8 is null");
return zipArray(Functions.toFunction(zipper), source1, source2, source3, source4, source5, source6, source7, source8);
}
/**
* Returns a Maybe that emits the results of a specified combiner function applied to combinations of
* nine items emitted, in sequence, by nine other MaybeSources.
* <p>
* <img width="640" height="380" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/zip.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code zip} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* <p>This operator terminates eagerly if any of the source MaybeSources signal an onError or onComplete. This
* also means it is possible some sources may not get subscribed to at all.
*
* @param <T1> the value type of the first source
* @param <T2> the value type of the second source
* @param <T3> the value type of the third source
* @param <T4> the value type of the fourth source
* @param <T5> the value type of the fifth source
* @param <T6> the value type of the sixth source
* @param <T7> the value type of the seventh source
* @param <T8> the value type of the eighth source
* @param <T9> the value type of the ninth source
* @param <R> the zipped result type
* @param source1
* the first source MaybeSource
* @param source2
* a second source MaybeSource
* @param source3
* a third source MaybeSource
* @param source4
* a fourth source MaybeSource
* @param source5
* a fifth source MaybeSource
* @param source6
* a sixth source MaybeSource
* @param source7
* a seventh source MaybeSource
* @param source8
* an eighth source MaybeSource
* @param source9
* a ninth source MaybeSource
* @param zipper
* a function that, when applied to an item emitted by each of the source MaybeSources, results in
* an item that will be emitted by the resulting MaybeSource
* @return a Maybe that emits the zipped results
* @see <a href="http://reactivex.io/documentation/operators/zip.html">ReactiveX operators documentation: Zip</a>
*/
@SuppressWarnings("unchecked")
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public static <T1, T2, T3, T4, T5, T6, T7, T8, T9, R> Maybe<R> zip(
MaybeSource<? extends T1> source1, MaybeSource<? extends T2> source2, MaybeSource<? extends T3> source3,
MaybeSource<? extends T4> source4, MaybeSource<? extends T5> source5, MaybeSource<? extends T6> source6,
MaybeSource<? extends T7> source7, MaybeSource<? extends T8> source8, MaybeSource<? extends T9> source9,
Function9<? super T1, ? super T2, ? super T3, ? super T4, ? super T5, ? super T6, ? super T7, ? super T8, ? super T9, ? extends R> zipper) {
ObjectHelper.requireNonNull(source1, "source1 is null");
ObjectHelper.requireNonNull(source2, "source2 is null");
ObjectHelper.requireNonNull(source3, "source3 is null");
ObjectHelper.requireNonNull(source4, "source4 is null");
ObjectHelper.requireNonNull(source5, "source5 is null");
ObjectHelper.requireNonNull(source6, "source6 is null");
ObjectHelper.requireNonNull(source7, "source7 is null");
ObjectHelper.requireNonNull(source8, "source8 is null");
ObjectHelper.requireNonNull(source9, "source9 is null");
return zipArray(Functions.toFunction(zipper), source1, source2, source3, source4, source5, source6, source7, source8, source9);
}
/**
 * Combines the success values of an array of MaybeSources through a combiner function and
 * emits the resulting item.
 * <p>
 * Note on method signature: since Java doesn't allow creating a generic array with {@code new T[]}, the
 * implementation of this operator has to create an {@code Object[]} instead. Unfortunately, a
 * {@code Function<Integer[], R>} passed to the method would trigger a {@code ClassCastException}.
 * <p>
 * <img width="640" height="380" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/zip.png" alt="">
 * <p>This operator terminates eagerly if any of the source MaybeSources signal an onError or onComplete. This
 * also means it is possible some sources may not get subscribed to at all.
 * <dl>
 *  <dt><b>Scheduler:</b></dt>
 *  <dd>{@code zipArray} does not operate by default on a particular {@link Scheduler}.</dd>
 * </dl>
 *
 * @param <T> the common element type of the sources
 * @param <R> the result type
 * @param sources
 *            the array of source MaybeSources to combine
 * @param zipper
 *            a function that receives one success value from each source and returns the item
 *            to be emitted by the resulting MaybeSource
 * @return a Maybe that emits the zipped result
 * @see <a href="http://reactivex.io/documentation/operators/zip.html">ReactiveX operators documentation: Zip</a>
 */
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public static <T, R> Maybe<R> zipArray(Function<? super Object[], ? extends R> zipper,
        MaybeSource<? extends T>... sources) {
    ObjectHelper.requireNonNull(sources, "sources is null");
    // An empty source array short-circuits to an empty Maybe; the zipper is
    // deliberately validated afterwards to keep the established check order.
    if (sources.length == 0) {
        return empty();
    }
    ObjectHelper.requireNonNull(zipper, "zipper is null");
    MaybeZipArray<T, R> parent = new MaybeZipArray<T, R>(sources, zipper);
    return RxJavaPlugins.onAssembly(parent);
}
// ------------------------------------------------------------------
// Instance methods
// ------------------------------------------------------------------
/**
 * Mirrors whichever MaybeSource — this one or {@code other} — signals an event first.
 * <p>
 * <img width="640" height="385" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/amb.png" alt="">
 * <dl>
 *  <dt><b>Scheduler:</b></dt>
 *  <dd>{@code ambWith} does not operate by default on a particular {@link Scheduler}.</dd>
 * </dl>
 *
 * @param other
 *            a MaybeSource competing to react first. A subscription to this provided source will occur after
 *            subscribing to the current source.
 * @return a Maybe that emits the same sequence as whichever of the source MaybeSources first
 *         signalled
 * @see <a href="http://reactivex.io/documentation/operators/amb.html">ReactiveX operators documentation: Amb</a>
 */
@SuppressWarnings("unchecked")
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final Maybe<T> ambWith(MaybeSource<? extends T> other) {
    // requireNonNull returns its argument, so the null check and the call can be fused;
    // evaluation order (check before ambArray runs) is unchanged.
    return ambArray(this, ObjectHelper.requireNonNull(other, "other is null"));
}
/**
 * Calls the specified converter function during assembly time and returns its resulting value.
 * <p>
 * This allows fluent conversion to any other type.
 * <dl>
 *  <dt><b>Scheduler:</b></dt>
 *  <dd>{@code as} does not operate by default on a particular {@link Scheduler}.</dd>
 * </dl>
 * <p>History: 2.1.7 - experimental
 * @param <R> the resulting object type
 * @param converter the function that receives the current Maybe instance and returns a value
 * @return the converted value
 * @throws NullPointerException if converter is null
 * @since 2.2
 */
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
public final <R> R as(@NonNull MaybeConverter<T, ? extends R> converter) {
    // Validate eagerly, then hand this Maybe to the converter at assembly time.
    MaybeConverter<T, ? extends R> c = ObjectHelper.requireNonNull(converter, "converter is null");
    return c.apply(this);
}
/**
 * Blocks the calling thread until this Maybe terminates, then returns the success value,
 * {@code null} if it completed empty, or throws the error it signalled.
 * <dl>
 * <dt><b>Scheduler:</b></dt>
 * <dd>{@code blockingGet} does not operate by default on a particular {@link Scheduler}.</dd>
 * <dt><b>Error handling:</b></dt>
 * <dd>If the source signals an error, the operator wraps a checked {@link Exception}
 * into {@link RuntimeException} and throws that. Otherwise, {@code RuntimeException}s and
 * {@link Error}s are rethrown as they are.</dd>
 * </dl>
 * @return the success value, or {@code null} when this Maybe completes without one
 */
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
public final T blockingGet() {
    // The blocking observer latches the terminal event and replays it to this thread.
    BlockingMultiObserver<T> blockingObserver = new BlockingMultiObserver<T>();
    subscribe(blockingObserver);
    return blockingObserver.blockingGet();
}
/**
 * Blocks the calling thread until this Maybe terminates, then returns the success value,
 * {@code defaultValue} if it completed empty, or throws the error it signalled.
 * <dl>
 * <dt><b>Scheduler:</b></dt>
 * <dd>{@code blockingGet} does not operate by default on a particular {@link Scheduler}.</dd>
 * <dt><b>Error handling:</b></dt>
 * <dd>If the source signals an error, the operator wraps a checked {@link Exception}
 * into {@link RuntimeException} and throws that. Otherwise, {@code RuntimeException}s and
 * {@link Error}s are rethrown as they are.</dd>
 * </dl>
 * @param defaultValue the default item to return if this Maybe is empty
 * @return the success value or {@code defaultValue}
 */
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
public final T blockingGet(T defaultValue) {
    ObjectHelper.requireNonNull(defaultValue, "defaultValue is null");
    // The blocking observer latches the terminal event and replays it to this thread.
    BlockingMultiObserver<T> blockingObserver = new BlockingMultiObserver<T>();
    subscribe(blockingObserver);
    return blockingObserver.blockingGet(defaultValue);
}
/**
 * Returns a Maybe that lazily subscribes to this Maybe on the first downstream subscription,
 * caches its terminal event and replays it to all current and future subscribers.
 * <p>
 * <img width="640" height="410" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/cache.png" alt="">
 * <p>
 * The operator subscribes only when the first downstream subscriber subscribes and maintains
 * a single subscription towards this Maybe.
 * <p>
 * <em>Note:</em> You sacrifice the ability to dispose the origin when you use the {@code cache}.
 * <dl>
 *  <dt><b>Scheduler:</b></dt>
 *  <dd>{@code cache} does not operate by default on a particular {@link Scheduler}.</dd>
 * </dl>
 *
 * @return a Maybe that, when first subscribed to, caches all of its items and notifications for the
 *         benefit of subsequent subscribers
 * @see <a href="http://reactivex.io/documentation/operators/replay.html">ReactiveX operators documentation: Replay</a>
 */
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
public final Maybe<T> cache() {
    MaybeCache<T> cached = new MaybeCache<T>(this);
    return RxJavaPlugins.onAssembly(cached);
}
/**
 * Casts the success value of the current Maybe into the target type, signalling a
 * {@code ClassCastException} if the value is not compatible.
 * <dl>
 *  <dt><b>Scheduler:</b></dt>
 *  <dd>{@code cast} does not operate by default on a particular {@link Scheduler}.</dd>
 * </dl>
 * @param <U> the target type
 * @param clazz the type token to use for casting the success result from the current Maybe
 * @return the new Maybe instance
 */
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final <U> Maybe<U> cast(final Class<? extends U> clazz) {
    // requireNonNull returns its argument, so validation and use can be fused;
    // the cast itself is deferred to a map over the success value.
    return map(Functions.castFunction(ObjectHelper.requireNonNull(clazz, "clazz is null")));
}
/**
 * Transforms this Maybe by applying a particular Transformer function to it at assembly time.
 * <p>
 * This method operates on the Maybe itself whereas {@link #lift} operates on the Maybe's MaybeObservers.
 * <p>
 * If the operator you are creating is designed to act on the individual item emitted by a Maybe, use
 * {@link #lift}. If your operator is designed to transform the source Maybe as a whole (for instance, by
 * applying a particular set of existing RxJava operators to it) use {@code compose}.
 * <dl>
 *  <dt><b>Scheduler:</b></dt>
 *  <dd>{@code compose} does not operate by default on a particular {@link Scheduler}.</dd>
 * </dl>
 *
 * @param <R> the value type of the Maybe returned by the transformer function
 * @param transformer the transformer function, not null
 * @return a Maybe, transformed by the transformer function
 * @see <a href="https://github.com/ReactiveX/RxJava/wiki/Implementing-Your-Own-Operators">RxJava wiki: Implementing Your Own Operators</a>
 */
@SuppressWarnings("unchecked")
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
public final <R> Maybe<R> compose(MaybeTransformer<? super T, ? extends R> transformer) {
    // The unchecked narrowing of the wildcard transformer mirrors the rest of the class
    // and is safe because the transformer only consumes T and produces R.
    MaybeTransformer<T, R> t = (MaybeTransformer<T, R>) ObjectHelper.requireNonNull(transformer, "transformer is null");
    return wrap(t.apply(this));
}
/**
 * Maps the success item of this Maybe through a function that returns a MaybeSource and
 * mirrors that inner source's signals.
 * <p>
 * <img width="640" height="356" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.flatMap.png" alt="">
 * <dl>
 * <dt><b>Scheduler:</b></dt>
 * <dd>{@code concatMap} does not operate by default on a particular {@link Scheduler}.</dd>
 * </dl>
 * <p>Note that flatMap and concatMap for Maybe is the same operation.
 * @param <R> the result value type
 * @param mapper
 *            a function that, when applied to the item emitted by the source Maybe, returns a MaybeSource
 * @return the Maybe returned from {@code func} when applied to the item emitted by the source Maybe
 * @see <a href="http://reactivex.io/documentation/operators/flatmap.html">ReactiveX operators documentation: FlatMap</a>
 */
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final <R> Maybe<R> concatMap(Function<? super T, ? extends MaybeSource<? extends R>> mapper) {
    ObjectHelper.requireNonNull(mapper, "mapper is null");
    // Maybe emits at most one item, so concatMap and flatMap share one implementation.
    MaybeFlatten<T, R> parent = new MaybeFlatten<T, R>(this, mapper);
    return RxJavaPlugins.onAssembly(parent);
}
/**
 * Returns a Flowable that first emits the items of this MaybeSource and then those of
 * {@code other}, one after the other, without interleaving them.
 * <p>
 * <img width="640" height="380" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/concat.png" alt="">
 * <dl>
 *  <dt><b>Backpressure:</b></dt>
 *  <dd>The operator honors backpressure from downstream.</dd>
 *  <dt><b>Scheduler:</b></dt>
 *  <dd>{@code concatWith} does not operate by default on a particular {@link Scheduler}.</dd>
 * </dl>
 *
 * @param other
 *            a MaybeSource to be concatenated after the current
 * @return a Flowable that emits items emitted by the two source MaybeSources, one after the other,
 *         without interleaving them
 * @see <a href="http://reactivex.io/documentation/operators/concat.html">ReactiveX operators documentation: Concat</a>
 */
@BackpressureSupport(BackpressureKind.FULL)
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final Flowable<T> concatWith(MaybeSource<? extends T> other) {
    // requireNonNull returns its argument; the null check still happens before concat runs.
    return concat(this, ObjectHelper.requireNonNull(other, "other is null"));
}
/**
 * Returns a Single that emits {@code true} if this Maybe emits an item equal to the
 * specified one, or {@code false} otherwise.
 * <p>
 * <img width="640" height="320" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/contains.png" alt="">
 * <dl>
 *  <dt><b>Scheduler:</b></dt>
 *  <dd>{@code contains} does not operate by default on a particular {@link Scheduler}.</dd>
 * </dl>
 *
 * @param item
 *            the item to search for in the emissions from the source Maybe, not null
 * @return a Single that emits {@code true} if the specified item is emitted by the source Maybe,
 *         or {@code false} if the source Maybe completes without emitting that item
 * @see <a href="http://reactivex.io/documentation/operators/contains.html">ReactiveX operators documentation: Contains</a>
 */
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final Single<Boolean> contains(final Object item) {
    ObjectHelper.requireNonNull(item, "item is null");
    MaybeContains<T> parent = new MaybeContains<T>(this, item);
    return RxJavaPlugins.onAssembly(parent);
}
/**
 * Returns a Single that counts the total number of items emitted (0 or 1) by the source Maybe and emits
 * this count as a 64-bit Long.
 * <p>
 * <img width="640" height="310" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/longCount.png" alt="">
 * <dl>
 * <dt><b>Scheduler:</b></dt>
 * <dd>{@code count} does not operate by default on a particular {@link Scheduler}.</dd>
 * </dl>
 *
 * @return a Single that emits a single item: the number of items emitted by the source Maybe as a
 *         64-bit Long item
 * @see <a href="http://reactivex.io/documentation/operators/count.html">ReactiveX operators documentation: Count</a>
 */
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
public final Single<Long> count() {
    // For a Maybe the count is necessarily 0 (empty) or 1 (success).
    return RxJavaPlugins.onAssembly(new MaybeCount<T>(this));
}
/**
 * Returns a Maybe that emits this Maybe's success item, or the given default item
 * when this Maybe completes empty.
 * <p>
 * Note that the result Maybe is semantically equivalent to a {@code Single}, since it's guaranteed
 * to emit exactly one item or an error. See {@link #toSingle(Object)} for a method with equivalent
 * behavior which returns a {@code Single}.
 * <p>
 * <img width="640" height="305" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/defaultIfEmpty.png" alt="">
 * <dl>
 *  <dt><b>Scheduler:</b></dt>
 *  <dd>{@code defaultIfEmpty} does not operate by default on a particular {@link Scheduler}.</dd>
 * </dl>
 *
 * @param defaultItem
 *            the item to emit if the source Maybe emits no items
 * @return a Maybe that emits either the specified default item if the source Maybe emits no
 *         items, or the items emitted by the source Maybe
 * @see <a href="http://reactivex.io/documentation/operators/defaultifempty.html">ReactiveX operators documentation: DefaultIfEmpty</a>
 */
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final Maybe<T> defaultIfEmpty(T defaultItem) {
    ObjectHelper.requireNonNull(defaultItem, "defaultItem is null");
    // Implemented as a fallback: switch to a constant Maybe when this one is empty.
    Maybe<T> fallback = just(defaultItem);
    return switchIfEmpty(fallback);
}
/**
 * Returns a Maybe that signals the events emitted by the source Maybe shifted forward in time by a
 * specified delay.
 * <p>
 * <img width="640" height="310" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/delay.png" alt="">
 * <dl>
 * <dt><b>Scheduler:</b></dt>
 * <dd>This version of {@code delay} operates by default on the {@code computation} {@link Scheduler}.</dd>
 * </dl>
 *
 * @param delay
 *            the delay to shift the source by
 * @param unit
 *            the {@link TimeUnit} in which {@code delay} is defined
 * @return the new Maybe instance
 * @see <a href="http://reactivex.io/documentation/operators/delay.html">ReactiveX operators documentation: Delay</a>
 */
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.COMPUTATION)
public final Maybe<T> delay(long delay, TimeUnit unit) {
    // Delegates to the Scheduler-taking overload with the computation scheduler.
    return delay(delay, unit, Schedulers.computation());
}
/**
 * Returns a Maybe that signals the events emitted by the source Maybe shifted forward in time by a
 * specified delay running on the specified Scheduler.
 * <p>
 * <img width="640" height="310" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/delay.s.png" alt="">
 * <dl>
 *  <dt><b>Scheduler:</b></dt>
 *  <dd>you specify which {@link Scheduler} this operator will use.</dd>
 * </dl>
 *
 * @param delay
 *            the delay to shift the source by
 * @param unit
 *            the time unit of {@code delay}
 * @param scheduler
 *            the {@link Scheduler} to use for delaying
 * @return the new Maybe instance
 * @see <a href="http://reactivex.io/documentation/operators/delay.html">ReactiveX operators documentation: Delay</a>
 */
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.CUSTOM)
public final Maybe<T> delay(long delay, TimeUnit unit, Scheduler scheduler) {
    ObjectHelper.requireNonNull(unit, "unit is null");
    ObjectHelper.requireNonNull(scheduler, "scheduler is null");
    // Negative delays are clamped to zero rather than rejected.
    long clampedDelay = Math.max(0L, delay);
    MaybeDelay<T> parent = new MaybeDelay<T>(this, clampedDelay, unit, scheduler);
    return RxJavaPlugins.onAssembly(parent);
}
/**
 * Delays the emission of this Maybe's terminal signals until the given Publisher signals
 * an item or completes.
 * <p>
 * <img width="640" height="450" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/delay.oo.png" alt="">
 * <dl>
 *  <dt><b>Backpressure:</b></dt>
 *  <dd>The {@code delayIndicator} is consumed in an unbounded manner but is cancelled after
 *  the first item it produces.</dd>
 *  <dt><b>Scheduler:</b></dt>
 *  <dd>This version of {@code delay} does not operate by default on a particular {@link Scheduler}.</dd>
 * </dl>
 *
 * @param <U>
 *            the subscription delay value type (ignored)
 * @param <V>
 *            the item delay value type (ignored)
 * @param delayIndicator
 *            the Publisher that gets subscribed to when this Maybe signals an event and that
 *            signal is emitted when the Publisher signals an item or completes
 * @return the new Maybe instance
 * @see <a href="http://reactivex.io/documentation/operators/delay.html">ReactiveX operators documentation: Delay</a>
 */
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
@BackpressureSupport(BackpressureKind.UNBOUNDED_IN)
public final <U, V> Maybe<T> delay(Publisher<U> delayIndicator) {
    ObjectHelper.requireNonNull(delayIndicator, "delayIndicator is null");
    // NOTE(review): the V type parameter is unused; presumably kept for API/binary compatibility.
    MaybeDelayOtherPublisher<T, U> parent = new MaybeDelayOtherPublisher<T, U>(this, delayIndicator);
    return RxJavaPlugins.onAssembly(parent);
}
/**
 * Returns a Maybe that delays the subscription to this Maybe
 * until the other Publisher emits an element or completes normally.
 * <dl>
 *  <dt><b>Backpressure:</b></dt>
 *  <dd>The {@code Publisher} source is consumed in an unbounded fashion (without applying backpressure).</dd>
 *  <dt><b>Scheduler:</b></dt>
 *  <dd>This method does not operate by default on a particular {@link Scheduler}.</dd>
 * </dl>
 *
 * @param <U> the value type of the other Publisher, irrelevant
 * @param subscriptionIndicator the other Publisher that should trigger the subscription
 *        to this Publisher.
 * @return a Maybe that delays the subscription to this Maybe
 *         until the other Publisher emits an element or completes normally.
 */
@BackpressureSupport(BackpressureKind.UNBOUNDED_IN)
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final <U> Maybe<T> delaySubscription(Publisher<U> subscriptionIndicator) {
    ObjectHelper.requireNonNull(subscriptionIndicator, "subscriptionIndicator is null");
    MaybeDelaySubscriptionOtherPublisher<T, U> parent =
            new MaybeDelaySubscriptionOtherPublisher<T, U>(this, subscriptionIndicator);
    return RxJavaPlugins.onAssembly(parent);
}
/**
 * Returns a Maybe that delays the subscription to the source Maybe by a given amount of time.
 * <p>
 * <img width="640" height="310" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/delaySubscription.png" alt="">
 * <dl>
 *  <dt><b>Scheduler:</b></dt>
 *  <dd>This version of {@code delaySubscription} operates by default on the {@code computation} {@link Scheduler}.</dd>
 * </dl>
 *
 * @param delay
 *            the time to delay the subscription
 * @param unit
 *            the time unit of {@code delay}
 * @return a Maybe that delays the subscription to the source Maybe by the given amount
 * @see <a href="http://reactivex.io/documentation/operators/delay.html">ReactiveX operators documentation: Delay</a>
 */
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.COMPUTATION)
public final Maybe<T> delaySubscription(long delay, TimeUnit unit) {
    // Delegates to the Scheduler-taking overload with the computation scheduler.
    Scheduler defaultScheduler = Schedulers.computation();
    return delaySubscription(delay, unit, defaultScheduler);
}
/**
 * Returns a Maybe that delays the subscription to the source Maybe by a given amount of time,
 * both waiting and subscribing on a given Scheduler.
 * <p>
 * <img width="640" height="310" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/delaySubscription.s.png" alt="">
 * <dl>
 *  <dt><b>Scheduler:</b></dt>
 *  <dd>You specify which {@link Scheduler} this operator will use.</dd>
 * </dl>
 *
 * @param delay
 *            the time to delay the subscription
 * @param unit
 *            the time unit of {@code delay}
 * @param scheduler
 *            the Scheduler on which the waiting and subscription will happen
 * @return a Maybe that delays the subscription to the source Maybe by a given
 *         amount, waiting and subscribing on the given Scheduler
 * @see <a href="http://reactivex.io/documentation/operators/delay.html">ReactiveX operators documentation: Delay</a>
 */
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.CUSTOM)
public final Maybe<T> delaySubscription(long delay, TimeUnit unit, Scheduler scheduler) {
    // A one-shot timer acts as the subscription indicator for the Publisher overload.
    Flowable<Long> timer = Flowable.timer(delay, unit, scheduler);
    return delaySubscription(timer);
}
/**
 * Calls the specified consumer with the success item after this item has been emitted to the downstream.
 * <p>Note that the {@code onAfterSuccess} action is shared between subscriptions and as such
 * should be thread-safe.
 * <dl>
 *  <dt><b>Scheduler:</b></dt>
 *  <dd>{@code doAfterSuccess} does not operate by default on a particular {@link Scheduler}.</dd>
 * </dl>
 * <p>History: 2.0.1 - experimental
 * @param onAfterSuccess the Consumer that will be called after emitting an item from upstream to the downstream
 * @return the new Maybe instance
 * @since 2.1
 */
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final Maybe<T> doAfterSuccess(Consumer<? super T> onAfterSuccess) {
    ObjectHelper.requireNonNull(onAfterSuccess, "onAfterSuccess is null");
    MaybeDoAfterSuccess<T> parent = new MaybeDoAfterSuccess<T>(this, onAfterSuccess);
    return RxJavaPlugins.onAssembly(parent);
}
/**
 * Registers an {@link Action} to be called when this Maybe invokes either
 * {@link MaybeObserver#onSuccess onSuccess},
 * {@link MaybeObserver#onComplete onComplete} or {@link MaybeObserver#onError onError}.
 * <p>
 * <img width="640" height="310" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/finallyDo.png" alt="">
 * <dl>
 * <dt><b>Scheduler:</b></dt>
 * <dd>{@code doAfterTerminate} does not operate by default on a particular {@link Scheduler}.</dd>
 * </dl>
 *
 * @param onAfterTerminate
 *            an {@link Action} to be invoked when the source Maybe finishes
 * @return a Maybe that emits the same items as the source Maybe, then invokes the
 *         {@link Action}
 * @see <a href="http://reactivex.io/documentation/operators/do.html">ReactiveX operators documentation: Do</a>
 */
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final Maybe<T> doAfterTerminate(Action onAfterTerminate) {
    // MaybePeek multiplexes all side-effect hooks; only the after-terminate slot is customized here.
    return RxJavaPlugins.onAssembly(new MaybePeek<T>(this,
            Functions.emptyConsumer(), // onSubscribe
            Functions.emptyConsumer(), // onSuccess
            Functions.emptyConsumer(), // onError
            Functions.EMPTY_ACTION, // onComplete
            ObjectHelper.requireNonNull(onAfterTerminate, "onAfterTerminate is null"), // after (onSuccess | onError | onComplete)
            Functions.EMPTY_ACTION // dispose
    ));
}
/**
 * Calls the specified action after this Maybe signals onSuccess, onError or onComplete or gets disposed by
 * the downstream.
 * <p>In case of a race between a terminal event and a dispose call, the provided {@code onFinally} action
 * is executed once per subscription.
 * <p>Note that the {@code onFinally} action is shared between subscriptions and as such
 * should be thread-safe.
 * <dl>
 *  <dt><b>Scheduler:</b></dt>
 *  <dd>{@code doFinally} does not operate by default on a particular {@link Scheduler}.</dd>
 * </dl>
 * <p>History: 2.0.1 - experimental
 * @param onFinally the action called when this Maybe terminates or gets disposed
 * @return the new Maybe instance
 * @since 2.1
 */
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final Maybe<T> doFinally(Action onFinally) {
    ObjectHelper.requireNonNull(onFinally, "onFinally is null");
    MaybeDoFinally<T> parent = new MaybeDoFinally<T>(this, onFinally);
    return RxJavaPlugins.onAssembly(parent);
}
/**
 * Calls the shared {@code Action} if a MaybeObserver subscribed to the current Maybe
 * disposes the common Disposable it received via onSubscribe.
 * <dl>
 * <dt><b>Scheduler:</b></dt>
 * <dd>{@code doOnDispose} does not operate by default on a particular {@link Scheduler}.</dd>
 * </dl>
 * @param onDispose the action called when the subscription is disposed
 * @throws NullPointerException if onDispose is null
 * @return the new Maybe instance
 */
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final Maybe<T> doOnDispose(Action onDispose) {
    // MaybePeek multiplexes all side-effect hooks; only the dispose slot is customized here.
    return RxJavaPlugins.onAssembly(new MaybePeek<T>(this,
            Functions.emptyConsumer(), // onSubscribe
            Functions.emptyConsumer(), // onSuccess
            Functions.emptyConsumer(), // onError
            Functions.EMPTY_ACTION, // onComplete
            Functions.EMPTY_ACTION, // (onSuccess | onError | onComplete) after
            ObjectHelper.requireNonNull(onDispose, "onDispose is null") // dispose
    ));
}
/**
 * Modifies the source Maybe so that it invokes an action when it calls {@code onComplete}.
 * <p>
 * <img width="640" height="358" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/doOnComplete.m.png" alt="">
 * <dl>
 * <dt><b>Scheduler:</b></dt>
 * <dd>{@code doOnComplete} does not operate by default on a particular {@link Scheduler}.</dd>
 * </dl>
 *
 * @param onComplete
 *            the action to invoke when the source Maybe calls {@code onComplete}
 * @return the new Maybe with the side-effecting behavior applied
 * @see <a href="http://reactivex.io/documentation/operators/do.html">ReactiveX operators documentation: Do</a>
 */
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final Maybe<T> doOnComplete(Action onComplete) {
    // MaybePeek multiplexes all side-effect hooks; only the onComplete slot is customized here.
    return RxJavaPlugins.onAssembly(new MaybePeek<T>(this,
            Functions.emptyConsumer(), // onSubscribe
            Functions.emptyConsumer(), // onSuccess
            Functions.emptyConsumer(), // onError
            ObjectHelper.requireNonNull(onComplete, "onComplete is null"), // onComplete
            Functions.EMPTY_ACTION, // (onSuccess | onError | onComplete) after
            Functions.EMPTY_ACTION // dispose
    ));
}
/**
 * Calls the shared consumer with the error sent via onError for each
 * MaybeObserver that subscribes to the current Maybe.
 * <p>
 * <img width="640" height="358" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/doOnError.m.png" alt="">
 * <dl>
 * <dt><b>Scheduler:</b></dt>
 * <dd>{@code doOnError} does not operate by default on a particular {@link Scheduler}.</dd>
 * </dl>
 * @param onError the consumer called with the Throwable sent via onError
 * @return the new Maybe instance
 */
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final Maybe<T> doOnError(Consumer<? super Throwable> onError) {
    // MaybePeek multiplexes all side-effect hooks; only the onError slot is customized here.
    return RxJavaPlugins.onAssembly(new MaybePeek<T>(this,
            Functions.emptyConsumer(), // onSubscribe
            Functions.emptyConsumer(), // onSuccess
            ObjectHelper.requireNonNull(onError, "onError is null"), // onError
            Functions.EMPTY_ACTION, // onComplete
            Functions.EMPTY_ACTION, // (onSuccess | onError | onComplete) after
            Functions.EMPTY_ACTION // dispose
    ));
}
/**
 * Calls the given onEvent callback with the (success value, null) for an onSuccess, (null, throwable) for
 * an onError or (null, null) for an onComplete signal from this Maybe before delivering said
 * signal to the downstream.
 * <p>
 * Exceptions thrown from the callback will override the event so the downstream receives the
 * error instead of the original signal.
 * <dl>
 *  <dt><b>Scheduler:</b></dt>
 *  <dd>{@code doOnEvent} does not operate by default on a particular {@link Scheduler}.</dd>
 * </dl>
 * @param onEvent the callback to call with the terminal event tuple
 * @return the new Maybe instance
 */
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
public final Maybe<T> doOnEvent(BiConsumer<? super T, ? super Throwable> onEvent) {
    ObjectHelper.requireNonNull(onEvent, "onEvent is null");
    MaybeDoOnEvent<T> parent = new MaybeDoOnEvent<T>(this, onEvent);
    return RxJavaPlugins.onAssembly(parent);
}
/**
 * Calls the shared consumer with the Disposable sent through the onSubscribe for each
 * MaybeObserver that subscribes to the current Maybe.
 * <dl>
 * <dt><b>Scheduler:</b></dt>
 * <dd>{@code doOnSubscribe} does not operate by default on a particular {@link Scheduler}.</dd>
 * </dl>
 * @param onSubscribe the consumer called with the Disposable sent via onSubscribe
 * @return the new Maybe instance
 */
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final Maybe<T> doOnSubscribe(Consumer<? super Disposable> onSubscribe) {
    // MaybePeek multiplexes all side-effect hooks; only the onSubscribe slot is customized here.
    return RxJavaPlugins.onAssembly(new MaybePeek<T>(this,
            ObjectHelper.requireNonNull(onSubscribe, "onSubscribe is null"), // onSubscribe
            Functions.emptyConsumer(), // onSuccess
            Functions.emptyConsumer(), // onError
            Functions.EMPTY_ACTION, // onComplete
            Functions.EMPTY_ACTION, // (onSuccess | onError | onComplete) after
            Functions.EMPTY_ACTION // dispose
    ));
}
/**
 * Returns a Maybe instance that calls the given onTerminate callback
 * just before this Maybe completes normally or with an exception.
 * <p>
 * <img width="640" height="305" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/doOnTerminate.png" alt="">
 * <p>
 * This differs from {@code doAfterTerminate} in that this happens <em>before</em> the {@code onComplete} or
 * {@code onError} notification.
 * <dl>
 *  <dt><b>Scheduler:</b></dt>
 *  <dd>{@code doOnTerminate} does not operate by default on a particular {@link Scheduler}.</dd>
 * </dl>
 * @param onTerminate the action to invoke when the consumer calls {@code onComplete} or {@code onError}
 * @return the new Maybe instance
 * @see <a href="http://reactivex.io/documentation/operators/do.html">ReactiveX operators documentation: Do</a>
 * @since 2.2.7 - experimental
 */
@Experimental
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final Maybe<T> doOnTerminate(final Action onTerminate) {
    ObjectHelper.requireNonNull(onTerminate, "onTerminate is null");
    MaybeDoOnTerminate<T> parent = new MaybeDoOnTerminate<T>(this, onTerminate);
    return RxJavaPlugins.onAssembly(parent);
}
/**
 * Calls the shared consumer with the success value sent via onSuccess for each
 * MaybeObserver that subscribes to the current Maybe.
 * <p>
 * <img width="640" height="358" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/doOnSuccess.m.png" alt="">
 * <dl>
 * <dt><b>Scheduler:</b></dt>
 * <dd>{@code doOnSuccess} does not operate by default on a particular {@link Scheduler}.</dd>
 * </dl>
 * @param onSuccess the consumer called with the success value of onSuccess
 * @return the new Maybe instance
 */
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final Maybe<T> doOnSuccess(Consumer<? super T> onSuccess) {
    // MaybePeek multiplexes all side-effect hooks; only the onSuccess slot is customized here.
    return RxJavaPlugins.onAssembly(new MaybePeek<T>(this,
            Functions.emptyConsumer(), // onSubscribe
            ObjectHelper.requireNonNull(onSuccess, "onSuccess is null"), // onSuccess
            Functions.emptyConsumer(), // onError
            Functions.EMPTY_ACTION, // onComplete
            Functions.EMPTY_ACTION, // (onSuccess | onError | onComplete) after
            Functions.EMPTY_ACTION // dispose
    ));
}
/**
 * Filters the success item of the Maybe via a predicate function, emitting it if the predicate
 * returns true and completing otherwise.
 * <p>
 * <img width="640" height="310" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/filter.png" alt="">
 * <dl>
 *  <dt><b>Scheduler:</b></dt>
 *  <dd>{@code filter} does not operate by default on a particular {@link Scheduler}.</dd>
 * </dl>
 *
 * @param predicate
 *            a function that evaluates the item emitted by the source Maybe, returning {@code true}
 *            if it passes the filter
 * @return a Maybe that emit the item emitted by the source Maybe that the filter
 *         evaluates as {@code true}
 * @see <a href="http://reactivex.io/documentation/operators/filter.html">ReactiveX operators documentation: Filter</a>
 */
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final Maybe<T> filter(Predicate<? super T> predicate) {
    ObjectHelper.requireNonNull(predicate, "predicate is null");
    MaybeFilter<T> parent = new MaybeFilter<T>(this, predicate);
    return RxJavaPlugins.onAssembly(parent);
}
/**
 * Maps the success item of this Maybe through a function that returns a MaybeSource and
 * mirrors that inner source's signals.
 * <p>
 * <img width="640" height="356" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.flatMap.png" alt="">
 * <dl>
 * <dt><b>Scheduler:</b></dt>
 * <dd>{@code flatMap} does not operate by default on a particular {@link Scheduler}.</dd>
 * </dl>
 * <p>Note that flatMap and concatMap for Maybe is the same operation.
 *
 * @param <R> the result value type
 * @param mapper
 *            a function that, when applied to the item emitted by the source Maybe, returns a MaybeSource
 * @return the Maybe returned from {@code func} when applied to the item emitted by the source Maybe
 * @see <a href="http://reactivex.io/documentation/operators/flatmap.html">ReactiveX operators documentation: FlatMap</a>
 */
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final <R> Maybe<R> flatMap(Function<? super T, ? extends MaybeSource<? extends R>> mapper) {
    ObjectHelper.requireNonNull(mapper, "mapper is null");
    // Maybe emits at most one item, so flatMap and concatMap share one implementation.
    MaybeFlatten<T, R> parent = new MaybeFlatten<T, R>(this, mapper);
    return RxJavaPlugins.onAssembly(parent);
}
/**
 * Maps the onSuccess, onError or onComplete signals of this Maybe into MaybeSource and emits that
 * MaybeSource's signals.
 * <p>
 * <img width="640" height="354" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.flatMap.mmm.png" alt="">
 * <dl>
 *  <dt><b>Scheduler:</b></dt>
 *  <dd>{@code flatMap} does not operate by default on a particular {@link Scheduler}.</dd>
 * </dl>
 *
 * @param <R>
 *            the result type
 * @param onSuccessMapper
 *            a function that returns a MaybeSource to merge for the onSuccess item emitted by this Maybe
 * @param onErrorMapper
 *            a function that returns a MaybeSource to merge for an onError notification from this Maybe
 * @param onCompleteSupplier
 *            a function that returns a MaybeSource to merge for an onComplete notification this Maybe
 * @return the new Maybe instance
 * @see <a href="http://reactivex.io/documentation/operators/flatmap.html">ReactiveX operators documentation: FlatMap</a>
 */
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final <R> Maybe<R> flatMap(
        Function<? super T, ? extends MaybeSource<? extends R>> onSuccessMapper,
        Function<? super Throwable, ? extends MaybeSource<? extends R>> onErrorMapper,
        Callable<? extends MaybeSource<? extends R>> onCompleteSupplier) {
    // All three callbacks are validated up front, in declaration order.
    ObjectHelper.requireNonNull(onSuccessMapper, "onSuccessMapper is null");
    ObjectHelper.requireNonNull(onErrorMapper, "onErrorMapper is null");
    ObjectHelper.requireNonNull(onCompleteSupplier, "onCompleteSupplier is null");
    MaybeFlatMapNotification<T, R> parent =
            new MaybeFlatMapNotification<T, R>(this, onSuccessMapper, onErrorMapper, onCompleteSupplier);
    return RxJavaPlugins.onAssembly(parent);
}
/**
* Returns a Maybe that emits the results of a specified function to the pair of values emitted by the
* source Maybe and a specified mapped MaybeSource.
* <p>
* <img width="640" height="390" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/mergeMap.r.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code flatMap} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param <U>
* the type of items emitted by the MaybeSource returned by the {@code mapper} function
* @param <R>
* the type of items emitted by the resulting Maybe
* @param mapper
* a function that returns a MaybeSource for the item emitted by the source Maybe
* @param resultSelector
* a function that combines one item emitted by each of the source and collection MaybeSource and
* returns an item to be emitted by the resulting MaybeSource
* @return the new Maybe instance
* @see <a href="http://reactivex.io/documentation/operators/flatmap.html">ReactiveX operators documentation: FlatMap</a>
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final <U, R> Maybe<R> flatMap(Function<? super T, ? extends MaybeSource<? extends U>> mapper,
BiFunction<? super T, ? super U, ? extends R> resultSelector) {
ObjectHelper.requireNonNull(mapper, "mapper is null");
ObjectHelper.requireNonNull(resultSelector, "resultSelector is null");
return RxJavaPlugins.onAssembly(new MaybeFlatMapBiSelector<T, U, R>(this, mapper, resultSelector));
}
/**
* Maps the success value of the upstream {@link Maybe} into an {@link Iterable} and emits its items as a
* {@link Flowable} sequence.
* <p>
* <img width="640" height="373" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/flattenAsFlowable.png" alt="">
* <dl>
* <dt><b>Backpressure:</b></dt>
* <dd>The operator honors backpressure from downstream.</dd>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code flattenAsFlowable} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param <U>
* the type of item emitted by the resulting Iterable
* @param mapper
     *            a function that returns an Iterable sequence of values when given an item emitted by the
     *            source Maybe
* @return the new Flowable instance
* @see <a href="http://reactivex.io/documentation/operators/flatmap.html">ReactiveX operators documentation: FlatMap</a>
*/
@BackpressureSupport(BackpressureKind.FULL)
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final <U> Flowable<U> flattenAsFlowable(final Function<? super T, ? extends Iterable<? extends U>> mapper) {
ObjectHelper.requireNonNull(mapper, "mapper is null");
return RxJavaPlugins.onAssembly(new MaybeFlatMapIterableFlowable<T, U>(this, mapper));
}
/**
* Maps the success value of the upstream {@link Maybe} into an {@link Iterable} and emits its items as an
* {@link Observable} sequence.
* <p>
* <img width="640" height="373" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/flattenAsObservable.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code flattenAsObservable} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param <U>
* the type of item emitted by the resulting Iterable
* @param mapper
     *            a function that returns an Iterable sequence of values when given an item emitted by the
     *            source Maybe
* @return the new Observable instance
* @see <a href="http://reactivex.io/documentation/operators/flatmap.html">ReactiveX operators documentation: FlatMap</a>
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final <U> Observable<U> flattenAsObservable(final Function<? super T, ? extends Iterable<? extends U>> mapper) {
ObjectHelper.requireNonNull(mapper, "mapper is null");
return RxJavaPlugins.onAssembly(new MaybeFlatMapIterableObservable<T, U>(this, mapper));
}
/**
* Returns an Observable that is based on applying a specified function to the item emitted by the source Maybe,
* where that function returns an ObservableSource.
* <p>
* <img width="640" height="356" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.flatMap.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code flatMapObservable} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param <R> the result value type
* @param mapper
* a function that, when applied to the item emitted by the source Maybe, returns an ObservableSource
* @return the Observable returned from {@code func} when applied to the item emitted by the source Maybe
* @see <a href="http://reactivex.io/documentation/operators/flatmap.html">ReactiveX operators documentation: FlatMap</a>
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final <R> Observable<R> flatMapObservable(Function<? super T, ? extends ObservableSource<? extends R>> mapper) {
ObjectHelper.requireNonNull(mapper, "mapper is null");
return RxJavaPlugins.onAssembly(new MaybeFlatMapObservable<T, R>(this, mapper));
}
/**
* Returns a Flowable that emits items based on applying a specified function to the item emitted by the
* source Maybe, where that function returns a Publisher.
* <p>
* <img width="640" height="260" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.flatMapPublisher.png" alt="">
* <dl>
* <dt><b>Backpressure:</b></dt>
* <dd>The returned Flowable honors the downstream backpressure.</dd>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code flatMapPublisher} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param <R> the result value type
* @param mapper
* a function that, when applied to the item emitted by the source Maybe, returns a
* Flowable
* @return the Flowable returned from {@code func} when applied to the item emitted by the source Maybe
* @see <a href="http://reactivex.io/documentation/operators/flatmap.html">ReactiveX operators documentation: FlatMap</a>
*/
@BackpressureSupport(BackpressureKind.FULL)
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final <R> Flowable<R> flatMapPublisher(Function<? super T, ? extends Publisher<? extends R>> mapper) {
ObjectHelper.requireNonNull(mapper, "mapper is null");
return RxJavaPlugins.onAssembly(new MaybeFlatMapPublisher<T, R>(this, mapper));
}
/**
* Returns a {@link Single} based on applying a specified function to the item emitted by the
* source {@link Maybe}, where that function returns a {@link Single}.
* When this Maybe completes a {@link NoSuchElementException} will be thrown.
* <p>
* <img width="640" height="356" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.flatMapSingle.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code flatMapSingle} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param <R> the result value type
* @param mapper
* a function that, when applied to the item emitted by the source Maybe, returns a
* Single
* @return the Single returned from {@code mapper} when applied to the item emitted by the source Maybe
* @see <a href="http://reactivex.io/documentation/operators/flatmap.html">ReactiveX operators documentation: FlatMap</a>
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final <R> Single<R> flatMapSingle(final Function<? super T, ? extends SingleSource<? extends R>> mapper) {
ObjectHelper.requireNonNull(mapper, "mapper is null");
return RxJavaPlugins.onAssembly(new MaybeFlatMapSingle<T, R>(this, mapper));
}
/**
* Returns a {@link Maybe} based on applying a specified function to the item emitted by the
* source {@link Maybe}, where that function returns a {@link Single}.
* When this Maybe just completes the resulting {@code Maybe} completes as well.
* <p>
* <img width="640" height="356" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.flatMapSingle.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code flatMapSingleElement} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* <p>History: 2.0.2 - experimental
* @param <R> the result value type
* @param mapper
* a function that, when applied to the item emitted by the source Maybe, returns a
* Single
* @return the new Maybe instance
* @see <a href="http://reactivex.io/documentation/operators/flatmap.html">ReactiveX operators documentation: FlatMap</a>
* @since 2.1
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final <R> Maybe<R> flatMapSingleElement(final Function<? super T, ? extends SingleSource<? extends R>> mapper) {
ObjectHelper.requireNonNull(mapper, "mapper is null");
return RxJavaPlugins.onAssembly(new MaybeFlatMapSingleElement<T, R>(this, mapper));
}
/**
* Returns a {@link Completable} that completes based on applying a specified function to the item emitted by the
* source {@link Maybe}, where that function returns a {@link Completable}.
* <p>
* <img width="640" height="267" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.flatMapCompletable.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code flatMapCompletable} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param mapper
* a function that, when applied to the item emitted by the source Maybe, returns a
* Completable
* @return the Completable returned from {@code mapper} when applied to the item emitted by the source Maybe
* @see <a href="http://reactivex.io/documentation/operators/flatmap.html">ReactiveX operators documentation: FlatMap</a>
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final Completable flatMapCompletable(final Function<? super T, ? extends CompletableSource> mapper) {
ObjectHelper.requireNonNull(mapper, "mapper is null");
return RxJavaPlugins.onAssembly(new MaybeFlatMapCompletable<T>(this, mapper));
}
/**
* Hides the identity of this Maybe and its Disposable.
* <p>
* <img width="640" height="300" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.hide.png" alt="">
* <p>Allows preventing certain identity-based
* optimizations (fusion).
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code hide} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @return the new Maybe instance
*/
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
public final Maybe<T> hide() {
return RxJavaPlugins.onAssembly(new MaybeHide<T>(this));
}
/**
* Ignores the item emitted by the source Maybe and only calls {@code onComplete} or {@code onError}.
* <p>
* <img width="640" height="389" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.ignoreElement.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code ignoreElement} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @return an empty Completable that only calls {@code onComplete} or {@code onError}, based on which one is
* called by the source Maybe
* @see <a href="http://reactivex.io/documentation/operators/ignoreelements.html">ReactiveX operators documentation: IgnoreElements</a>
*/
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
public final Completable ignoreElement() {
return RxJavaPlugins.onAssembly(new MaybeIgnoreElementCompletable<T>(this));
}
/**
* Returns a Single that emits {@code true} if the source Maybe is empty, otherwise {@code false}.
* <p>
* <img width="640" height="320" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/isEmpty.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code isEmpty} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @return a Single that emits a Boolean
* @see <a href="http://reactivex.io/documentation/operators/contains.html">ReactiveX operators documentation: Contains</a>
*/
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
public final Single<Boolean> isEmpty() {
return RxJavaPlugins.onAssembly(new MaybeIsEmptySingle<T>(this));
}
/**
* <strong>This method requires advanced knowledge about building operators, please consider
* other standard composition methods first;</strong>
* Returns a {@code Maybe} which, when subscribed to, invokes the {@link MaybeOperator#apply(MaybeObserver) apply(MaybeObserver)} method
* of the provided {@link MaybeOperator} for each individual downstream {@link Maybe} and allows the
* insertion of a custom operator by accessing the downstream's {@link MaybeObserver} during this subscription phase
* and providing a new {@code MaybeObserver}, containing the custom operator's intended business logic, that will be
* used in the subscription process going further upstream.
* <p>
* Generally, such a new {@code MaybeObserver} will wrap the downstream's {@code MaybeObserver} and forwards the
* {@code onSuccess}, {@code onError} and {@code onComplete} events from the upstream directly or according to the
* emission pattern the custom operator's business logic requires. In addition, such operator can intercept the
* flow control calls of {@code dispose} and {@code isDisposed} that would have traveled upstream and perform
* additional actions depending on the same business logic requirements.
* <p>
* Example:
* <pre><code>
* // Step 1: Create the consumer type that will be returned by the MaybeOperator.apply():
*
* public final class CustomMaybeObserver<T> implements MaybeObserver<T>, Disposable {
*
* // The downstream's MaybeObserver that will receive the onXXX events
* final MaybeObserver<? super String> downstream;
*
* // The connection to the upstream source that will call this class' onXXX methods
* Disposable upstream;
*
* // The constructor takes the downstream subscriber and usually any other parameters
* public CustomMaybeObserver(MaybeObserver<? super String> downstream) {
* this.downstream = downstream;
* }
*
* // In the subscription phase, the upstream sends a Disposable to this class
* // and subsequently this class has to send a Disposable to the downstream.
* // Note that relaying the upstream's Disposable directly is not allowed in RxJava
* @Override
* public void onSubscribe(Disposable d) {
* if (upstream != null) {
* d.dispose();
* } else {
* upstream = d;
* downstream.onSubscribe(this);
* }
* }
*
* // The upstream calls this with the next item and the implementation's
* // responsibility is to emit an item to the downstream based on the intended
* // business logic, or if it can't do so for the particular item,
* // request more from the upstream
* @Override
* public void onSuccess(T item) {
* String str = item.toString();
* if (str.length() < 2) {
* downstream.onSuccess(str);
* } else {
* // Maybe is usually expected to produce one of the onXXX events
* downstream.onComplete();
* }
* }
*
* // Some operators may handle the upstream's error while others
* // could just forward it to the downstream.
* @Override
* public void onError(Throwable throwable) {
* downstream.onError(throwable);
* }
*
* // When the upstream completes, usually the downstream should complete as well.
* @Override
* public void onComplete() {
* downstream.onComplete();
* }
*
* // Some operators may use their own resources which should be cleaned up if
* // the downstream disposes the flow before it completed. Operators without
* // resources can simply forward the dispose to the upstream.
* // In some cases, a disposed flag may be set by this method so that other parts
* // of this class may detect the dispose and stop sending events
* // to the downstream.
* @Override
* public void dispose() {
* upstream.dispose();
* }
*
* // Some operators may simply forward the call to the upstream while others
* // can return the disposed flag set in dispose().
* @Override
* public boolean isDisposed() {
* return upstream.isDisposed();
* }
* }
*
* // Step 2: Create a class that implements the MaybeOperator interface and
* // returns the custom consumer type from above in its apply() method.
* // Such class may define additional parameters to be submitted to
* // the custom consumer type.
*
* final class CustomMaybeOperator<T> implements MaybeOperator<String> {
* @Override
* public MaybeObserver<? super String> apply(MaybeObserver<? super T> upstream) {
* return new CustomMaybeObserver<T>(upstream);
* }
* }
*
* // Step 3: Apply the custom operator via lift() in a flow by creating an instance of it
* // or reusing an existing one.
*
* Maybe.just(5)
* .lift(new CustomMaybeOperator<Integer>())
* .test()
* .assertResult("5");
*
* Maybe.just(15)
* .lift(new CustomMaybeOperator<Integer>())
* .test()
* .assertResult();
* </code></pre>
* <p>
* Creating custom operators can be complicated and it is recommended one consults the
* <a href="https://github.com/ReactiveX/RxJava/wiki/Writing-operators-for-2.0">RxJava wiki: Writing operators</a> page about
* the tools, requirements, rules, considerations and pitfalls of implementing them.
* <p>
* Note that implementing custom operators via this {@code lift()} method adds slightly more overhead by requiring
* an additional allocation and indirection per assembled flows. Instead, extending the abstract {@code Maybe}
* class and creating a {@link MaybeTransformer} with it is recommended.
* <p>
* Note also that it is not possible to stop the subscription phase in {@code lift()} as the {@code apply()} method
* requires a non-null {@code MaybeObserver} instance to be returned, which is then unconditionally subscribed to
* the upstream {@code Maybe}. For example, if the operator decided there is no reason to subscribe to the
* upstream source because of some optimization possibility or a failure to prepare the operator, it still has to
* return a {@code MaybeObserver} that should immediately dispose the upstream's {@code Disposable} in its
* {@code onSubscribe} method. Again, using a {@code MaybeTransformer} and extending the {@code Maybe} is
* a better option as {@link #subscribeActual} can decide to not subscribe to its upstream after all.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code lift} does not operate by default on a particular {@link Scheduler}, however, the
* {@link MaybeOperator} may use a {@code Scheduler} to support its own asynchronous behavior.</dd>
* </dl>
*
* @param <R> the output value type
* @param lift the {@link MaybeOperator} that receives the downstream's {@code MaybeObserver} and should return
* a {@code MaybeObserver} with custom behavior to be used as the consumer for the current
* {@code Maybe}.
* @return the new Maybe instance
* @see <a href="https://github.com/ReactiveX/RxJava/wiki/Writing-operators-for-2.0">RxJava wiki: Writing operators</a>
* @see #compose(MaybeTransformer)
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final <R> Maybe<R> lift(final MaybeOperator<? extends R, ? super T> lift) {
ObjectHelper.requireNonNull(lift, "lift is null");
return RxJavaPlugins.onAssembly(new MaybeLift<T, R>(this, lift));
}
/**
* Returns a Maybe that applies a specified function to the item emitted by the source Maybe and
* emits the result of this function application.
* <p>
* <img width="640" height="515" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.map.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code map} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param <R> the result value type
* @param mapper
* a function to apply to the item emitted by the Maybe
* @return a Maybe that emits the item from the source Maybe, transformed by the specified function
* @see <a href="http://reactivex.io/documentation/operators/map.html">ReactiveX operators documentation: Map</a>
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final <R> Maybe<R> map(Function<? super T, ? extends R> mapper) {
ObjectHelper.requireNonNull(mapper, "mapper is null");
return RxJavaPlugins.onAssembly(new MaybeMap<T, R>(this, mapper));
}
/**
* Maps the signal types of this Maybe into a {@link Notification} of the same kind
* and emits it as a single success value to downstream.
* <p>
* <img width="640" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/materialize.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code materialize} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @return the new Single instance
* @since 2.2.4 - experimental
* @see Single#dematerialize(Function)
*/
@Experimental
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
public final Single<Notification<T>> materialize() {
return RxJavaPlugins.onAssembly(new MaybeMaterialize<T>(this));
}
/**
* Flattens this and another Maybe into a single Flowable, without any transformation.
* <p>
* <img width="640" height="380" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/merge.png" alt="">
* <p>
* You can combine items emitted by multiple Maybes so that they appear as a single Flowable, by
* using the {@code mergeWith} method.
* <dl>
* <dt><b>Backpressure:</b></dt>
* <dd>The operator honors backpressure from downstream.</dd>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code mergeWith} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param other
* a MaybeSource to be merged
* @return a new Flowable instance
* @see <a href="http://reactivex.io/documentation/operators/merge.html">ReactiveX operators documentation: Merge</a>
*/
@BackpressureSupport(BackpressureKind.FULL)
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final Flowable<T> mergeWith(MaybeSource<? extends T> other) {
ObjectHelper.requireNonNull(other, "other is null");
return merge(this, other);
}
/**
* Wraps a Maybe to emit its item (or notify of its error) on a specified {@link Scheduler},
* asynchronously.
* <p>
* <img width="640" height="182" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.observeOn.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>you specify which {@link Scheduler} this operator will use.</dd>
* </dl>
*
* @param scheduler
* the {@link Scheduler} to notify subscribers on
* @return the new Maybe instance that its subscribers are notified on the specified
* {@link Scheduler}
* @see <a href="http://reactivex.io/documentation/operators/observeon.html">ReactiveX operators documentation: ObserveOn</a>
* @see <a href="http://www.grahamlea.com/2014/07/rxjava-threading-examples/">RxJava Threading Examples</a>
* @see #subscribeOn
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.CUSTOM)
public final Maybe<T> observeOn(final Scheduler scheduler) {
ObjectHelper.requireNonNull(scheduler, "scheduler is null");
return RxJavaPlugins.onAssembly(new MaybeObserveOn<T>(this, scheduler));
}
/**
* Filters the items emitted by a Maybe, only emitting its success value if that
* is an instance of the supplied Class.
* <p>
* <img width="640" height="310" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/ofClass.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code ofType} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param <U> the output type
* @param clazz
* the class type to filter the items emitted by the source Maybe
* @return the new Maybe instance
* @see <a href="http://reactivex.io/documentation/operators/filter.html">ReactiveX operators documentation: Filter</a>
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final <U> Maybe<U> ofType(final Class<U> clazz) {
ObjectHelper.requireNonNull(clazz, "clazz is null");
return filter(Functions.isInstanceOf(clazz)).cast(clazz);
}
/**
* Calls the specified converter function with the current Maybe instance
* during assembly time and returns its result.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code to} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @param <R> the result type
* @param convert the function that is called with the current Maybe instance during
* assembly time that should return some value to be the result
*
* @return the value returned by the convert function
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final <R> R to(Function<? super Maybe<T>, R> convert) {
try {
return ObjectHelper.requireNonNull(convert, "convert is null").apply(this);
} catch (Throwable ex) {
Exceptions.throwIfFatal(ex);
throw ExceptionHelper.wrapOrThrow(ex);
}
}
/**
* Converts this Maybe into a backpressure-aware Flowable instance composing cancellation
* through.
* <dl>
* <dt><b>Backpressure:</b></dt>
* <dd>The returned Flowable honors the backpressure of the downstream.</dd>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code toFlowable} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @return the new Flowable instance
*/
@SuppressWarnings("unchecked")
@BackpressureSupport(BackpressureKind.FULL)
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
public final Flowable<T> toFlowable() {
if (this instanceof FuseToFlowable) {
return ((FuseToFlowable<T>)this).fuseToFlowable();
}
return RxJavaPlugins.onAssembly(new MaybeToFlowable<T>(this));
}
/**
* Converts this Maybe into an Observable instance composing disposal
* through.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code toObservable} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @return the new Observable instance
*/
@SuppressWarnings("unchecked")
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
public final Observable<T> toObservable() {
if (this instanceof FuseToObservable) {
return ((FuseToObservable<T>)this).fuseToObservable();
}
return RxJavaPlugins.onAssembly(new MaybeToObservable<T>(this));
}
/**
* Converts this Maybe into a Single instance composing disposal
* through and turning an empty Maybe into a Single that emits the given
* value through onSuccess.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code toSingle} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @param defaultValue the default item to signal in Single if this Maybe is empty
* @return the new Single instance
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final Single<T> toSingle(T defaultValue) {
ObjectHelper.requireNonNull(defaultValue, "defaultValue is null");
return RxJavaPlugins.onAssembly(new MaybeToSingle<T>(this, defaultValue));
}
/**
* Converts this Maybe into a Single instance composing disposal
* through and turning an empty Maybe into a signal of NoSuchElementException.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code toSingle} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @return the new Single instance
*/
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
public final Single<T> toSingle() {
return RxJavaPlugins.onAssembly(new MaybeToSingle<T>(this, null));
}
/**
* Returns a Maybe instance that if this Maybe emits an error, it will emit an onComplete
* and swallow the throwable.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code onErrorComplete} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @return the new Maybe instance
*/
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
public final Maybe<T> onErrorComplete() {
return onErrorComplete(Functions.alwaysTrue());
}
/**
* Returns a Maybe instance that if this Maybe emits an error and the predicate returns
* true, it will emit an onComplete and swallow the throwable.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code onErrorComplete} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
     * @param predicate the predicate to call when a Throwable is emitted which should return true
* if the Throwable should be swallowed and replaced with an onComplete.
* @return the new Maybe instance
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final Maybe<T> onErrorComplete(final Predicate<? super Throwable> predicate) {
ObjectHelper.requireNonNull(predicate, "predicate is null");
return RxJavaPlugins.onAssembly(new MaybeOnErrorComplete<T>(this, predicate));
}
/**
* Instructs a Maybe to pass control to another {@link MaybeSource} rather than invoking
* {@link MaybeObserver#onError onError} if it encounters an error.
* <p>
* <img width="640" height="310" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/onErrorResumeNext.png" alt="">
* <p>
* You can use this to prevent errors from propagating or to supply fallback data should errors be
* encountered.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code onErrorResumeNext} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param next
* the next {@code MaybeSource} that will take over if the source Maybe encounters
* an error
* @return the new Maybe instance
* @see <a href="http://reactivex.io/documentation/operators/catch.html">ReactiveX operators documentation: Catch</a>
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final Maybe<T> onErrorResumeNext(final MaybeSource<? extends T> next) {
ObjectHelper.requireNonNull(next, "next is null");
return onErrorResumeNext(Functions.justFunction(next));
}
/**
* Instructs a Maybe to pass control to another Maybe rather than invoking
* {@link MaybeObserver#onError onError} if it encounters an error.
* <p>
* <img width="640" height="310" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/onErrorResumeNext.png" alt="">
* <p>
* You can use this to prevent errors from propagating or to supply fallback data should errors be
* encountered.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code onErrorResumeNext} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param resumeFunction
* a function that returns a MaybeSource that will take over if the source Maybe encounters
* an error
* @return the new Maybe instance
* @see <a href="http://reactivex.io/documentation/operators/catch.html">ReactiveX operators documentation: Catch</a>
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final Maybe<T> onErrorResumeNext(Function<? super Throwable, ? extends MaybeSource<? extends T>> resumeFunction) {
ObjectHelper.requireNonNull(resumeFunction, "resumeFunction is null");
return RxJavaPlugins.onAssembly(new MaybeOnErrorNext<T>(this, resumeFunction, true));
}
/**
* Instructs a Maybe to emit an item (returned by a specified function) rather than invoking
* {@link MaybeObserver#onError onError} if it encounters an error.
* <p>
* <img width="640" height="310" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/onErrorReturn.png" alt="">
* <p>
* You can use this to prevent errors from propagating or to supply fallback data should errors be
* encountered.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code onErrorReturn} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param valueSupplier
     *            a function that returns a single value that will be emitted as the success value when
     *            the current Maybe signals an onError event
* @return the new Maybe instance
* @see <a href="http://reactivex.io/documentation/operators/catch.html">ReactiveX operators documentation: Catch</a>
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final Maybe<T> onErrorReturn(Function<? super Throwable, ? extends T> valueSupplier) {
ObjectHelper.requireNonNull(valueSupplier, "valueSupplier is null");
return RxJavaPlugins.onAssembly(new MaybeOnErrorReturn<T>(this, valueSupplier));
}
/**
* Instructs a Maybe to emit an item (returned by a specified function) rather than invoking
* {@link MaybeObserver#onError onError} if it encounters an error.
* <p>
* <img width="640" height="310" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/onErrorReturn.png" alt="">
* <p>
* You can use this to prevent errors from propagating or to supply fallback data should errors be
* encountered.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code onErrorReturnItem} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param item
* the value that is emitted as onSuccess in case this Maybe signals an onError
* @return the new Maybe instance
* @see <a href="http://reactivex.io/documentation/operators/catch.html">ReactiveX operators documentation: Catch</a>
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final Maybe<T> onErrorReturnItem(final T item) {
ObjectHelper.requireNonNull(item, "item is null");
return onErrorReturn(Functions.justFunction(item));
}
/**
* Instructs a Maybe to pass control to another MaybeSource rather than invoking
* {@link MaybeObserver#onError onError} if it encounters an {@link java.lang.Exception}.
* <p>
* This differs from {@link #onErrorResumeNext} in that this one does not handle {@link java.lang.Throwable}
* or {@link java.lang.Error} but lets those continue through.
* <p>
* <img width="640" height="333" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/onExceptionResumeNextViaMaybe.png" alt="">
* <p>
* You can use this to prevent exceptions from propagating or to supply fallback data should exceptions be
* encountered.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code onExceptionResumeNext} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param next
* the next MaybeSource that will take over if the source Maybe encounters
* an exception
* @return the new Maybe instance
* @see <a href="http://reactivex.io/documentation/operators/catch.html">ReactiveX operators documentation: Catch</a>
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final Maybe<T> onExceptionResumeNext(final MaybeSource<? extends T> next) {
ObjectHelper.requireNonNull(next, "next is null");
return RxJavaPlugins.onAssembly(new MaybeOnErrorNext<T>(this, Functions.justFunction(next), false));
}
/**
* Nulls out references to the upstream producer and downstream MaybeObserver if
* the sequence is terminated or downstream calls dispose().
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code onTerminateDetach} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @return a Maybe which nulls out references to the upstream producer and downstream MaybeObserver if
* the sequence is terminated or downstream calls dispose()
*/
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
public final Maybe<T> onTerminateDetach() {
return RxJavaPlugins.onAssembly(new MaybeDetach<T>(this));
}
/**
* Returns a Flowable that repeats the sequence of items emitted by the source Maybe indefinitely.
* <p>
* <img width="640" height="309" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/repeat.o.png" alt="">
* <dl>
* <dt><b>Backpressure:</b></dt>
* <dd>The operator honors downstream backpressure.</dd>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code repeat} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @return a Flowable that emits the items emitted by the source Maybe repeatedly and in sequence
* @see <a href="http://reactivex.io/documentation/operators/repeat.html">ReactiveX operators documentation: Repeat</a>
*/
@BackpressureSupport(BackpressureKind.FULL)
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
public final Flowable<T> repeat() {
return repeat(Long.MAX_VALUE);
}
/**
* Returns a Flowable that repeats the sequence of items emitted by the source Maybe at most
* {@code count} times.
* <p>
* <img width="640" height="310" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/repeat.on.png" alt="">
* <dl>
* <dt><b>Backpressure:</b></dt>
* <dd>This operator honors downstream backpressure.</dd>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code repeat} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param times
* the number of times the source Maybe items are repeated, a count of 0 will yield an empty
* sequence
* @return a Flowable that repeats the sequence of items emitted by the source Maybe at most
* {@code count} times
* @throws IllegalArgumentException
* if {@code count} is less than zero
* @see <a href="http://reactivex.io/documentation/operators/repeat.html">ReactiveX operators documentation: Repeat</a>
*/
@BackpressureSupport(BackpressureKind.FULL)
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
public final Flowable<T> repeat(long times) {
return toFlowable().repeat(times);
}
/**
* Returns a Flowable that repeats the sequence of items emitted by the source Maybe until
* the provided stop function returns true.
* <p>
* <img width="640" height="310" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/repeat.on.png" alt="">
* <dl>
* <dt><b>Backpressure:</b></dt>
* <dd>This operator honors downstream backpressure.</dd>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code repeatUntil} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param stop
* a boolean supplier that is called when the current Flowable completes and unless it returns
* false, the current Flowable is resubscribed
* @return the new Flowable instance
* @throws NullPointerException
* if {@code stop} is null
* @see <a href="http://reactivex.io/documentation/operators/repeat.html">ReactiveX operators documentation: Repeat</a>
*/
@BackpressureSupport(BackpressureKind.FULL)
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
public final Flowable<T> repeatUntil(BooleanSupplier stop) {
return toFlowable().repeatUntil(stop);
}
/**
* Returns a Flowable that emits the same values as the source Publisher with the exception of an
* {@code onComplete}. An {@code onComplete} notification from the source will result in the emission of
* a {@code void} item to the Publisher provided as an argument to the {@code notificationHandler}
* function. If that Publisher calls {@code onComplete} or {@code onError} then {@code repeatWhen} will
* call {@code onComplete} or {@code onError} on the child subscription. Otherwise, this Publisher will
* resubscribe to the source Publisher.
* <p>
* <img width="640" height="430" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/repeatWhen.f.png" alt="">
* <dl>
* <dt><b>Backpressure:</b></dt>
* <dd>The operator honors downstream backpressure and expects the source {@code Publisher} to honor backpressure as well.
* If this expectation is violated, the operator <em>may</em> throw an {@code IllegalStateException}.</dd>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code repeatWhen} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param handler
* receives a Publisher of notifications with which a user can complete or error, aborting the repeat.
* @return the source Publisher modified with repeat logic
* @see <a href="http://reactivex.io/documentation/operators/repeat.html">ReactiveX operators documentation: Repeat</a>
*/
@BackpressureSupport(BackpressureKind.FULL)
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
public final Flowable<T> repeatWhen(final Function<? super Flowable<Object>, ? extends Publisher<?>> handler) {
return toFlowable().repeatWhen(handler);
}
/**
* Returns a Maybe that mirrors the source Maybe, resubscribing to it if it calls {@code onError}
* (infinite retry count).
* <p>
* <img width="640" height="315" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/retry.png" alt="">
* <p>
* If the source Maybe calls {@link MaybeObserver#onError}, this method will resubscribe to the source
* Maybe rather than propagating the {@code onError} call.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code retry} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @return the new Maybe instance
* @see <a href="http://reactivex.io/documentation/operators/retry.html">ReactiveX operators documentation: Retry</a>
*/
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
public final Maybe<T> retry() {
return retry(Long.MAX_VALUE, Functions.alwaysTrue());
}
/**
* Returns a Maybe that mirrors the source Maybe, resubscribing to it if it calls {@code onError}
* and the predicate returns true for that specific exception and retry count.
* <p>
* <img width="640" height="315" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/retry.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code retry} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param predicate
* the predicate that determines if a resubscription may happen in case of a specific exception
* and retry count
* @return the new Maybe instance
* @see #retry()
* @see <a href="http://reactivex.io/documentation/operators/retry.html">ReactiveX operators documentation: Retry</a>
*/
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
public final Maybe<T> retry(BiPredicate<? super Integer, ? super Throwable> predicate) {
return toFlowable().retry(predicate).singleElement();
}
/**
* Returns a Maybe that mirrors the source Maybe, resubscribing to it if it calls {@code onError}
* up to a specified number of retries.
* <p>
* <img width="640" height="315" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/retry.png" alt="">
* <p>
* If the source Maybe calls {@link MaybeObserver#onError}, this method will resubscribe to the source
* Maybe for a maximum of {@code count} resubscriptions rather than propagating the
* {@code onError} call.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code retry} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param count
* the number of times to resubscribe if the current Maybe fails
* @return the new Maybe instance
* @see <a href="http://reactivex.io/documentation/operators/retry.html">ReactiveX operators documentation: Retry</a>
*/
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
public final Maybe<T> retry(long count) {
return retry(count, Functions.alwaysTrue());
}
/**
* Retries at most times or until the predicate returns false, whichever happens first.
*
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code retry} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @param times the number of times to resubscribe if the current Maybe fails
* @param predicate the predicate called with the failure Throwable and should return true to trigger a retry.
* @return the new Maybe instance
*/
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
public final Maybe<T> retry(long times, Predicate<? super Throwable> predicate) {
return toFlowable().retry(times, predicate).singleElement();
}
/**
* Retries the current Maybe if it fails and the predicate returns true.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code retry} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param predicate the predicate that receives the failure Throwable and should return true to trigger a retry.
* @return the new Maybe instance
*/
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
public final Maybe<T> retry(Predicate<? super Throwable> predicate) {
return retry(Long.MAX_VALUE, predicate);
}
/**
* Retries until the given stop function returns true.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code retryUntil} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @param stop the function that should return true to stop retrying
* @return the new Maybe instance
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final Maybe<T> retryUntil(final BooleanSupplier stop) {
ObjectHelper.requireNonNull(stop, "stop is null");
return retry(Long.MAX_VALUE, Functions.predicateReverseFor(stop));
}
/**
* Returns a Maybe that emits the same values as the source Maybe with the exception of an
* {@code onError}. An {@code onError} notification from the source will result in the emission of a
* {@link Throwable} item to the Publisher provided as an argument to the {@code notificationHandler}
* function. If that Publisher calls {@code onComplete} or {@code onError} then {@code retry} will call
* {@code onComplete} or {@code onError} on the child subscription. Otherwise, this Publisher will
* resubscribe to the source Publisher.
* <p>
* <img width="640" height="430" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/retryWhen.f.png" alt="">
* <p>
* Example:
*
* This retries 3 times, each time incrementing the number of seconds it waits.
*
* <pre><code>
* Maybe.create((MaybeEmitter<? super String> s) -> {
* System.out.println("subscribing");
* s.onError(new RuntimeException("always fails"));
* }, BackpressureStrategy.BUFFER).retryWhen(attempts -> {
* return attempts.zipWith(Publisher.range(1, 3), (n, i) -> i).flatMap(i -> {
* System.out.println("delay retry by " + i + " second(s)");
* return Flowable.timer(i, TimeUnit.SECONDS);
* });
* }).blockingForEach(System.out::println);
* </code></pre>
*
* Output is:
*
* <pre> {@code
* subscribing
* delay retry by 1 second(s)
* subscribing
* delay retry by 2 second(s)
* subscribing
* delay retry by 3 second(s)
* subscribing
* } </pre>
* <p>
* Note that the inner {@code Publisher} returned by the handler function should signal
* either {@code onNext}, {@code onError} or {@code onComplete} in response to the received
* {@code Throwable} to indicate the operator should retry or terminate. If the upstream to
* the operator is asynchronous, signalling onNext followed by onComplete immediately may
* result in the sequence to be completed immediately. Similarly, if this inner
* {@code Publisher} signals {@code onError} or {@code onComplete} while the upstream is
* active, the sequence is terminated with the same signal immediately.
* <p>
* The following example demonstrates how to retry an asynchronous source with a delay:
* <pre><code>
* Maybe.timer(1, TimeUnit.SECONDS)
* .doOnSubscribe(s -> System.out.println("subscribing"))
* .map(v -> { throw new RuntimeException(); })
* .retryWhen(errors -> {
* AtomicInteger counter = new AtomicInteger();
* return errors
* .takeWhile(e -> counter.getAndIncrement() != 3)
* .flatMap(e -> {
* System.out.println("delay retry by " + counter.get() + " second(s)");
* return Flowable.timer(counter.get(), TimeUnit.SECONDS);
* });
* })
* .blockingGet();
* </code></pre>
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code retryWhen} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param handler
* receives a Publisher of notifications with which a user can complete or error, aborting the
* retry
* @return the new Maybe instance
* @see <a href="http://reactivex.io/documentation/operators/retry.html">ReactiveX operators documentation: Retry</a>
*/
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
public final Maybe<T> retryWhen(
final Function<? super Flowable<Throwable>, ? extends Publisher<?>> handler) {
return toFlowable().retryWhen(handler).singleElement();
}
/**
* Subscribes to a Maybe and ignores {@code onSuccess} and {@code onComplete} emissions.
* <p>
* If the Maybe emits an error, it is wrapped into an
* {@link io.reactivex.exceptions.OnErrorNotImplementedException OnErrorNotImplementedException}
* and routed to the RxJavaPlugins.onError handler.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code subscribe} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @return a {@link Disposable} reference with which the caller can stop receiving items before
* the Maybe has finished sending them
* @see <a href="http://reactivex.io/documentation/operators/subscribe.html">ReactiveX operators documentation: Subscribe</a>
*/
@SchedulerSupport(SchedulerSupport.NONE)
public final Disposable subscribe() {
return subscribe(Functions.emptyConsumer(), Functions.ON_ERROR_MISSING, Functions.EMPTY_ACTION);
}
/**
* Subscribes to a Maybe and provides a callback to handle the items it emits.
* <p>
* If the Maybe emits an error, it is wrapped into an
* {@link io.reactivex.exceptions.OnErrorNotImplementedException OnErrorNotImplementedException}
* and routed to the RxJavaPlugins.onError handler.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code subscribe} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param onSuccess
* the {@code Consumer<T>} you have designed to accept a success value from the Maybe
* @return a {@link Disposable} reference with which the caller can stop receiving items before
* the Maybe has finished sending them
* @throws NullPointerException
* if {@code onSuccess} is null
* @see <a href="http://reactivex.io/documentation/operators/subscribe.html">ReactiveX operators documentation: Subscribe</a>
*/
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
public final Disposable subscribe(Consumer<? super T> onSuccess) {
return subscribe(onSuccess, Functions.ON_ERROR_MISSING, Functions.EMPTY_ACTION);
}
/**
* Subscribes to a Maybe and provides callbacks to handle the items it emits and any error
* notification it issues.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code subscribe} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param onSuccess
* the {@code Consumer<T>} you have designed to accept a success value from the Maybe
* @param onError
* the {@code Consumer<Throwable>} you have designed to accept any error notification from the
* Maybe
* @return a {@link Disposable} reference with which the caller can stop receiving items before
* the Maybe has finished sending them
* @see <a href="http://reactivex.io/documentation/operators/subscribe.html">ReactiveX operators documentation: Subscribe</a>
* @throws NullPointerException
* if {@code onSuccess} is null, or
* if {@code onError} is null
*/
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
public final Disposable subscribe(Consumer<? super T> onSuccess, Consumer<? super Throwable> onError) {
return subscribe(onSuccess, onError, Functions.EMPTY_ACTION);
}
/**
* Subscribes to a Maybe and provides callbacks to handle the items it emits and any error or
* completion notification it issues.
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code subscribe} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param onSuccess
* the {@code Consumer<T>} you have designed to accept a success value from the Maybe
* @param onError
* the {@code Consumer<Throwable>} you have designed to accept any error notification from the
* Maybe
* @param onComplete
* the {@code Action} you have designed to accept a completion notification from the
* Maybe
* @return a {@link Disposable} reference with which the caller can stop receiving items before
* the Maybe has finished sending them
* @throws NullPointerException
* if {@code onSuccess} is null, or
* if {@code onError} is null, or
* if {@code onComplete} is null
* @see <a href="http://reactivex.io/documentation/operators/subscribe.html">ReactiveX operators documentation: Subscribe</a>
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final Disposable subscribe(Consumer<? super T> onSuccess, Consumer<? super Throwable> onError,
Action onComplete) {
ObjectHelper.requireNonNull(onSuccess, "onSuccess is null");
ObjectHelper.requireNonNull(onError, "onError is null");
ObjectHelper.requireNonNull(onComplete, "onComplete is null");
return subscribeWith(new MaybeCallbackObserver<T>(onSuccess, onError, onComplete));
}
@SchedulerSupport(SchedulerSupport.NONE)
@Override
public final void subscribe(MaybeObserver<? super T> observer) {
ObjectHelper.requireNonNull(observer, "observer is null");
observer = RxJavaPlugins.onSubscribe(this, observer);
ObjectHelper.requireNonNull(observer, "The RxJavaPlugins.onSubscribe hook returned a null MaybeObserver. Please check the handler provided to RxJavaPlugins.setOnMaybeSubscribe for invalid null returns. Further reading: https://github.com/ReactiveX/RxJava/wiki/Plugins");
try {
subscribeActual(observer);
} catch (NullPointerException ex) {
throw ex;
} catch (Throwable ex) {
Exceptions.throwIfFatal(ex);
NullPointerException npe = new NullPointerException("subscribeActual failed");
npe.initCause(ex);
throw npe;
}
}
    /**
     * Implement this method in subclasses to handle the incoming {@link MaybeObserver}s.
     * <p>There is no need to call any of the plugin hooks on the current {@code Maybe} instance or
     * the {@code MaybeObserver}; all hooks and basic safeguards have been
     * applied by {@link #subscribe(MaybeObserver)} before this method gets called.
     * @param observer the MaybeObserver to handle, not null
     */
    protected abstract void subscribeActual(MaybeObserver<? super T> observer);
/**
* Asynchronously subscribes subscribers to this Maybe on the specified {@link Scheduler}.
* <p>
* <img width="640" height="752" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.subscribeOn.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>you specify which {@link Scheduler} this operator will use.</dd>
* </dl>
*
* @param scheduler
* the {@link Scheduler} to perform subscription actions on
* @return the new Maybe instance that its subscriptions happen on the specified {@link Scheduler}
* @see <a href="http://reactivex.io/documentation/operators/subscribeon.html">ReactiveX operators documentation: SubscribeOn</a>
* @see <a href="http://www.grahamlea.com/2014/07/rxjava-threading-examples/">RxJava Threading Examples</a>
* @see #observeOn
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.CUSTOM)
public final Maybe<T> subscribeOn(Scheduler scheduler) {
ObjectHelper.requireNonNull(scheduler, "scheduler is null");
return RxJavaPlugins.onAssembly(new MaybeSubscribeOn<T>(this, scheduler));
}
/**
* Subscribes a given MaybeObserver (subclass) to this Maybe and returns the given
* MaybeObserver as is.
* <p>Usage example:
* <pre><code>
* Maybe<Integer> source = Maybe.just(1);
* CompositeDisposable composite = new CompositeDisposable();
*
* DisposableMaybeObserver<Integer> ds = new DisposableMaybeObserver<>() {
* // ...
* };
*
* composite.add(source.subscribeWith(ds));
* </code></pre>
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code subscribeWith} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* @param <E> the type of the MaybeObserver to use and return
* @param observer the MaybeObserver (subclass) to use and return, not null
* @return the input {@code subscriber}
* @throws NullPointerException if {@code subscriber} is null
*/
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
public final <E extends MaybeObserver<? super T>> E subscribeWith(E observer) {
subscribe(observer);
return observer;
}
/**
* Returns a Maybe that emits the items emitted by the source Maybe or the items of an alternate
* MaybeSource if the current Maybe is empty.
* <p>
* <img width="640" height="445" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/switchifempty.m.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code switchIfEmpty} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param other
* the alternate MaybeSource to subscribe to if the main does not emit any items
* @return a Maybe that emits the items emitted by the source Maybe or the items of an
* alternate MaybeSource if the source Maybe is empty.
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final Maybe<T> switchIfEmpty(MaybeSource<? extends T> other) {
ObjectHelper.requireNonNull(other, "other is null");
return RxJavaPlugins.onAssembly(new MaybeSwitchIfEmpty<T>(this, other));
}
/**
* Returns a Single that emits the items emitted by the source Maybe or the item of an alternate
* SingleSource if the current Maybe is empty.
* <p>
* <img width="640" height="445" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/switchifempty.m.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code switchIfEmpty} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
* <p>History: 2.1.4 - experimental
* @param other
* the alternate SingleSource to subscribe to if the main does not emit any items
* @return a Single that emits the items emitted by the source Maybe or the item of an
* alternate SingleSource if the source Maybe is empty.
* @since 2.2
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final Single<T> switchIfEmpty(SingleSource<? extends T> other) {
ObjectHelper.requireNonNull(other, "other is null");
return RxJavaPlugins.onAssembly(new MaybeSwitchIfEmptySingle<T>(this, other));
}
/**
* Returns a Maybe that emits the items emitted by the source Maybe until a second MaybeSource
* emits an item.
* <p>
* <img width="640" height="380" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/takeUntil.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code takeUntil} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param other
* the MaybeSource whose first emitted item will cause {@code takeUntil} to stop emitting items
* from the source Maybe
* @param <U>
* the type of items emitted by {@code other}
* @return a Maybe that emits the items emitted by the source Maybe until such time as {@code other} emits its first item
* @see <a href="http://reactivex.io/documentation/operators/takeuntil.html">ReactiveX operators documentation: TakeUntil</a>
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final <U> Maybe<T> takeUntil(MaybeSource<U> other) {
ObjectHelper.requireNonNull(other, "other is null");
return RxJavaPlugins.onAssembly(new MaybeTakeUntilMaybe<T, U>(this, other));
}
/**
* Returns a Maybe that emits the item emitted by the source Maybe until a second Publisher
* emits an item.
* <p>
* <img width="640" height="380" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/takeUntil.png" alt="">
* <dl>
* <dt><b>Backpressure:</b></dt>
* <dd>The {@code Publisher} is consumed in an unbounded fashion and is cancelled after the first item
* emitted.</dd>
* <dt><b>Scheduler:</b></dt>
* <dd>{@code takeUntil} does not operate by default on a particular {@link Scheduler}.</dd>
* </dl>
*
* @param other
* the Publisher whose first emitted item will cause {@code takeUntil} to stop emitting items
* from the source Publisher
* @param <U>
* the type of items emitted by {@code other}
* @return a Maybe that emits the items emitted by the source Maybe until such time as {@code other} emits its first item
* @see <a href="http://reactivex.io/documentation/operators/takeuntil.html">ReactiveX operators documentation: TakeUntil</a>
*/
@BackpressureSupport(BackpressureKind.UNBOUNDED_IN)
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final <U> Maybe<T> takeUntil(Publisher<U> other) {
ObjectHelper.requireNonNull(other, "other is null");
return RxJavaPlugins.onAssembly(new MaybeTakeUntilPublisher<T, U>(this, other));
}
/**
* Returns a Maybe that mirrors the source Maybe but applies a timeout policy for each emitted
* item. If the next item isn't emitted within the specified timeout duration starting from its predecessor,
* the resulting Maybe terminates and notifies MaybeObservers of a {@code TimeoutException}.
* <p>
* <img width="640" height="305" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/timeout.1.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>This version of {@code timeout} operates by default on the {@code computation} {@link Scheduler}.</dd>
* </dl>
*
* @param timeout
* maximum duration between emitted items before a timeout occurs
* @param timeUnit
* the unit of time that applies to the {@code timeout} argument.
* @return the new Maybe instance
* @see <a href="http://reactivex.io/documentation/operators/timeout.html">ReactiveX operators documentation: Timeout</a>
*/
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.COMPUTATION)
public final Maybe<T> timeout(long timeout, TimeUnit timeUnit) {
return timeout(timeout, timeUnit, Schedulers.computation());
}
/**
* Returns a Maybe that mirrors the source Maybe but applies a timeout policy for each emitted
* item. If the next item isn't emitted within the specified timeout duration starting from its predecessor,
* the source MaybeSource is disposed and resulting Maybe begins instead to mirror a fallback MaybeSource.
* <p>
* <img width="640" height="305" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/timeout.2.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>This version of {@code timeout} operates by default on the {@code computation} {@link Scheduler}.</dd>
* </dl>
*
* @param timeout
* maximum duration between items before a timeout occurs
* @param timeUnit
* the unit of time that applies to the {@code timeout} argument
* @param fallback
* the fallback MaybeSource to use in case of a timeout
* @return the new Maybe instance
* @see <a href="http://reactivex.io/documentation/operators/timeout.html">ReactiveX operators documentation: Timeout</a>
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.COMPUTATION)
public final Maybe<T> timeout(long timeout, TimeUnit timeUnit, MaybeSource<? extends T> fallback) {
ObjectHelper.requireNonNull(fallback, "fallback is null");
return timeout(timeout, timeUnit, Schedulers.computation(), fallback);
}
/**
* Returns a Maybe that mirrors the source Maybe but applies a timeout policy for each emitted
* item using a specified Scheduler. If the next item isn't emitted within the specified timeout duration
* starting from its predecessor, the source MaybeSource is disposed and resulting Maybe begins instead
* to mirror a fallback MaybeSource.
* <p>
* <img width="640" height="305" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/timeout.2s.png" alt="">
* <dl>
* <dt><b>Scheduler:</b></dt>
* <dd>You specify which {@link Scheduler} this operator will use.</dd>
* </dl>
*
* @param timeout
* maximum duration between items before a timeout occurs
* @param timeUnit
* the unit of time that applies to the {@code timeout} argument
* @param fallback
* the MaybeSource to use as the fallback in case of a timeout
* @param scheduler
* the {@link Scheduler} to run the timeout timers on
* @return the new Maybe instance
* @see <a href="http://reactivex.io/documentation/operators/timeout.html">ReactiveX operators documentation: Timeout</a>
*/
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.CUSTOM)
public final Maybe<T> timeout(long timeout, TimeUnit timeUnit, Scheduler scheduler, MaybeSource<? extends T> fallback) {
ObjectHelper.requireNonNull(fallback, "fallback is null");
return timeout(timer(timeout, timeUnit, scheduler), fallback);
}
/**
 * Returns a Maybe that mirrors the source Maybe but applies a timeout policy for each emitted
 * item, where this policy is governed on a specified Scheduler. If the next item isn't emitted within the
 * specified timeout duration starting from its predecessor, the resulting Maybe terminates and
 * notifies MaybeObservers of a {@code TimeoutException}.
 * <p>
 * <img width="640" height="305" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/timeout.1s.png" alt="">
 * <dl>
 *  <dt><b>Scheduler:</b></dt>
 *  <dd>You specify which {@link Scheduler} this operator will use.</dd>
 * </dl>
 *
 * @param timeout
 *            maximum duration between items before a timeout occurs
 * @param timeUnit
 *            the unit of time that applies to the {@code timeout} argument
 * @param scheduler
 *            the {@link Scheduler} to run the timeout timers on
 * @return the new Maybe instance
 * @see <a href="http://reactivex.io/documentation/operators/timeout.html">ReactiveX operators documentation: Timeout</a>
 */
@CheckReturnValue
@NonNull // added for consistency: every sibling timeout/zip overload declares a non-null return
@SchedulerSupport(SchedulerSupport.CUSTOM)
public final Maybe<T> timeout(long timeout, TimeUnit timeUnit, Scheduler scheduler) {
    // Delegate to the indicator-based overload using a scheduled timer as the indicator.
    return timeout(timer(timeout, timeUnit, scheduler));
}
/**
 * If the current {@code Maybe} didn't signal an event before the {@code timeoutIndicator} {@link MaybeSource} signals, a
 * {@link TimeoutException} is signaled instead.
 * <dl>
 *  <dt><b>Scheduler:</b></dt>
 *  <dd>{@code timeout} does not operate by default on a particular {@link Scheduler}.</dd>
 * </dl>
 * @param <U> the value type of the {@code timeoutIndicator} source
 * @param timeoutIndicator the {@code MaybeSource} that indicates the timeout by signaling onSuccess
 * or onComplete.
 * @return the new Maybe instance
 * @throws NullPointerException if {@code timeoutIndicator} is null
 */
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final <U> Maybe<T> timeout(MaybeSource<U> timeoutIndicator) {
    ObjectHelper.requireNonNull(timeoutIndicator, "timeoutIndicator is null");
    // null fallback: the operator signals TimeoutException instead of switching sources
    return RxJavaPlugins.onAssembly(new MaybeTimeoutMaybe<T, U>(this, timeoutIndicator, null));
}
/**
 * If the current {@code Maybe} didn't signal an event before the {@code timeoutIndicator} {@link MaybeSource} signals,
 * the current {@code Maybe} is disposed and the {@code fallback} {@code MaybeSource} subscribed to
 * as a continuation.
 * <dl>
 *  <dt><b>Scheduler:</b></dt>
 *  <dd>{@code timeout} does not operate by default on a particular {@link Scheduler}.</dd>
 * </dl>
 * @param <U> the value type of the {@code timeoutIndicator} source
 * @param timeoutIndicator the {@code MaybeSource} that indicates the timeout by signaling {@code onSuccess}
 * or {@code onComplete}.
 * @param fallback the {@code MaybeSource} that is subscribed to if the current {@code Maybe} times out
 * @return the new Maybe instance
 * @throws NullPointerException if {@code timeoutIndicator} or {@code fallback} is null
 */
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final <U> Maybe<T> timeout(MaybeSource<U> timeoutIndicator, MaybeSource<? extends T> fallback) {
    ObjectHelper.requireNonNull(timeoutIndicator, "timeoutIndicator is null");
    ObjectHelper.requireNonNull(fallback, "fallback is null");
    return RxJavaPlugins.onAssembly(new MaybeTimeoutMaybe<T, U>(this, timeoutIndicator, fallback));
}
/**
 * If the current {@code Maybe} source didn't signal an event before the {@code timeoutIndicator} {@link Publisher} signals, a
 * {@link TimeoutException} is signaled instead.
 * <dl>
 * <dt><b>Backpressure:</b></dt>
 * <dd>The {@code timeoutIndicator} {@link Publisher} is consumed in an unbounded manner and
 * is cancelled after its first item.</dd>
 *  <dt><b>Scheduler:</b></dt>
 *  <dd>{@code timeout} does not operate by default on a particular {@link Scheduler}.</dd>
 * </dl>
 * @param <U> the value type of the {@code timeoutIndicator} source
 * @param timeoutIndicator the {@link Publisher} that indicates the timeout by signaling {@code onNext}
 * or {@code onComplete}.
 * @return the new Maybe instance
 * @throws NullPointerException if {@code timeoutIndicator} is null
 */
@BackpressureSupport(BackpressureKind.UNBOUNDED_IN)
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final <U> Maybe<T> timeout(Publisher<U> timeoutIndicator) {
    ObjectHelper.requireNonNull(timeoutIndicator, "timeoutIndicator is null");
    // null fallback: the operator signals TimeoutException instead of switching sources
    return RxJavaPlugins.onAssembly(new MaybeTimeoutPublisher<T, U>(this, timeoutIndicator, null));
}
/**
 * If the current {@code Maybe} didn't signal an event before the {@code timeoutIndicator} {@link Publisher} signals,
 * the current {@code Maybe} is disposed and the {@code fallback} {@code MaybeSource} subscribed to
 * as a continuation.
 * <dl>
 * <dt><b>Backpressure:</b></dt>
 * <dd>The {@code timeoutIndicator} {@link Publisher} is consumed in an unbounded manner and
 * is cancelled after its first item.</dd>
 *  <dt><b>Scheduler:</b></dt>
 *  <dd>{@code timeout} does not operate by default on a particular {@link Scheduler}.</dd>
 * </dl>
 * @param <U> the value type of the {@code timeoutIndicator} source
 * @param timeoutIndicator the {@link Publisher} that indicates the timeout by signaling {@code onNext}
 * or {@code onComplete}
 * @param fallback the {@code MaybeSource} that is subscribed to if the current {@code Maybe} times out
 * @return the new Maybe instance
 * @throws NullPointerException if {@code timeoutIndicator} or {@code fallback} is null
 */
@BackpressureSupport(BackpressureKind.UNBOUNDED_IN)
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final <U> Maybe<T> timeout(Publisher<U> timeoutIndicator, MaybeSource<? extends T> fallback) {
    ObjectHelper.requireNonNull(timeoutIndicator, "timeoutIndicator is null");
    ObjectHelper.requireNonNull(fallback, "fallback is null");
    return RxJavaPlugins.onAssembly(new MaybeTimeoutPublisher<T, U>(this, timeoutIndicator, fallback));
}
/**
 * Returns a Maybe that propagates a MaybeObserver's call to {@code dispose()}
 * up to the upstream on the given {@link Scheduler}.
 * <dl>
 *  <dt><b>Scheduler:</b></dt>
 *  <dd>{@code unsubscribeOn} calls dispose() of the upstream on the {@link Scheduler} you specify.</dd>
 * </dl>
 * @param scheduler the target scheduler where to execute the disposal
 * @return the new Maybe instance
 * @throws NullPointerException if scheduler is null
 */
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.CUSTOM)
public final Maybe<T> unsubscribeOn(final Scheduler scheduler) {
    ObjectHelper.requireNonNull(scheduler, "scheduler is null");
    MaybeUnsubscribeOn<T> operator = new MaybeUnsubscribeOn<T>(this, scheduler);
    return RxJavaPlugins.onAssembly(operator);
}
/**
 * Waits until this and the other MaybeSource signal a success value then applies the given BiFunction
 * to those values and emits the BiFunction's resulting value to downstream.
 *
 * <img width="640" height="380" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/zip.png" alt="">
 *
 * <p>If either this or the other MaybeSource is empty or signals an error, the resulting Maybe will
 * terminate immediately and dispose the other source.
 *
 * <dl>
 *  <dt><b>Scheduler:</b></dt>
 *  <dd>{@code zipWith} does not operate by default on a particular {@link Scheduler}.</dd>
 * </dl>
 *
 * @param <U>
 *            the type of items emitted by the {@code other} MaybeSource
 * @param <R>
 *            the type of items emitted by the resulting Maybe
 * @param other
 *            the other MaybeSource
 * @param zipper
 *            a function that combines the pairs of items from the two MaybeSources to generate the items to
 *            be emitted by the resulting Maybe
 * @return the new Maybe instance
 * @throws NullPointerException if {@code other} or {@code zipper} is null
 * @see <a href="http://reactivex.io/documentation/operators/zip.html">ReactiveX operators documentation: Zip</a>
 */
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public final <U, R> Maybe<R> zipWith(MaybeSource<? extends U> other, BiFunction<? super T, ? super U, ? extends R> zipper) {
    ObjectHelper.requireNonNull(other, "other is null");
    // Fail fast on a null zipper as well, consistent with the argument
    // validation performed by the other operators in this class.
    ObjectHelper.requireNonNull(zipper, "zipper is null");
    return zip(this, other, zipper);
}
// ------------------------------------------------------------------
// Test helper
// ------------------------------------------------------------------
/**
 * Creates a fresh TestObserver and subscribes it to this Maybe.
 * <dl>
 *  <dt><b>Scheduler:</b></dt>
 *  <dd>{@code test} does not operate by default on a particular {@link Scheduler}.</dd>
 * </dl>
 * @return the new TestObserver instance
 */
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
public final TestObserver<T> test() {
    TestObserver<T> observer = new TestObserver<T>();
    subscribe(observer);
    return observer;
}
/**
 * Creates a TestObserver, optionally cancels it up front, then subscribes
 * it to this Maybe.
 * <dl>
 *  <dt><b>Scheduler:</b></dt>
 *  <dd>{@code test} does not operate by default on a particular {@link Scheduler}.</dd>
 * </dl>
 * @param cancelled if true, the TestObserver will be cancelled before subscribing to this
 * Maybe.
 * @return the new TestObserver instance
 */
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
public final TestObserver<T> test(boolean cancelled) {
    TestObserver<T> observer = new TestObserver<T>();
    if (cancelled) {
        // Cancel before subscribing so the subscription observes the cancelled state.
        observer.cancel();
    }
    subscribe(observer);
    return observer;
}
}
| artem-zinnatullin/RxJava | src/main/java/io/reactivex/Maybe.java | Java | apache-2.0 | 233,229 |
# Copyright 2010 Jacob Kaplan-Moss
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Flavor interface.
"""
from oslo_utils import strutils
from six.moves.urllib import parse
from novaclient import base
from novaclient import exceptions
from novaclient.openstack.common.gettextutils import _
from novaclient import utils
class Flavor(base.Resource):
    """
    A flavor is an available hardware configuration for a server.
    """
    # Allow a human-readable slug of the name to be used as the resource id.
    HUMAN_ID = True

    def __repr__(self):
        return "<Flavor: %s>" % self.name

    @property
    def ephemeral(self):
        """
        Provide a user-friendly accessor to OS-FLV-EXT-DATA:ephemeral.

        :returns: the ephemeral disk size, or ``'N/A'`` when the extension
            attribute is absent from the API response
        """
        return self._info.get("OS-FLV-EXT-DATA:ephemeral", 'N/A')

    @property
    def is_public(self):
        """
        Provide a user-friendly accessor to os-flavor-access:is_public.

        :returns: the flavor visibility flag, or ``'N/A'`` when the extension
            attribute is absent from the API response
        """
        return self._info.get("os-flavor-access:is_public", 'N/A')

    def get_keys(self):
        """
        Get extra specs from this flavor.

        :returns: dict of extra-spec key/value pairs
        """
        _resp, body = self.manager.api.client.get(
            "/flavors/%s/os-extra_specs" %
            base.getid(self))
        return body["extra_specs"]

    def set_keys(self, metadata):
        """
        Set extra specs on this flavor.

        :param metadata: A dict of key/value pairs to be set
        """
        # Reject metadata keys the API would not accept before issuing the call.
        utils.validate_flavor_metadata_keys(metadata.keys())

        body = {'extra_specs': metadata}
        return self.manager._create(
            "/flavors/%s/os-extra_specs" % base.getid(self),
            body,
            "extra_specs",
            return_raw=True)

    def unset_keys(self, keys):
        """
        Unset extra specs on this flavor.

        :param keys: A list of keys to be unset
        """
        # The API removes one extra spec per DELETE request.
        for k in keys:
            self.manager._delete(
                "/flavors/%s/os-extra_specs/%s" % (base.getid(self), k))

    def delete(self):
        """
        Delete this flavor.
        """
        self.manager.delete(self)
class FlavorManager(base.ManagerWithFind):
    """
    Manage :class:`Flavor` resources.
    """
    resource_class = Flavor
    is_alphanum_id_allowed = True

    @staticmethod
    def _parse_numeric(value, converter, error_message):
        """Convert ``value`` with ``converter`` or raise a CommandError.

        :param value: raw user-supplied value
        :param converter: conversion callable, e.g. ``int`` or ``float``
        :param error_message: already-translated message for the failure case
        :raises exceptions.CommandError: if the value cannot be converted
        """
        try:
            return converter(value)
        except (TypeError, ValueError):
            raise exceptions.CommandError(error_message)

    def list(self, detailed=True, is_public=True):
        """
        Get a list of all flavors.

        :param detailed: if True, request the detailed flavor listing
        :param is_public: if falsy, ask Nova for non-public flavors as well
        :rtype: list of :class:`Flavor`.
        """
        qparams = {}
        # is_public is ternary - None means give all flavors.
        # By default Nova assumes True and gives admins public flavors
        # and flavors from their own projects only.
        if not is_public:
            qparams['is_public'] = is_public
        query_string = "?%s" % parse.urlencode(qparams) if qparams else ""

        detail = ""
        if detailed:
            detail = "/detail"

        return self._list("/flavors%s%s" % (detail, query_string), "flavors")

    def get(self, flavor):
        """
        Get a specific flavor.

        :param flavor: The ID of the :class:`Flavor` to get.
        :rtype: :class:`Flavor`
        """
        return self._get("/flavors/%s" % base.getid(flavor), "flavor")

    def delete(self, flavor):
        """
        Delete a specific flavor.

        :param flavor: The ID of the :class:`Flavor` to delete.
        """
        self._delete("/flavors/%s" % base.getid(flavor))

    def _build_body(self, name, ram, vcpus, disk, id, swap,
                    ephemeral, rxtx_factor, is_public):
        """Build the request body for the flavor-create API call."""
        return {
            "flavor": {
                "name": name,
                "ram": ram,
                "vcpus": vcpus,
                "disk": disk,
                "id": id,
                "swap": swap,
                "OS-FLV-EXT-DATA:ephemeral": ephemeral,
                "rxtx_factor": rxtx_factor,
                "os-flavor-access:is_public": is_public,
            }
        }

    def create(self, name, ram, vcpus, disk, flavorid="auto",
               ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True):
        """
        Create a flavor.

        :param name: Descriptive name of the flavor
        :param ram: Memory in MB for the flavor
        :param vcpus: Number of VCPUs for the flavor
        :param disk: Size of local disk in GB
        :param flavorid: ID for the flavor (optional). You can use the reserved
                         value ``"auto"`` to have Nova generate a UUID for the
                         flavor in cases where you cannot simply pass ``None``.
        :param ephemeral: Ephemeral disk space in GB
        :param swap: Swap space in MB
        :param rxtx_factor: RX/TX factor
        :param is_public: flavor visibility; coerced with
                          ``strutils.bool_from_string``
        :raises exceptions.CommandError: if any numeric/boolean argument
                                         cannot be converted
        :rtype: :class:`Flavor`
        """
        # Validation order (and the translated messages) is preserved from the
        # original inline try/except blocks.
        ram = self._parse_numeric(ram, int, _("Ram must be an integer."))
        vcpus = self._parse_numeric(vcpus, int, _("VCPUs must be an integer."))
        disk = self._parse_numeric(disk, int, _("Disk must be an integer."))

        if flavorid == "auto":
            flavorid = None

        swap = self._parse_numeric(swap, int, _("Swap must be an integer."))
        ephemeral = self._parse_numeric(
            ephemeral, int, _("Ephemeral must be an integer."))
        rxtx_factor = self._parse_numeric(
            rxtx_factor, float, _("rxtx_factor must be a float."))

        try:
            is_public = strutils.bool_from_string(is_public, True)
        except Exception:
            raise exceptions.CommandError(_("is_public must be a boolean."))

        body = self._build_body(name, ram, vcpus, disk, flavorid, swap,
                                ephemeral, rxtx_factor, is_public)

        return self._create("/flavors", body, "flavor")
| akash1808/python-novaclient | novaclient/v1_1/flavors.py | Python | apache-2.0 | 6,740 |
package main
import (
"fmt"
"os"
"github.com/codegangsta/cli"
"github.com/heartbeatsjp/check_happo/command"
"github.com/heartbeatsjp/happo-agent/halib"
)
// GlobalFlags are global level options applied before any subcommand;
// currently none are defined.
var GlobalFlags = []cli.Flag{}
// Commands is the list of subcommands exposed by the CLI.
var Commands = []cli.Command{
	// "monitor" runs a monitoring plugin against a (possibly proxied) happo-agent.
	{
		Name:   "monitor",
		Usage:  "",
		Action: command.CmdMonitor,
		Flags: []cli.Flag{
			cli.StringFlag{
				Name:  "host, H",
				Usage: "hostname or IP address",
			},
			cli.IntFlag{
				Name:  "port, P",
				Value: halib.DefaultAgentPort, // default agent port from happo-agent
				Usage: "Port number",
			},
			cli.StringSliceFlag{
				Name:  "proxy, X",
				Value: &cli.StringSlice{},
				Usage: "Proxy hostname[:port] (You can multiple define.)",
			},
			cli.StringFlag{
				Name:  "plugin_name, p",
				Usage: "Plugin Name",
			},
			cli.StringFlag{
				Name:  "plugin_option, o",
				Usage: "Plugin Option",
			},
			cli.StringFlag{
				Name:  "timeout, t",
				Usage: "Connect Timeout",
			},
			cli.BoolFlag{
				Name:  "verbose, v",
				Usage: "verbose output",
			},
		},
	},
	// "check_happo" runs the self-test action; it takes no flags.
	{
		Name:   "check_happo",
		Usage:  "",
		Action: command.CmdTest,
		Flags:  []cli.Flag{},
	},
}
// CommandNotFound is invoked by cli when an unknown subcommand is given;
// it writes a hint to stderr and exits with status 2.
func CommandNotFound(c *cli.Context, command string) {
	appName := c.App.Name
	fmt.Fprintf(os.Stderr, "%s: '%s' is not a %s command. See '%s --help'.", appName, command, appName, appName)
	os.Exit(2)
}
| heartbeatsjp/check_happo | commands.go | GO | apache-2.0 | 1,406 |
//! \file
/*
** Copyright (C) - Triton
**
** This program is under the terms of the Apache License 2.0.
*/
#ifndef TRITON_LIFTINGTOLLVM_HPP
#define TRITON_LIFTINGTOLLVM_HPP
#include <map>
#include <memory>
#include <ostream>
#include <triton/ast.hpp>
#include <triton/dllexport.hpp>
#include <triton/symbolicExpression.hpp>
//! The Triton namespace
namespace triton {
/*!
* \addtogroup triton
* @{
*/
//! The Engines namespace
namespace engines {
/*!
* \ingroup triton
* \addtogroup engines
* @{
*/
//! The Lifters namespace
namespace lifters {
/*!
* \ingroup engines
* \addtogroup lifters
* @{
*/
//! \class LiftingToLLVM
/*! \brief Lifts Triton symbolic expressions and AST nodes to LLVM IR text. */
class LiftingToLLVM {
  public:
    //! Constructor.
    TRITON_EXPORT LiftingToLLVM();

    //! Lifts a symbolic expression and all its references to LLVM format and writes it to \p stream.
    //! `fname` represents the name of the LLVM function; `optimize` presumably enables LLVM
    //! optimization passes on the emitted module — confirm in the implementation.
    TRITON_EXPORT std::ostream& liftToLLVM(std::ostream& stream, const triton::engines::symbolic::SharedSymbolicExpression& expr, const char* fname="__triton", bool optimize=false);

    //! Lifts a abstract node and all its references to LLVM format and writes it to \p stream.
    //! `fname` represents the name of the LLVM function.
    TRITON_EXPORT std::ostream& liftToLLVM(std::ostream& stream, const triton::ast::SharedAbstractNode& node, const char* fname="__triton", bool optimize=false);

    //! Lifts and simplify an AST using LLVM; returns the simplified node.
    TRITON_EXPORT triton::ast::SharedAbstractNode simplifyAstViaLLVM(const triton::ast::SharedAbstractNode& node) const;
};
/*! @} End of lifters namespace */
};
/*! @} End of engines namespace */
};
/*! @} End of triton namespace */
};
#endif /* TRITON_LIFTINGTOLLVM_HPP */
| JonathanSalwan/Triton | src/libtriton/includes/triton/liftingToLLVM.hpp | C++ | apache-2.0 | 1,827 |
/*
* Copyright 1999-2010 University of Chicago
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is
* distributed on an "AS IS" BASIS,WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied.
*
* See the License for the specific language governing permissions and limitations under the License.
*/
package org.globus.gsi.stores;
import org.globus.gsi.provider.SigningPolicyStore;
import org.globus.gsi.provider.SigningPolicyStoreException;
import org.globus.gsi.provider.SigningPolicyStoreParameters;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.Log;
import java.io.IOException;
import java.net.URI;
import java.security.InvalidAlgorithmParameterException;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import javax.security.auth.x500.X500Principal;
import org.globus.gsi.SigningPolicy;
import org.globus.gsi.util.CertificateIOUtil;
import org.globus.util.GlobusResource;
import org.globus.util.GlobusPathMatchingResourcePatternResolver;
/**
* FILL ME
*
* @author ranantha@mcs.anl.gov
*/
/**
 * A {@link SigningPolicyStore} that loads CA signing policies from the
 * configured trust-root locations and caches both successful and failed
 * lookups. Lookups first try files whose name starts with the CA name hash
 * and fall back to scanning every policy file.
 * <p>
 * {@code getSigningPolicy} and the internal loaders are {@code synchronized},
 * so a single instance may be shared between threads.
 *
 * @author ranantha@mcs.anl.gov
 */
public class ResourceSigningPolicyStore implements SigningPolicyStore {

    /** How long a successfully loaded policy is considered fresh. */
    private static final long CACHE_TIME_MILLIS = 3600 * 1000;

    /** How long a failed load is remembered before the file/hash is retried. */
    private static final long NEGATIVE_CACHE_TIME_MILLIS = 10 * CACHE_TIME_MILLIS;

    private final GlobusPathMatchingResourcePatternResolver globusResolver =
            new GlobusPathMatchingResourcePatternResolver();

    // Parsed policy files, keyed by the file URI.
    private final Map<URI, ResourceSigningPolicy> signingPolicyFileMap = new HashMap<URI, ResourceSigningPolicy>();

    // Loaded policies, keyed by the CA subject DN (X500Principal#getName()).
    private final Map<String, SigningPolicy> policyMap = new HashMap<String, SigningPolicy>();

    private final ResourceSigningPolicyStoreParameters parameters;

    private final Log logger = LogFactory.getLog(ResourceSigningPolicyStore.class.getCanonicalName());

    // Timestamps of failed load attempts. NOTE: keyed by BOTH file names and
    // CA name hashes.
    private final Map<String, Long> invalidPoliciesCache = new HashMap<String, Long>();

    // Timestamps of successful loads, keyed by CA name hash.
    private final Map<String, Long> validPoliciesCache = new HashMap<String, Long>();

    /**
     * Please use the {@link Stores} class to generate Key/Cert stores.
     *
     * @param param must be a {@link ResourceSigningPolicyStoreParameters}
     * @throws InvalidAlgorithmParameterException if {@code param} has the wrong type
     * @throws IllegalArgumentException if {@code param} is null
     */
    public ResourceSigningPolicyStore(SigningPolicyStoreParameters param) throws InvalidAlgorithmParameterException {
        if (param == null) {
            throw new IllegalArgumentException();
        }

        if (!(param instanceof ResourceSigningPolicyStoreParameters)) {
            throw new InvalidAlgorithmParameterException();
        }

        this.parameters = (ResourceSigningPolicyStoreParameters) param;
    }

    /**
     * Returns the signing policy for the given CA, loading (and caching) it
     * from disk if necessary.
     *
     * @param caPrincipal the CA subject; {@code null} yields {@code null}
     * @return the policy, or {@code null} if none is known (including when a
     *         recent lookup for this CA failed)
     * @throws SigningPolicyStoreException on resource-resolution failure
     */
    public synchronized SigningPolicy getSigningPolicy(X500Principal caPrincipal) throws SigningPolicyStoreException {

        if (caPrincipal == null) {
            return null;
        }

        String name = caPrincipal.getName();
        long now = System.currentTimeMillis();
        String hash = CertificateIOUtil.nameHash(caPrincipal);

        Long validCacheTime = validPoliciesCache.get(hash);
        Long invalidCacheTime = invalidPoliciesCache.get(hash);
        // Negative cache: skip the disk scan entirely for recently failed CAs.
        if ((invalidCacheTime != null) && (now - invalidCacheTime < NEGATIVE_CACHE_TIME_MILLIS)) {
            return null;
        }
        if ((validCacheTime == null) || (now - validCacheTime >= CACHE_TIME_MILLIS) || !this.policyMap.containsKey(name)) {
            loadPolicy(hash);
        }

        return this.policyMap.get(name);
    }

    /**
     * Loads the policy for the CA with the given name hash. Files whose name
     * starts with the hash are tried first; if none yields a policy, every
     * policy file in the trust roots is scanned.
     */
    private synchronized void loadPolicy(String hash) throws SigningPolicyStoreException {

        String locations = this.parameters.getTrustRootLocations();
        GlobusResource[] resources = globusResolver.getResources(locations);

        long now = System.currentTimeMillis();
        boolean foundPolicy = false;

        // Optimization: If we find a hash for this CA, only process that.
        // Otherwise, we will process all policies.
        for (GlobusResource resource : resources) {
            String filename = resource.getFilename();

            Long invalidCacheTime = invalidPoliciesCache.get(filename);
            if ((invalidCacheTime != null) && (now - invalidCacheTime < NEGATIVE_CACHE_TIME_MILLIS)) {
                continue;
            }
            if (!filename.startsWith(hash)) {
                continue;
            }
            if (!resource.isReadable()) {
                logger.debug("Cannot read: " + resource.getFilename());
                continue;
            }

            try {
                loadSigningPolicy(resource, policyMap, signingPolicyFileMap);
            } catch (Exception e) {
                noteLoadFailure(filename, hash, now, e);
                continue;
            }
            foundPolicy = true;
        }

        if (foundPolicy) {
            if (!validPoliciesCache.containsKey(hash)) {
                invalidPoliciesCache.put(hash, now);
            }
            return;
        }

        // Poor-man's implementation. Note it is much more expensive than a hashed directory
        for (GlobusResource resource : resources) {
            String filename = resource.getFilename();

            Long invalidCacheTime = invalidPoliciesCache.get(filename);
            if ((invalidCacheTime != null) && (now - invalidCacheTime < NEGATIVE_CACHE_TIME_MILLIS)) {
                continue;
            }

            try {
                loadSigningPolicy(resource, policyMap, signingPolicyFileMap);
            } catch (Exception e) {
                noteLoadFailure(filename, hash, now, e);
                continue;
            }
        }

        if (!validPoliciesCache.containsKey(hash)) {
            invalidPoliciesCache.put(hash, now);
        }
    }

    /**
     * Records a failed load of {@code filename} in the negative cache (under
     * both the file name and the CA hash), logging at most once per negative
     * cache window.
     */
    private void noteLoadFailure(String filename, String hash, long now, Exception e) {
        Long invalidCacheTime = invalidPoliciesCache.get(filename);
        if ((invalidCacheTime == null) || (now - invalidCacheTime >= NEGATIVE_CACHE_TIME_MILLIS)) {
            logger.warn("Failed to load signing policy: " + filename);
            logger.debug("Failed to load signing policy: " + filename, e);
            invalidPoliciesCache.put(filename, now);
            invalidPoliciesCache.put(hash, now);
        }
    }

    /**
     * Parses one signing-policy file and records every policy it contains in
     * {@code policyMapToLoad}; the parsed file itself is remembered in
     * {@code currentPolicyFileMap} keyed by its URI.
     */
    private void loadSigningPolicy(
            GlobusResource policyResource, Map<String, SigningPolicy> policyMapToLoad,
            Map<URI, ResourceSigningPolicy> currentPolicyFileMap) throws SigningPolicyStoreException {

        URI uri;
        if (!policyResource.isReadable()) {
            throw new SigningPolicyStoreException("Cannot read file");
        }

        try {
            uri = policyResource.getURI();
        } catch (IOException e) {
            throw new SigningPolicyStoreException(e);
        }

        ResourceSigningPolicy filePolicy = this.signingPolicyFileMap.get(uri);
        if (filePolicy == null) {
            try {
                filePolicy = new ResourceSigningPolicy(policyResource);
            } catch (ResourceStoreException e) {
                throw new SigningPolicyStoreException(e);
            }
        }
        Collection<SigningPolicy> policies = filePolicy.getSigningPolicies();

        currentPolicyFileMap.put(uri, filePolicy);
        if (policies != null) {
            long now = System.currentTimeMillis();
            for (SigningPolicy policy : policies) {
                X500Principal caPrincipal = policy.getCASubjectDN();
                policyMapToLoad.put(caPrincipal.getName(), policy);
                String hash = CertificateIOUtil.nameHash(caPrincipal);
                validPoliciesCache.put(hash, now);
            }
        }
    }
}
| gbehrmann/JGlobus | ssl-proxies/src/main/java/org/globus/gsi/stores/ResourceSigningPolicyStore.java | Java | apache-2.0 | 8,094 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.prestosql.plugin.iceberg;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import io.airlift.slice.Slice;
import io.prestosql.orc.OrcDataSink;
import io.prestosql.orc.OrcDataSource;
import io.prestosql.orc.OrcWriteValidation;
import io.prestosql.orc.OrcWriterOptions;
import io.prestosql.orc.OrcWriterStats;
import io.prestosql.orc.metadata.ColumnMetadata;
import io.prestosql.orc.metadata.CompressionKind;
import io.prestosql.orc.metadata.OrcColumnId;
import io.prestosql.orc.metadata.OrcType;
import io.prestosql.orc.metadata.statistics.ColumnStatistics;
import io.prestosql.orc.metadata.statistics.DateStatistics;
import io.prestosql.orc.metadata.statistics.DecimalStatistics;
import io.prestosql.orc.metadata.statistics.DoubleStatistics;
import io.prestosql.orc.metadata.statistics.IntegerStatistics;
import io.prestosql.orc.metadata.statistics.StringStatistics;
import io.prestosql.plugin.hive.orc.OrcFileWriter;
import io.prestosql.spi.type.Type;
import org.apache.iceberg.Metrics;
import org.apache.iceberg.Schema;
import org.apache.iceberg.types.Conversions;
import org.apache.iceberg.types.Types;
import java.math.BigDecimal;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.OptionalInt;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.function.Supplier;
import static com.google.common.base.Verify.verify;
import static io.prestosql.orc.metadata.OrcColumnId.ROOT_COLUMN;
import static io.prestosql.plugin.hive.acid.AcidTransaction.NO_ACID_TRANSACTION;
import static io.prestosql.plugin.iceberg.TypeConverter.ORC_ICEBERG_ID_KEY;
import static java.lang.Math.toIntExact;
import static java.util.Objects.requireNonNull;
public class IcebergOrcFileWriter
extends OrcFileWriter
implements IcebergFileWriter
{
private final Schema icebergSchema;
private final ColumnMetadata<OrcType> orcColumns;
public IcebergOrcFileWriter(
Schema icebergSchema,
OrcDataSink orcDataSink,
Callable<Void> rollbackAction,
List<String> columnNames,
List<Type> fileColumnTypes,
ColumnMetadata<OrcType> fileColumnOrcTypes,
CompressionKind compression,
OrcWriterOptions options,
boolean writeLegacyVersion,
int[] fileInputColumnIndexes,
Map<String, String> metadata,
Optional<Supplier<OrcDataSource>> validationInputFactory,
OrcWriteValidation.OrcWriteValidationMode validationMode,
OrcWriterStats stats)
{
super(orcDataSink, NO_ACID_TRANSACTION, false, OptionalInt.empty(), rollbackAction, columnNames, fileColumnTypes, fileColumnOrcTypes, compression, options, writeLegacyVersion, fileInputColumnIndexes, metadata, validationInputFactory, validationMode, stats);
this.icebergSchema = requireNonNull(icebergSchema, "icebergSchema is null");
orcColumns = fileColumnOrcTypes;
}
@Override
public Metrics getMetrics()
{
return computeMetrics(icebergSchema, orcColumns, orcWriter.getFileRowCount(), orcWriter.getFileStats());
}
private static Metrics computeMetrics(Schema icebergSchema, ColumnMetadata<OrcType> orcColumns, long fileRowCount, Optional<ColumnMetadata<ColumnStatistics>> columnStatistics)
{
if (columnStatistics.isEmpty()) {
return new Metrics(fileRowCount, null, null, null, null, null);
}
// Columns that are descendants of LIST or MAP types are excluded because:
// 1. Their stats are not used by Apache Iceberg to filter out data files
// 2. Their record count can be larger than table-level row count. There's no good way to calculate nullCounts for them.
// See https://github.com/apache/iceberg/pull/199#discussion_r429443627
Set<OrcColumnId> excludedColumns = getExcludedColumns(orcColumns);
ImmutableMap.Builder<Integer, Long> valueCountsBuilder = ImmutableMap.builder();
ImmutableMap.Builder<Integer, Long> nullCountsBuilder = ImmutableMap.builder();
ImmutableMap.Builder<Integer, ByteBuffer> lowerBoundsBuilder = ImmutableMap.builder();
ImmutableMap.Builder<Integer, ByteBuffer> upperBoundsBuilder = ImmutableMap.builder();
// OrcColumnId(0) is the root column that represents file-level schema
for (int i = 1; i < orcColumns.size(); i++) {
OrcColumnId orcColumnId = new OrcColumnId(i);
if (excludedColumns.contains(orcColumnId)) {
continue;
}
OrcType orcColumn = orcColumns.get(orcColumnId);
ColumnStatistics orcColumnStats = columnStatistics.get().get(orcColumnId);
int icebergId = getIcebergId(orcColumn);
Types.NestedField icebergField = icebergSchema.findField(icebergId);
verify(icebergField != null, "Cannot find Iceberg column with ID %s in schema %s", icebergId, icebergSchema);
valueCountsBuilder.put(icebergId, fileRowCount);
if (orcColumnStats.hasNumberOfValues()) {
nullCountsBuilder.put(icebergId, fileRowCount - orcColumnStats.getNumberOfValues());
}
toIcebergMinMax(orcColumnStats, icebergField.type()).ifPresent(minMax -> {
lowerBoundsBuilder.put(icebergId, minMax.getMin());
upperBoundsBuilder.put(icebergId, minMax.getMax());
});
}
Map<Integer, Long> valueCounts = valueCountsBuilder.build();
Map<Integer, Long> nullCounts = nullCountsBuilder.build();
Map<Integer, ByteBuffer> lowerBounds = lowerBoundsBuilder.build();
Map<Integer, ByteBuffer> upperBounds = upperBoundsBuilder.build();
return new Metrics(
fileRowCount,
null, // TODO: Add column size accounting to ORC column writers
valueCounts.isEmpty() ? null : valueCounts,
nullCounts.isEmpty() ? null : nullCounts,
lowerBounds.isEmpty() ? null : lowerBounds,
upperBounds.isEmpty() ? null : upperBounds);
}
private static Set<OrcColumnId> getExcludedColumns(ColumnMetadata<OrcType> orcColumns)
{
ImmutableSet.Builder<OrcColumnId> excludedColumns = ImmutableSet.builder();
populateExcludedColumns(orcColumns, ROOT_COLUMN, false, excludedColumns);
return excludedColumns.build();
}
private static void populateExcludedColumns(ColumnMetadata<OrcType> orcColumns, OrcColumnId orcColumnId, boolean exclude, ImmutableSet.Builder<OrcColumnId> excludedColumns)
{
if (exclude) {
excludedColumns.add(orcColumnId);
}
OrcType orcColumn = orcColumns.get(orcColumnId);
switch (orcColumn.getOrcTypeKind()) {
case LIST:
case MAP:
for (OrcColumnId child : orcColumn.getFieldTypeIndexes()) {
populateExcludedColumns(orcColumns, child, true, excludedColumns);
}
return;
case STRUCT:
for (OrcColumnId child : orcColumn.getFieldTypeIndexes()) {
populateExcludedColumns(orcColumns, child, exclude, excludedColumns);
}
return;
}
}
private static int getIcebergId(OrcType orcColumn)
{
String icebergId = orcColumn.getAttributes().get(ORC_ICEBERG_ID_KEY);
verify(icebergId != null, "ORC column %s doesn't have an associated Iceberg ID", orcColumn);
return Integer.parseInt(icebergId);
}
private static Optional<IcebergMinMax> toIcebergMinMax(ColumnStatistics orcColumnStats, org.apache.iceberg.types.Type icebergType)
{
IntegerStatistics integerStatistics = orcColumnStats.getIntegerStatistics();
if (integerStatistics != null) {
Object min = integerStatistics.getMin();
Object max = integerStatistics.getMax();
if (min == null || max == null) {
return Optional.empty();
}
if (icebergType.typeId() == org.apache.iceberg.types.Type.TypeID.INTEGER) {
min = toIntExact((Long) min);
max = toIntExact((Long) max);
}
return Optional.of(new IcebergMinMax(icebergType, min, max));
}
DoubleStatistics doubleStatistics = orcColumnStats.getDoubleStatistics();
if (doubleStatistics != null) {
Object min = doubleStatistics.getMin();
Object max = doubleStatistics.getMax();
if (min == null || max == null) {
return Optional.empty();
}
if (icebergType.typeId() == org.apache.iceberg.types.Type.TypeID.FLOAT) {
min = ((Double) min).floatValue();
max = ((Double) max).floatValue();
}
return Optional.of(new IcebergMinMax(icebergType, min, max));
}
StringStatistics stringStatistics = orcColumnStats.getStringStatistics();
if (stringStatistics != null) {
Slice min = stringStatistics.getMin();
Slice max = stringStatistics.getMax();
if (min == null || max == null) {
return Optional.empty();
}
return Optional.of(new IcebergMinMax(icebergType, min.toStringUtf8(), max.toStringUtf8()));
}
DateStatistics dateStatistics = orcColumnStats.getDateStatistics();
if (dateStatistics != null) {
Integer min = dateStatistics.getMin();
Integer max = dateStatistics.getMax();
if (min == null || max == null) {
return Optional.empty();
}
return Optional.of(new IcebergMinMax(icebergType, min, max));
}
DecimalStatistics decimalStatistics = orcColumnStats.getDecimalStatistics();
if (decimalStatistics != null) {
BigDecimal min = decimalStatistics.getMin();
BigDecimal max = decimalStatistics.getMax();
if (min == null || max == null) {
return Optional.empty();
}
min = min.setScale(((Types.DecimalType) icebergType).scale());
max = max.setScale(((Types.DecimalType) icebergType).scale());
return Optional.of(new IcebergMinMax(icebergType, min, max));
}
return Optional.empty();
}
/**
 * Immutable holder for a column's min/max statistics, stored in Iceberg's
 * single-value binary representation (as produced by {@code Conversions.toByteBuffer}).
 */
private static class IcebergMinMax
{
    // Fields are final: this is a write-once value holder built entirely in the constructor.
    private final ByteBuffer min;
    private final ByteBuffer max;

    /**
     * @param type the Iceberg type used to serialize the values
     * @param min the minimum value, in the Java representation matching {@code type}
     * @param max the maximum value, in the Java representation matching {@code type}
     */
    private IcebergMinMax(org.apache.iceberg.types.Type type, Object min, Object max)
    {
        this.min = Conversions.toByteBuffer(type, min);
        this.max = Conversions.toByteBuffer(type, max);
    }

    public ByteBuffer getMin()
    {
        return min;
    }

    public ByteBuffer getMax()
    {
        return max;
    }
}
}
| erichwang/presto | presto-iceberg/src/main/java/io/prestosql/plugin/iceberg/IcebergOrcFileWriter.java | Java | apache-2.0 | 11,546 |
/*
* The Alluxio Open Foundation licenses this work under the Apache License, version 2.0
* (the "License"). You may not use this work except in compliance with the License, which is
* available at www.apache.org/licenses/LICENSE-2.0
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied, as more fully set forth in the License.
*
* See the NOTICE file distributed with this work for information regarding copyright ownership.
*/
package alluxio.master.file;
import static alluxio.metrics.MetricInfo.UFS_OP_SAVED_PREFIX;
import alluxio.AlluxioURI;
import alluxio.ClientContext;
import alluxio.Constants;
import alluxio.Server;
import alluxio.client.job.JobMasterClient;
import alluxio.client.job.JobMasterClientPool;
import alluxio.clock.SystemClock;
import alluxio.collections.Pair;
import alluxio.collections.PrefixList;
import alluxio.conf.PropertyKey;
import alluxio.conf.ServerConfiguration;
import alluxio.exception.AccessControlException;
import alluxio.exception.AlluxioException;
import alluxio.exception.BlockInfoException;
import alluxio.exception.ConnectionFailedException;
import alluxio.exception.DirectoryNotEmptyException;
import alluxio.exception.ExceptionMessage;
import alluxio.exception.FileAlreadyCompletedException;
import alluxio.exception.FileAlreadyExistsException;
import alluxio.exception.FileDoesNotExistException;
import alluxio.exception.InvalidFileSizeException;
import alluxio.exception.InvalidPathException;
import alluxio.exception.PreconditionMessage;
import alluxio.exception.UnexpectedAlluxioException;
import alluxio.exception.status.FailedPreconditionException;
import alluxio.exception.status.InvalidArgumentException;
import alluxio.exception.status.NotFoundException;
import alluxio.exception.status.PermissionDeniedException;
import alluxio.exception.status.ResourceExhaustedException;
import alluxio.exception.status.UnavailableException;
import alluxio.file.options.DescendantType;
import alluxio.grpc.DeletePOptions;
import alluxio.grpc.FileSystemMasterCommonPOptions;
import alluxio.grpc.GrpcService;
import alluxio.grpc.GrpcUtils;
import alluxio.grpc.LoadDescendantPType;
import alluxio.grpc.LoadMetadataPOptions;
import alluxio.grpc.LoadMetadataPType;
import alluxio.grpc.MountPOptions;
import alluxio.grpc.ServiceType;
import alluxio.grpc.SetAclAction;
import alluxio.grpc.SetAttributePOptions;
import alluxio.grpc.TtlAction;
import alluxio.heartbeat.HeartbeatContext;
import alluxio.heartbeat.HeartbeatThread;
import alluxio.job.plan.persist.PersistConfig;
import alluxio.job.wire.JobInfo;
import alluxio.master.file.contexts.CallTracker;
import alluxio.master.CoreMaster;
import alluxio.master.CoreMasterContext;
import alluxio.master.ProtobufUtils;
import alluxio.master.audit.AsyncUserAccessAuditLogWriter;
import alluxio.master.audit.AuditContext;
import alluxio.master.block.BlockId;
import alluxio.master.block.BlockMaster;
import alluxio.master.file.activesync.ActiveSyncManager;
import alluxio.master.file.contexts.CheckAccessContext;
import alluxio.master.file.contexts.CheckConsistencyContext;
import alluxio.master.file.contexts.CompleteFileContext;
import alluxio.master.file.contexts.CreateDirectoryContext;
import alluxio.master.file.contexts.CreateFileContext;
import alluxio.master.file.contexts.DeleteContext;
import alluxio.master.file.contexts.FreeContext;
import alluxio.master.file.contexts.GetStatusContext;
import alluxio.master.file.contexts.InternalOperationContext;
import alluxio.master.file.contexts.ListStatusContext;
import alluxio.master.file.contexts.LoadMetadataContext;
import alluxio.master.file.contexts.MountContext;
import alluxio.master.file.contexts.OperationContext;
import alluxio.master.file.contexts.RenameContext;
import alluxio.master.file.contexts.ScheduleAsyncPersistenceContext;
import alluxio.master.file.contexts.SetAclContext;
import alluxio.master.file.contexts.SetAttributeContext;
import alluxio.master.file.contexts.WorkerHeartbeatContext;
import alluxio.master.file.meta.FileSystemMasterView;
import alluxio.master.file.meta.Inode;
import alluxio.master.file.meta.InodeDirectory;
import alluxio.master.file.meta.InodeDirectoryIdGenerator;
import alluxio.master.file.meta.InodeDirectoryView;
import alluxio.master.file.meta.InodeFile;
import alluxio.master.file.meta.InodeLockManager;
import alluxio.master.file.meta.InodePathPair;
import alluxio.master.file.meta.InodeTree;
import alluxio.master.file.meta.InodeTree.LockPattern;
import alluxio.master.file.meta.LockedInodePath;
import alluxio.master.file.meta.LockedInodePathList;
import alluxio.master.file.meta.LockingScheme;
import alluxio.master.file.meta.MountTable;
import alluxio.master.file.meta.PersistenceState;
import alluxio.master.file.meta.UfsAbsentPathCache;
import alluxio.master.file.meta.UfsBlockLocationCache;
import alluxio.master.file.meta.UfsSyncPathCache;
import alluxio.master.file.meta.options.MountInfo;
import alluxio.master.journal.DelegatingJournaled;
import alluxio.master.journal.JournalContext;
import alluxio.master.journal.Journaled;
import alluxio.master.journal.JournaledGroup;
import alluxio.master.journal.checkpoint.CheckpointName;
import alluxio.master.metastore.DelegatingReadOnlyInodeStore;
import alluxio.master.metastore.InodeStore;
import alluxio.master.metastore.ReadOnlyInodeStore;
import alluxio.master.metrics.TimeSeriesStore;
import alluxio.metrics.Metric;
import alluxio.metrics.MetricInfo;
import alluxio.metrics.MetricKey;
import alluxio.metrics.MetricsSystem;
import alluxio.metrics.TimeSeries;
import alluxio.proto.journal.File;
import alluxio.proto.journal.File.NewBlockEntry;
import alluxio.proto.journal.File.RenameEntry;
import alluxio.proto.journal.File.SetAclEntry;
import alluxio.proto.journal.File.UpdateInodeEntry;
import alluxio.proto.journal.File.UpdateInodeFileEntry;
import alluxio.proto.journal.File.UpdateInodeFileEntry.Builder;
import alluxio.proto.journal.Journal.JournalEntry;
import alluxio.resource.CloseableResource;
import alluxio.resource.LockResource;
import alluxio.retry.CountingRetry;
import alluxio.retry.RetryPolicy;
import alluxio.security.authentication.AuthType;
import alluxio.security.authentication.AuthenticatedClientUser;
import alluxio.security.authentication.ClientIpAddressInjector;
import alluxio.security.authorization.AclEntry;
import alluxio.security.authorization.AclEntryType;
import alluxio.security.authorization.Mode;
import alluxio.underfs.Fingerprint;
import alluxio.underfs.MasterUfsManager;
import alluxio.underfs.UfsManager;
import alluxio.underfs.UfsMode;
import alluxio.underfs.UfsStatus;
import alluxio.underfs.UnderFileSystem;
import alluxio.underfs.UnderFileSystemConfiguration;
import alluxio.util.CommonUtils;
import alluxio.util.IdUtils;
import alluxio.util.LogUtils;
import alluxio.util.ModeUtils;
import alluxio.util.SecurityUtils;
import alluxio.util.ThreadFactoryUtils;
import alluxio.util.UnderFileSystemUtils;
import alluxio.util.executor.ExecutorServiceFactories;
import alluxio.util.executor.ExecutorServiceFactory;
import alluxio.util.io.PathUtils;
import alluxio.util.proto.ProtoUtils;
import alluxio.wire.BlockInfo;
import alluxio.wire.BlockLocation;
import alluxio.wire.CommandType;
import alluxio.wire.FileBlockInfo;
import alluxio.wire.FileInfo;
import alluxio.wire.FileSystemCommand;
import alluxio.wire.FileSystemCommandOptions;
import alluxio.wire.MountPointInfo;
import alluxio.wire.PersistCommandOptions;
import alluxio.wire.PersistFile;
import alluxio.wire.SyncPointInfo;
import alluxio.wire.UfsInfo;
import alluxio.wire.WorkerInfo;
import alluxio.worker.job.JobMasterClientContext;
import com.codahale.metrics.Counter;
import com.codahale.metrics.Gauge;
import com.codahale.metrics.MetricRegistry;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
import io.grpc.ServerInterceptors;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.Stack;
import java.util.TreeMap;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import javax.annotation.Nullable;
import javax.annotation.concurrent.NotThreadSafe;
/**
* The master that handles all file system metadata management.
*/
@NotThreadSafe // TODO(jiri): make thread-safe (c.f. ALLUXIO-1664)
public final class DefaultFileSystemMaster extends CoreMaster
implements FileSystemMaster, DelegatingJournaled {
/** The logger for this class. */
private static final Logger LOG = LoggerFactory.getLogger(DefaultFileSystemMaster.class);
/** Masters which must be running before this master can start. */
private static final Set<Class<? extends Server>> DEPS = ImmutableSet.of(BlockMaster.class);
/** The number of threads to use in the {@link #mPersistCheckerPool}. */
private static final int PERSIST_CHECKER_POOL_THREADS = 128;
/**
* Locking in DefaultFileSystemMaster
*
* Individual paths are locked in the inode tree. In order to read or write any inode, the path
* must be locked. A path is locked via one of the lock methods in {@link InodeTree}, such as
* {@link InodeTree#lockInodePath(AlluxioURI, LockMode)} or
* {@link InodeTree#lockFullInodePath(AlluxioURI, LockMode)}. These lock methods return
* an {@link LockedInodePath}, which represents a locked path of inodes. These locked paths
* ({@link LockedInodePath}) must be unlocked. In order to ensure a locked
* {@link LockedInodePath} is always unlocked, the following paradigm is recommended:
*
* <p><blockquote><pre>
* try (LockedInodePath inodePath = mInodeTree.lockInodePath(path, LockPattern.READ)) {
* ...
* }
* </pre></blockquote>
*
* When locking a path in the inode tree, it is possible that other concurrent operations have
* modified the inode tree while a thread is waiting to acquire a lock on the inode. Lock
* acquisitions throw {@link InvalidPathException} to indicate that the inode structure is no
* longer consistent with what the caller original expected, for example if the inode
* previously obtained at /pathA has been renamed to /pathB during the wait for the inode lock.
* Methods which specifically act on a path will propagate this exception to the caller, while
* methods which iterate over child nodes can safely ignore the exception and treat the inode
* as no longer a child.
*
* JournalContext, BlockDeletionContext, and RpcContext
*
* RpcContext is an aggregator for various contexts which get passed around through file system
* master methods.
*
* Currently there are two types of contexts that get passed around: {@link JournalContext} and
* {@link BlockDeletionContext}. These contexts are used to register work that should be done when
* the context closes. The journal context tracks journal entries which need to be flushed, while
* the block deletion context tracks which blocks need to be deleted in the {@link BlockMaster}.
*
* File system master journal entries should be written before blocks are deleted in the block
* master, so journal context should always be closed before block deletion context. In order to
* ensure that contexts are closed and closed in the right order, the following paradign is
* recommended:
*
* <p><blockquote><pre>
* try (RpcContext rpcContext = createRpcContext()) {
* // access journal context with rpcContext.getJournalContext()
* // access block deletion context with rpcContext.getBlockDeletionContext()
* ...
* }
* </pre></blockquote>
*
* When used in conjunction with {@link LockedInodePath} and {@link AuditContext}, the usage
* should look like
*
* <p><blockquote><pre>
* try (RpcContext rpcContext = createRpcContext();
* LockedInodePath inodePath = mInodeTree.lockInodePath(...);
* FileSystemMasterAuditContext auditContext = createAuditContext(...)) {
* ...
* }
* </pre></blockquote>
*
* NOTE: Because resources are released in the opposite order they are acquired, the
* {@link JournalContext}, {@link BlockDeletionContext}, or {@link RpcContext} resources should be
* always created before any {@link LockedInodePath} resources to avoid holding an inode path lock
* while waiting for journal IO.
*
* User access audit logging in the FileSystemMaster
*
* User accesses to file system metadata should be audited. The intent to write audit log and the
* actual writing of the audit log is decoupled so that operations are not holding metadata locks
* waiting on the audit log IO. In particular {@link AsyncUserAccessAuditLogWriter} uses a
* separate thread to perform actual audit log IO. In order for audit log entries to preserve
* the order of file system operations, the intention of auditing should be submitted to
* {@link AsyncUserAccessAuditLogWriter} while holding locks on the inode path. That said, the
* {@link AuditContext} resources should always live within the scope of {@link LockedInodePath},
* i.e. created after {@link LockedInodePath}. Otherwise, the order of audit log entries may not
* reflect the actual order of the user accesses.
* Resources are released in the opposite order they are acquired, the
* {@link AuditContext#close()} method is called before {@link LockedInodePath#close()}, thus
* guaranteeing the order.
*
* Method Conventions in the FileSystemMaster
*
* All of the flow of the FileSystemMaster follow a convention. There are essentially 4 main
* types of methods:
* (A) public api methods
* (B) private (or package private) internal methods
*
* (A) public api methods:
* These methods are public and are accessed by the RPC and REST APIs. These methods lock all
* the required paths, and also perform all permission checking.
* (A) cannot call (A)
* (A) can call (B)
*
* (B) private (or package private) internal methods:
* These methods perform the rest of the work. The names of these
* methods are suffixed by "Internal". These are typically called by the (A) methods.
* (B) cannot call (A)
* (B) can call (B)
*/
/** Handle to the block master. */
private final BlockMaster mBlockMaster;
/** This manages the file system inode structure. This must be journaled. */
private final InodeTree mInodeTree;
/** Store for holding inodes. */
private final ReadOnlyInodeStore mInodeStore;
/** This manages inode locking. */
private final InodeLockManager mInodeLockManager;
/** This manages the file system mount points. */
private final MountTable mMountTable;
/** This generates unique directory ids. This must be journaled. */
private final InodeDirectoryIdGenerator mDirectoryIdGenerator;
/** This checks user permissions on different operations. */
private final PermissionChecker mPermissionChecker;
/** List of paths to always keep in memory. */
private final PrefixList mWhitelist;
/** A pool of job master clients. */
private final JobMasterClientPool mJobMasterClientPool;
/** Set of file IDs to persist. */
private final Map<Long, alluxio.time.ExponentialTimer> mPersistRequests;
/** Map from file IDs to persist jobs. */
private final Map<Long, PersistJob> mPersistJobs;
/** The manager of all ufs. */
private final MasterUfsManager mUfsManager;
/** This caches absent paths in the UFS. */
private final UfsAbsentPathCache mUfsAbsentPathCache;
/** This caches block locations in the UFS. */
private final UfsBlockLocationCache mUfsBlockLocationCache;
/** This caches paths which have been synced with UFS. */
private final UfsSyncPathCache mUfsSyncPathCache;
/** The {@link JournaledGroup} representing all the subcomponents which require journaling. */
private final JournaledGroup mJournaledGroup;
/** List of strings which are blacklisted from async persist. */
private final List<String> mPersistBlacklist;
/** Thread pool which asynchronously handles the completion of persist jobs. */
private java.util.concurrent.ThreadPoolExecutor mPersistCheckerPool;
/** Manages active (continuous) syncing between the UFS and Alluxio; journaled. */
private ActiveSyncManager mSyncManager;
/** Log writer for user access audit log. */
private AsyncUserAccessAuditLogWriter mAsyncAuditLogWriter;
/** Stores the time series for various metrics which are exposed in the UI. */
private TimeSeriesStore mTimeSeriesStore;
/** Updates inode access times; started and stopped together with this master. */
private AccessTimeUpdater mAccessTimeUpdater;
/** Used to check pending/running backup from RPCs. */
private CallTracker mStateLockCallTracker;
/** Thread pool for prefetching from the UFS during metadata sync; core threads may time out. */
final ThreadPoolExecutor mSyncPrefetchExecutor = new ThreadPoolExecutor(
ServerConfiguration.getInt(PropertyKey.MASTER_METADATA_SYNC_UFS_PREFETCH_POOL_SIZE),
ServerConfiguration.getInt(PropertyKey.MASTER_METADATA_SYNC_UFS_PREFETCH_POOL_SIZE),
1, TimeUnit.MINUTES, new LinkedBlockingQueue<>(),
ThreadFactoryUtils.build("alluxio-ufs-sync-prefetch-%d", false));
/** Thread pool for executing metadata sync operations; core threads may time out. */
final ThreadPoolExecutor mSyncMetadataExecutor = new ThreadPoolExecutor(
ServerConfiguration.getInt(PropertyKey.MASTER_METADATA_SYNC_EXECUTOR_POOL_SIZE),
ServerConfiguration.getInt(PropertyKey.MASTER_METADATA_SYNC_EXECUTOR_POOL_SIZE),
1, TimeUnit.MINUTES, new LinkedBlockingQueue<>(),
ThreadFactoryUtils.build("alluxio-ufs-sync-%d", false));
/** Thread pool for active-sync metadata operations; core threads may time out. */
final ThreadPoolExecutor mActiveSyncMetadataExecutor = new ThreadPoolExecutor(
ServerConfiguration.getInt(PropertyKey.MASTER_METADATA_SYNC_EXECUTOR_POOL_SIZE),
ServerConfiguration.getInt(PropertyKey.MASTER_METADATA_SYNC_EXECUTOR_POOL_SIZE),
1, TimeUnit.MINUTES, new LinkedBlockingQueue<>(),
ThreadFactoryUtils.build("alluxio-ufs-active-sync-%d", false));
/**
 * Creates a new instance of {@link DefaultFileSystemMaster}, using a cached thread pool
 * for the maintenance-thread executor service.
 *
 * @param blockMaster a block master handle
 * @param masterContext the context for Alluxio master
 */
public DefaultFileSystemMaster(BlockMaster blockMaster, CoreMasterContext masterContext) {
// Delegate to the full constructor with the default executor service factory.
this(blockMaster, masterContext,
ExecutorServiceFactories.cachedThreadPool(Constants.FILE_SYSTEM_MASTER_NAME));
}
/**
 * Creates a new instance of {@link DefaultFileSystemMaster}.
 *
 * Note: initialization order matters here — e.g. the mount table must exist before the
 * inode tree, and the journaled components are registered in restore order at the end.
 *
 * @param blockMaster a block master handle
 * @param masterContext the context for Alluxio master
 * @param executorServiceFactory a factory for creating the executor service to use for running
 * maintenance threads
 */
public DefaultFileSystemMaster(BlockMaster blockMaster, CoreMasterContext masterContext,
ExecutorServiceFactory executorServiceFactory) {
super(masterContext, new SystemClock(), executorServiceFactory);
mBlockMaster = blockMaster;
mDirectoryIdGenerator = new InodeDirectoryIdGenerator(mBlockMaster);
mUfsManager = masterContext.getUfsManager();
mMountTable = new MountTable(mUfsManager, getRootMountInfo(mUfsManager));
mInodeLockManager = new InodeLockManager();
InodeStore inodeStore = masterContext.getInodeStoreFactory().apply(mInodeLockManager);
// Read-only view of the inode store; mutations go through the inode tree.
mInodeStore = new DelegatingReadOnlyInodeStore(inodeStore);
mInodeTree = new InodeTree(inodeStore, mBlockMaster,
mDirectoryIdGenerator, mMountTable, mInodeLockManager);
// TODO(gene): Handle default config value for whitelist.
mWhitelist = new PrefixList(ServerConfiguration.getList(PropertyKey.MASTER_WHITELIST, ","));
mPersistBlacklist = ServerConfiguration.isSet(PropertyKey.MASTER_PERSISTENCE_BLACKLIST)
? ServerConfiguration.getList(PropertyKey.MASTER_PERSISTENCE_BLACKLIST, ",")
: Collections.emptyList();
// Call tracker which reports cancellation while the state-lock cycle is being interrupted
// (i.e. while a backup is pending/running).
mStateLockCallTracker = new CallTracker() {
@Override
public boolean isCancelled() {
return masterContext.getStateLockManager().interruptCycleTicking();
}
@Override
public Type getType() {
return Type.STATE_LOCK_TRACKER;
}
};
mPermissionChecker = new DefaultPermissionChecker(mInodeTree);
mJobMasterClientPool = new JobMasterClientPool(JobMasterClientContext
.newBuilder(ClientContext.create(ServerConfiguration.global())).build());
mPersistRequests = new java.util.concurrent.ConcurrentHashMap<>();
mPersistJobs = new java.util.concurrent.ConcurrentHashMap<>();
mUfsAbsentPathCache = UfsAbsentPathCache.Factory.create(mMountTable);
mUfsBlockLocationCache = UfsBlockLocationCache.Factory.create(mMountTable);
mUfsSyncPathCache = new UfsSyncPathCache();
mSyncManager = new ActiveSyncManager(mMountTable, this);
mTimeSeriesStore = new TimeSeriesStore();
mAccessTimeUpdater = new AccessTimeUpdater(this, mInodeTree, masterContext.getJournalSystem());
// Sync executors should allow core threads to time out
mSyncPrefetchExecutor.allowCoreThreadTimeOut(true);
mSyncMetadataExecutor.allowCoreThreadTimeOut(true);
mActiveSyncMetadataExecutor.allowCoreThreadTimeOut(true);
// The mount table should come after the inode tree because restoring the mount table requires
// that the inode tree is already restored.
ArrayList<Journaled> journaledComponents = new ArrayList<Journaled>() {
{
add(mInodeTree);
add(mDirectoryIdGenerator);
add(mMountTable);
add(mUfsManager);
add(mSyncManager);
}
};
mJournaledGroup = new JournaledGroup(journaledComponents, CheckpointName.FILE_SYSTEM_MASTER);
resetState();
Metrics.registerGauges(this, mUfsManager);
}
/**
 * Builds the {@link MountInfo} describing the Alluxio root mount point from the root UFS
 * and the root-mount configuration properties.
 *
 * @param ufsManager the UFS manager holding the root UFS
 * @return the mount information for the root mount
 */
private static MountInfo getRootMountInfo(MasterUfsManager ufsManager) {
  try (CloseableResource<UnderFileSystem> ufsResource =
      ufsManager.getRoot().acquireUfsResource()) {
    // Only object stores may be mounted as publicly shared.
    boolean isShared = ufsResource.get().isObjectStorage()
        && ServerConfiguration.getBoolean(PropertyKey.UNDERFS_OBJECT_STORE_MOUNT_SHARED_PUBLICLY);
    boolean isReadonly =
        ServerConfiguration.getBoolean(PropertyKey.MASTER_MOUNT_TABLE_ROOT_READONLY);
    Map<String, String> rootUfsProperties =
        ServerConfiguration.getNestedProperties(PropertyKey.MASTER_MOUNT_TABLE_ROOT_OPTION);
    // Normalize the root UFS URI so it always ends with the path separator.
    String rootUfsUri = PathUtils.normalizePath(
        ServerConfiguration.get(PropertyKey.MASTER_MOUNT_TABLE_ROOT_UFS), AlluxioURI.SEPARATOR);
    MountPOptions mountOptions = MountContext
        .mergeFrom(MountPOptions.newBuilder()
            .setShared(isShared)
            .setReadOnly(isReadonly)
            .putAllProperties(rootUfsProperties))
        .getOptions().build();
    return new MountInfo(new AlluxioURI(MountTable.ROOT), new AlluxioURI(rootUfsUri),
        IdUtils.ROOT_MOUNT_ID, mountOptions);
  }
}
/**
 * Returns the gRPC services exposed by this master, keyed by service type: the client
 * service (with caller-IP injection), the job service, and the worker service.
 */
@Override
public Map<ServiceType, GrpcService> getServices() {
  // The client-facing service is wrapped with an interceptor that records the caller's
  // IP address for each RPC.
  GrpcService clientService = new GrpcService(ServerInterceptors.intercept(
      new FileSystemMasterClientServiceHandler(this), new ClientIpAddressInjector()));
  Map<ServiceType, GrpcService> services = new HashMap<>();
  services.put(ServiceType.FILE_SYSTEM_MASTER_CLIENT_SERVICE, clientService);
  services.put(ServiceType.FILE_SYSTEM_MASTER_JOB_SERVICE,
      new GrpcService(new FileSystemMasterJobServiceHandler(this)));
  services.put(ServiceType.FILE_SYSTEM_MASTER_WORKER_SERVICE,
      new GrpcService(new FileSystemMasterWorkerServiceHandler(this)));
  return services;
}
/**
 * @return the constant name under which this master registers itself
 */
@Override
public String getName() {
return Constants.FILE_SYSTEM_MASTER_NAME;
}
/**
 * @return the servers this master depends on (the block master)
 */
@Override
public Set<Class<? extends Server>> getDependencies() {
return DEPS;
}
/**
 * @return the {@link JournaledGroup} which journals all journaled subcomponents of this master
 */
@Override
public Journaled getDelegate() {
return mJournaledGroup;
}
/**
 * Starts this file system master.
 *
 * When starting as the primary, this (in order): initializes or validates the root inode,
 * registers each mount point with the UFS manager, rebuilds the persist request/job
 * bookkeeping from the journaled inode state, and launches the periodic maintenance
 * heartbeats (block integrity, TTL, lost files, replication, persistence, metrics) plus
 * the optional audit log writer and UFS cleaner.
 *
 * @param isPrimary whether this master is starting as the primary
 */
@Override
public void start(Boolean isPrimary) throws IOException {
super.start(isPrimary);
if (isPrimary) {
LOG.info("Starting fs master as primary");
// Ensure the inode tree has a root; create it on first startup.
InodeDirectory root = mInodeTree.getRoot();
if (root == null) {
try (JournalContext context = createJournalContext()) {
mInodeTree.initializeRoot(
SecurityUtils.getOwner(mMasterContext.getUserState()),
SecurityUtils.getGroup(mMasterContext.getUserState(), ServerConfiguration.global()),
ModeUtils.applyDirectoryUMask(Mode.createFullAccess(),
ServerConfiguration.get(PropertyKey.SECURITY_AUTHORIZATION_PERMISSION_UMASK)),
context);
}
} else if (!ServerConfiguration.getBoolean(PropertyKey.MASTER_SKIP_ROOT_ACL_CHECK)) {
// For backwards-compatibility:
// Empty root owner indicates that previously the master had no security. In this case, the
// master is allowed to be started with security turned on.
String serverOwner = SecurityUtils.getOwner(mMasterContext.getUserState());
if (SecurityUtils.isSecurityEnabled(ServerConfiguration.global())
&& !root.getOwner().isEmpty() && !root.getOwner().equals(serverOwner)) {
// user is not the previous owner
throw new PermissionDeniedException(ExceptionMessage.PERMISSION_DENIED.getMessage(String
.format("Unauthorized user on root. inode owner: %s current user: %s",
root.getOwner(), serverOwner)));
}
}
// Initialize the ufs manager from the mount table.
for (String key : mMountTable.getMountTable().keySet()) {
if (key.equals(MountTable.ROOT)) {
// The root mount is registered with the UFS manager at construction time.
continue;
}
MountInfo mountInfo = mMountTable.getMountTable().get(key);
UnderFileSystemConfiguration ufsConf =
UnderFileSystemConfiguration.defaults(ServerConfiguration.global())
.createMountSpecificConf(mountInfo.getOptions().getPropertiesMap())
.setReadOnly(mountInfo.getOptions().getReadOnly())
.setShared(mountInfo.getOptions().getShared());
mUfsManager.addMount(mountInfo.getMountId(), mountInfo.getUfsUri(), ufsConf);
}
// Startup Checks and Periodic Threads.
// Rebuild the list of persist jobs (mPersistJobs) and map of pending persist requests
// (mPersistRequests)
long persistInitialIntervalMs =
ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_INITIAL_INTERVAL_MS);
long persistMaxIntervalMs =
ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_MAX_INTERVAL_MS);
long persistMaxWaitMs =
ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_MAX_TOTAL_WAIT_TIME_MS);
for (Long id : mInodeTree.getToBePersistedIds()) {
Inode inode = mInodeStore.get(id).get();
if (inode.isDirectory()
|| !inode.asFile().isCompleted() // When file is completed it is added to persist reqs
|| inode.getPersistenceState() != PersistenceState.TO_BE_PERSISTED
|| inode.asFile().getShouldPersistTime() == Constants.NO_AUTO_PERSIST) {
continue;
}
InodeFile inodeFile = inode.asFile();
if (inodeFile.getPersistJobId() == Constants.PERSISTENCE_INVALID_JOB_ID) {
// No persist job was running for this file yet; schedule a fresh request.
mPersistRequests.put(inodeFile.getId(),
new alluxio.time.ExponentialTimer(
persistInitialIntervalMs,
persistMaxIntervalMs,
getPersistenceWaitTime(inodeFile.getShouldPersistTime()),
persistMaxWaitMs));
} else {
// A persist job had already been launched; re-attach to it.
AlluxioURI path;
try {
path = mInodeTree.getPath(inodeFile);
} catch (FileDoesNotExistException e) {
LOG.error("Failed to determine path for inode with id {}", id, e);
continue;
}
addPersistJob(id, inodeFile.getPersistJobId(),
getPersistenceWaitTime(inodeFile.getShouldPersistTime()),
path, inodeFile.getTempUfsPath());
}
}
if (ServerConfiguration
.getBoolean(PropertyKey.MASTER_STARTUP_BLOCK_INTEGRITY_CHECK_ENABLED)) {
validateInodeBlocks(true);
}
int blockIntegrityCheckInterval = (int) ServerConfiguration
.getMs(PropertyKey.MASTER_PERIODIC_BLOCK_INTEGRITY_CHECK_INTERVAL);
if (blockIntegrityCheckInterval > 0) { // negative or zero interval implies disabled
getExecutorService().submit(
new HeartbeatThread(HeartbeatContext.MASTER_BLOCK_INTEGRITY_CHECK,
new BlockIntegrityChecker(this), blockIntegrityCheckInterval,
ServerConfiguration.global(), mMasterContext.getUserState()));
}
getExecutorService().submit(
new HeartbeatThread(HeartbeatContext.MASTER_TTL_CHECK,
new InodeTtlChecker(this, mInodeTree),
(int) ServerConfiguration.getMs(PropertyKey.MASTER_TTL_CHECKER_INTERVAL_MS),
ServerConfiguration.global(), mMasterContext.getUserState()));
getExecutorService().submit(
new HeartbeatThread(HeartbeatContext.MASTER_LOST_FILES_DETECTION,
new LostFileDetector(this, mInodeTree),
(int) ServerConfiguration.getMs(PropertyKey
.MASTER_LOST_WORKER_FILE_DETECTION_INTERVAL),
ServerConfiguration.global(), mMasterContext.getUserState()));
getExecutorService().submit(new HeartbeatThread(
HeartbeatContext.MASTER_REPLICATION_CHECK,
new alluxio.master.file.replication.ReplicationChecker(mInodeTree, mBlockMaster,
mSafeModeManager, mJobMasterClientPool),
(int) ServerConfiguration.getMs(PropertyKey.MASTER_REPLICATION_CHECK_INTERVAL_MS),
ServerConfiguration.global(), mMasterContext.getUserState()));
getExecutorService().submit(
new HeartbeatThread(HeartbeatContext.MASTER_PERSISTENCE_SCHEDULER,
new PersistenceScheduler(),
(int) ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_SCHEDULER_INTERVAL_MS),
ServerConfiguration.global(), mMasterContext.getUserState()));
mPersistCheckerPool =
new java.util.concurrent.ThreadPoolExecutor(PERSIST_CHECKER_POOL_THREADS,
PERSIST_CHECKER_POOL_THREADS, 1, java.util.concurrent.TimeUnit.MINUTES,
new LinkedBlockingQueue<Runnable>(),
alluxio.util.ThreadFactoryUtils.build("Persist-Checker-%d", true));
mPersistCheckerPool.allowCoreThreadTimeOut(true);
getExecutorService().submit(
new HeartbeatThread(HeartbeatContext.MASTER_PERSISTENCE_CHECKER,
new PersistenceChecker(),
(int) ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_CHECKER_INTERVAL_MS),
ServerConfiguration.global(), mMasterContext.getUserState()));
getExecutorService().submit(
new HeartbeatThread(HeartbeatContext.MASTER_METRICS_TIME_SERIES,
new TimeSeriesRecorder(),
(int) ServerConfiguration.getMs(PropertyKey.MASTER_METRICS_TIME_SERIES_INTERVAL),
ServerConfiguration.global(), mMasterContext.getUserState()));
if (ServerConfiguration.getBoolean(PropertyKey.MASTER_AUDIT_LOGGING_ENABLED)) {
mAsyncAuditLogWriter = new AsyncUserAccessAuditLogWriter();
mAsyncAuditLogWriter.start();
}
if (ServerConfiguration.getBoolean(PropertyKey.UNDERFS_CLEANUP_ENABLED)) {
getExecutorService().submit(
new HeartbeatThread(HeartbeatContext.MASTER_UFS_CLEANUP, new UfsCleaner(this),
(int) ServerConfiguration.getMs(PropertyKey.UNDERFS_CLEANUP_INTERVAL),
ServerConfiguration.global(), mMasterContext.getUserState()));
}
mAccessTimeUpdater.start();
mSyncManager.start();
}
}
/**
 * Stops this master. The audit log writer is stopped first so no further entries are
 * queued, then the active sync manager and access time updater, and finally the
 * superclass machinery.
 */
@Override
public void stop() throws IOException {
if (mAsyncAuditLogWriter != null) {
mAsyncAuditLogWriter.stop();
mAsyncAuditLogWriter = null;
}
mSyncManager.stop();
mAccessTimeUpdater.stop();
super.stop();
}
/**
 * Closes this master, releasing the inode tree and lock manager, and shutting down the
 * three metadata-sync executors with a bounded wait.
 */
@Override
public void close() throws IOException {
  super.close();
  mInodeTree.close();
  mInodeLockManager.close();
  // The shutdown sequence for each executor is identical; share it via a helper.
  shutdownAndAwait(mSyncMetadataExecutor, "metadata sync");
  shutdownAndAwait(mSyncPrefetchExecutor, "ufs prefetch");
  shutdownAndAwait(mActiveSyncMetadataExecutor, "active sync");
}

/**
 * Shuts down the given executor immediately and waits up to 5 seconds for termination,
 * restoring the interrupt flag and logging a warning if the wait is interrupted.
 *
 * @param executor the executor to shut down
 * @param description a short description of the executor, used in the warning message
 */
private static void shutdownAndAwait(ExecutorService executor, String description) {
  executor.shutdownNow();
  try {
    executor.awaitTermination(5, TimeUnit.SECONDS);
  } catch (InterruptedException e) {
    Thread.currentThread().interrupt();
    LOG.warn("Failed to wait for {} executor to shut down.", description);
  }
}
/**
 * Validates the blocks known to the block master: a block is considered valid iff the
 * file it belongs to still exists in the inode tree.
 *
 * @param repair whether invalid blocks should be repaired
 */
@Override
public void validateInodeBlocks(boolean repair) throws UnavailableException {
  mBlockMaster.validateBlocks(
      blockId -> mInodeTree.inodeIdExists(IdUtils.fileIdFromBlockId(blockId)), repair);
}
/**
 * Invokes {@code cleanup()} on the UFS backing every writable mount point. Read-only
 * mounts are skipped; per-mount failures are logged and do not abort the sweep.
 */
@Override
public void cleanupUfs() {
  for (MountInfo info : mMountTable.getMountTable().values()) {
    if (info.getOptions().getReadOnly()) {
      // Nothing is ever written through a read-only mount, so skip it.
      continue;
    }
    try (CloseableResource<UnderFileSystem> ufsResource =
        mUfsManager.get(info.getMountId()).acquireUfsResource()) {
      ufsResource.get().cleanup();
    } catch (UnavailableException | NotFoundException e) {
      LOG.error("No UFS cached for {}", info, e);
    } catch (IOException e) {
      LOG.error("Failed in cleanup UFS {}.", info, e);
    }
  }
}
/**
 * Returns the file id for the given path, enforcing READ permission on the path.
 *
 * @param path the path to look up
 * @return the file id, or an invalid id if the path does not exist
 */
@Override
public long getFileId(AlluxioURI path) throws AccessControlException, UnavailableException {
return getFileIdInternal(path, true);
}
  /**
   * Looks up the file id for a path, loading metadata from the UFS when the path is not yet
   * present in the Alluxio namespace.
   *
   * @param path the path to resolve
   * @param checkPermission whether to enforce READ permission on the path
   * @return the file id, or {@link IdUtils#INVALID_FILE_ID} if the path is invalid or absent
   */
  private long getFileIdInternal(AlluxioURI path, boolean checkPermission)
      throws AccessControlException, UnavailableException {
    try (RpcContext rpcContext = createRpcContext()) {
      /*
      In order to prevent locking twice on RPCs where metadata does _not_ need to be loaded, we use
      a two-step scheme as an optimization to prevent the extra lock. loadMetadataIfNotExists
      requires a lock on the tree to determine if the path should be loaded before executing. To
      prevent the extra lock, we execute the RPC as normal and use a conditional check in the
      main body of the function to determine whether control flow should be shifted out of the
      RPC logic and back to the loadMetadataIfNotExists function.
      If loadMetadataIfNotExists runs, then the next pass into the main logic body should
      continue as normal. This may present a slight decrease in performance for newly-loaded
      metadata, but it is better than affecting the most common case where metadata is not being
      loaded.
      */
      LoadMetadataContext lmCtx = LoadMetadataContext.mergeFrom(
          LoadMetadataPOptions.newBuilder().setCreateAncestors(true));
      boolean run = true;
      boolean loadMetadata = false;
      while (run) {
        run = false;
        if (loadMetadata) {
          // Second pass only: pull the metadata in from the UFS, then retry the lookup.
          loadMetadataIfNotExist(rpcContext, path, lmCtx, false);
        }
        try (LockedInodePath inodePath = mInodeTree.lockInodePath(path, LockPattern.READ)) {
          if (checkPermission) {
            mPermissionChecker.checkPermission(Mode.Bits.READ, inodePath);
          }
          if (!loadMetadata && shouldLoadMetadataIfNotExists(inodePath, lmCtx)) {
            // Path missing and loading is allowed: release the lock and loop back to load.
            loadMetadata = true;
            run = true;
            continue;
          }
          mInodeTree.ensureFullInodePath(inodePath);
          return inodePath.getInode().getId();
        } catch (InvalidPathException | FileDoesNotExistException e) {
          return IdUtils.INVALID_FILE_ID;
        }
      }
    } catch (InvalidPathException e) {
      return IdUtils.INVALID_FILE_ID;
    }
    // Unreachable in practice (every loop iteration returns or continues); required by javac.
    return IdUtils.INVALID_FILE_ID;
  }
  @Override
  public FileInfo getFileInfo(long fileId)
      throws FileDoesNotExistException, AccessControlException, UnavailableException {
    Metrics.GET_FILE_INFO_OPS.inc();
    // Id-based lookup takes a READ lock on the full path; no UFS sync or permission check here.
    try (LockedInodePath inodePath = mInodeTree.lockFullInodePath(fileId, LockPattern.READ)) {
      return getFileInfoInternal(inodePath);
    }
  }
  @Override
  public FileInfo getFileInfo(AlluxioURI path, GetStatusContext context)
      throws FileDoesNotExistException, InvalidPathException, AccessControlException, IOException {
    Metrics.GET_FILE_INFO_OPS.inc();
    boolean ufsAccessed = false;
    long opTimeMs = System.currentTimeMillis();
    try (RpcContext rpcContext = createRpcContext(context);
        FileSystemMasterAuditContext auditContext =
            createAuditContext("getFileInfo", path, null, null)) {
      // Optionally sync metadata with the UFS first; a successful sync makes a later
      // loadMetadata redundant.
      if (syncMetadata(rpcContext, path, context.getOptions().getCommonOptions(),
          DescendantType.ONE, auditContext, LockedInodePath::getInodeOrNull,
          (inodePath, permChecker) -> permChecker.checkPermission(Mode.Bits.READ, inodePath),
          true)) {
        // If synced, do not load metadata.
        context.getOptions().setLoadMetadataType(LoadMetadataPType.NEVER);
        ufsAccessed = true;
      }
      LoadMetadataContext lmCtx = LoadMetadataContext.mergeFrom(
          LoadMetadataPOptions.newBuilder().setCreateAncestors(true).setCommonOptions(
              FileSystemMasterCommonPOptions.newBuilder()
                  .setTtl(context.getOptions().getCommonOptions().getTtl())
                  .setTtlAction(context.getOptions().getCommonOptions().getTtlAction())));
      /*
      See the comments in #getFileIdInternal for an explanation on why the loop here is required.
      */
      boolean run = true;
      boolean loadMetadata = false;
      FileInfo ret = null;
      while (run) {
        run = false;
        if (loadMetadata) {
          checkLoadMetadataOptions(context.getOptions().getLoadMetadataType(), path);
          loadMetadataIfNotExist(rpcContext, path, lmCtx, true);
          ufsAccessed = true;
        }
        LockingScheme lockingScheme = new LockingScheme(path, LockPattern.READ, false);
        try (LockedInodePath inodePath = mInodeTree.lockInodePath(lockingScheme)) {
          auditContext.setSrcInode(inodePath.getInodeOrNull());
          try {
            mPermissionChecker.checkPermission(Mode.Bits.READ, inodePath);
          } catch (AccessControlException e) {
            auditContext.setAllowed(false);
            throw e;
          }
          if (!loadMetadata && shouldLoadMetadataIfNotExists(inodePath, lmCtx)) {
            // Path missing and loading allowed: release the lock and loop back to load metadata.
            loadMetadata = true;
            run = true;
            continue;
          }
          ensureFullPathAndUpdateCache(inodePath);
          FileInfo fileInfo = getFileInfoInternal(inodePath);
          if (ufsAccessed) {
            // getFileInfoInternal bumps the per-UFS GET_FILE_INFO counter; undo it here since
            // the UFS was already charged for this operation by the sync/load above.
            MountTable.Resolution resolution = mMountTable.resolve(inodePath.getUri());
            Metrics.getUfsCounter(mMountTable.getMountInfo(
                resolution.getMountId()).getUfsUri().toString(),
                Metrics.UFSOps.GET_FILE_INFO).dec();
          }
          Mode.Bits accessMode = Mode.Bits.fromProto(context.getOptions().getAccessMode());
          if (context.getOptions().getUpdateTimestamps() && context.getOptions().hasAccessMode()
              && (accessMode.imply(Mode.Bits.READ) || accessMode.imply(Mode.Bits.WRITE))) {
            // Record the access time only for explicit read/write access modes.
            mAccessTimeUpdater.updateAccessTime(rpcContext.getJournalContext(),
                inodePath.getInode(), opTimeMs);
          }
          auditContext.setSrcInode(inodePath.getInode()).setSucceeded(true);
          ret = fileInfo;
        }
      }
      return ret;
    }
  }
/**
* @param inodePath the {@link LockedInodePath} to get the {@link FileInfo} for
* @return the {@link FileInfo} for the given inode
*/
private FileInfo getFileInfoInternal(LockedInodePath inodePath)
throws FileDoesNotExistException, UnavailableException {
Inode inode = inodePath.getInode();
AlluxioURI uri = inodePath.getUri();
FileInfo fileInfo = inode.generateClientFileInfo(uri.toString());
if (fileInfo.isFolder()) {
fileInfo.setLength(inode.asDirectory().getChildCount());
}
fileInfo.setInMemoryPercentage(getInMemoryPercentage(inode));
fileInfo.setInAlluxioPercentage(getInAlluxioPercentage(inode));
if (inode.isFile()) {
try {
fileInfo.setFileBlockInfos(getFileBlockInfoListInternal(inodePath));
} catch (InvalidPathException e) {
throw new FileDoesNotExistException(e.getMessage(), e);
}
}
// Rehydrate missing block-infos for persisted files.
if (fileInfo.getBlockIds().size() > fileInfo.getFileBlockInfos().size()
&& inode.isPersisted()) {
List<Long> missingBlockIds = fileInfo.getBlockIds().stream()
.filter((bId) -> fileInfo.getFileBlockInfo(bId) != null).collect(Collectors.toList());
LOG.warn("BlockInfo missing for file: {}. BlockIdsWithMissingInfos: {}", inodePath.getUri(),
missingBlockIds.stream().map(Object::toString).collect(Collectors.joining(",")));
// Remove old block metadata from block-master before re-committing.
mBlockMaster.removeBlocks(fileInfo.getBlockIds(), true);
// Commit all the file blocks (without locations) so the metadata for the block exists.
commitBlockInfosForFile(
fileInfo.getBlockIds(), fileInfo.getLength(), fileInfo.getBlockSizeBytes());
// Reset file-block-info list with the new list.
try {
fileInfo.setFileBlockInfos(getFileBlockInfoListInternal(inodePath));
} catch (InvalidPathException e) {
throw new FileDoesNotExistException(
String.format("Hydration failed for file: %s", inodePath.getUri()), e);
}
}
fileInfo.setXAttr(inode.getXAttr());
MountTable.Resolution resolution;
try {
resolution = mMountTable.resolve(uri);
} catch (InvalidPathException e) {
throw new FileDoesNotExistException(e.getMessage(), e);
}
AlluxioURI resolvedUri = resolution.getUri();
fileInfo.setUfsPath(resolvedUri.toString());
fileInfo.setMountId(resolution.getMountId());
Metrics.getUfsCounter(mMountTable.getMountInfo(resolution.getMountId()).getUfsUri().toString(),
Metrics.UFSOps.GET_FILE_INFO).inc();
Metrics.FILE_INFOS_GOT.inc();
return fileInfo;
}
  @Override
  public PersistenceState getPersistenceState(long fileId) throws FileDoesNotExistException {
    // Read-only lookup; takes a READ lock on the full path for the given inode id.
    try (LockedInodePath inodePath = mInodeTree.lockFullInodePath(fileId, LockPattern.READ)) {
      return inodePath.getInode().getPersistenceState();
    }
  }
  @Override
  public void listStatus(AlluxioURI path, ListStatusContext context,
      ResultStream<FileInfo> resultStream)
      throws AccessControlException, FileDoesNotExistException, InvalidPathException, IOException {
    Metrics.GET_FILE_INFO_OPS.inc();
    LockingScheme lockingScheme = new LockingScheme(path, LockPattern.READ, false);
    boolean ufsAccessed = false;
    try (RpcContext rpcContext = createRpcContext(context);
        FileSystemMasterAuditContext auditContext =
            createAuditContext("listStatus", path, null, null)) {
      DescendantType descendantType =
          context.getOptions().getRecursive() ? DescendantType.ALL : DescendantType.ONE;
      // Optionally sync with the UFS first; a successful sync makes a later load redundant.
      if (syncMetadata(rpcContext, path, context.getOptions().getCommonOptions(), descendantType,
          auditContext, LockedInodePath::getInodeOrNull,
          (inodePath, permChecker) -> permChecker.checkPermission(Mode.Bits.READ, inodePath))) {
        // If synced, do not load metadata.
        context.getOptions().setLoadMetadataType(LoadMetadataPType.NEVER);
        ufsAccessed = true;
      }
      /*
      See the comments in #getFileIdInternal for an explanation on why the loop here is required.
      */
      DescendantType loadDescendantType;
      if (context.getOptions().getLoadMetadataType() == LoadMetadataPType.NEVER) {
        loadDescendantType = DescendantType.NONE;
      } else if (context.getOptions().getRecursive()) {
        loadDescendantType = DescendantType.ALL;
      } else {
        loadDescendantType = DescendantType.ONE;
      }
      // load metadata for 1 level of descendants, or all descendants if recursive
      LoadMetadataContext loadMetadataContext = LoadMetadataContext.mergeFrom(
          LoadMetadataPOptions.newBuilder().setCreateAncestors(true)
              .setLoadDescendantType(GrpcUtils.toProto(loadDescendantType)).setCommonOptions(
              FileSystemMasterCommonPOptions.newBuilder()
                  .setTtl(context.getOptions().getCommonOptions().getTtl())
                  .setTtlAction(context.getOptions().getCommonOptions().getTtlAction())));
      boolean loadMetadata = false;
      boolean run = true;
      while (run) {
        run = false;
        if (loadMetadata) {
          // Second pass only: pull metadata in from the UFS, then retry the listing.
          loadMetadataIfNotExist(rpcContext, path, loadMetadataContext, false);
          ufsAccessed = true;
        }
        // We just synced; the new lock pattern should not sync.
        try (LockedInodePath inodePath = mInodeTree.lockInodePath(lockingScheme)) {
          auditContext.setSrcInode(inodePath.getInodeOrNull());
          try {
            mPermissionChecker.checkPermission(Mode.Bits.READ, inodePath);
          } catch (AccessControlException e) {
            auditContext.setAllowed(false);
            throw e;
          }
          if (!loadMetadata) {
            Inode inode;
            boolean isLoaded = true;
            if (inodePath.fullPathExists()) {
              inode = inodePath.getInode();
              if (inode.isDirectory()
                  && context.getOptions().getLoadMetadataType() != LoadMetadataPType.ALWAYS) {
                InodeDirectory inodeDirectory = inode.asDirectory();
                isLoaded = inodeDirectory.isDirectChildrenLoaded();
                if (context.getOptions().getRecursive()) {
                  isLoaded = areDescendantsLoaded(inodeDirectory);
                }
                if (isLoaded) {
                  // no need to load again.
                  loadMetadataContext.getOptions().setLoadDescendantType(LoadDescendantPType.NONE);
                }
              }
            } else {
              checkLoadMetadataOptions(context.getOptions().getLoadMetadataType(),
                  inodePath.getUri());
            }
            if (shouldLoadMetadataIfNotExists(inodePath, loadMetadataContext)) {
              loadMetadata = true;
              run = true;
              continue;
            }
          }
          ensureFullPathAndUpdateCache(inodePath);
          auditContext.setSrcInode(inodePath.getInode());
          if (context.getOptions().getResultsRequired()) {
            DescendantType descendantTypeForListStatus =
                (context.getOptions().getRecursive()) ? DescendantType.ALL : DescendantType.ONE;
            listStatusInternal(context, rpcContext, inodePath, auditContext,
                descendantTypeForListStatus, resultStream, 0);
          }
          auditContext.setSucceeded(true);
          Metrics.FILE_INFOS_GOT.inc();
          if (!ufsAccessed) {
            // Charge the per-UFS LIST_STATUS counter only when the listing was served from
            // Alluxio metadata without touching the UFS in this call.
            MountTable.Resolution resolution = mMountTable.resolve(inodePath.getUri());
            Metrics.getUfsCounter(mMountTable.getMountInfo(resolution.getMountId())
                .getUfsUri().toString(),
                Metrics.UFSOps.LIST_STATUS).inc();
          }
        }
      }
    }
  }
@Override
public List<FileInfo> listStatus(AlluxioURI path, ListStatusContext context)
throws AccessControlException, FileDoesNotExistException, InvalidPathException, IOException {
final List<FileInfo> fileInfos = new ArrayList<>();
listStatus(path, context, (item) -> fileInfos.add(item));
return fileInfos;
}
  /**
   * Lists the status of the path in {@link LockedInodePath}, possibly recursively depending on the
   * descendantType. The result is returned via a list specified by statusList, in postorder
   * traversal order.
   *
   * @param context call context
   * @param rpcContext the context for the RPC call
   * @param currInodePath the inode path to find the status
   * @param auditContext the audit context to return any access exceptions
   * @param descendantType if the currInodePath is a directory, how many levels of its descendant
   *        should be returned
   * @param resultStream the stream to receive individual results
   * @param depth internal use field for tracking depth relative to root item
   */
  private void listStatusInternal(ListStatusContext context, RpcContext rpcContext,
      LockedInodePath currInodePath, AuditContext auditContext, DescendantType descendantType,
      ResultStream<FileInfo> resultStream, int depth) throws FileDoesNotExistException,
      UnavailableException, AccessControlException, InvalidPathException {
    rpcContext.throwIfCancelled();
    Inode inode = currInodePath.getInode();
    if (inode.isDirectory() && descendantType != DescendantType.NONE) {
      try {
        // TODO(david): Return the error message when we do not have permission
        mPermissionChecker.checkPermission(Mode.Bits.EXECUTE, currInodePath);
      } catch (AccessControlException e) {
        auditContext.setAllowed(false);
        // In a recursive listing, skip unreadable subtrees; at the root, surface the error.
        if (descendantType == DescendantType.ALL) {
          return;
        } else {
          throw e;
        }
      }
      mAccessTimeUpdater.updateAccessTime(rpcContext.getJournalContext(), inode,
          CommonUtils.getCurrentMs());
      DescendantType nextDescendantType = (descendantType == DescendantType.ALL)
          ? DescendantType.ALL : DescendantType.NONE;
      // This is to generate a parsed child path components to be passed to lockChildPath
      String [] childComponentsHint = null;
      for (Inode child : mInodeStore.getChildren(inode.asDirectory())) {
        if (childComponentsHint == null) {
          // Build the component array lazily, once per directory; only the last slot changes
          // per child below.
          String[] parentComponents = PathUtils.getPathComponents(currInodePath.getUri().getPath());
          childComponentsHint = new String[parentComponents.length + 1];
          System.arraycopy(parentComponents, 0, childComponentsHint, 0, parentComponents.length);
        }
        // TODO(david): Make extending InodePath more efficient
        childComponentsHint[childComponentsHint.length - 1] = child.getName();
        try (LockedInodePath childInodePath =
            currInodePath.lockChild(child, LockPattern.READ, childComponentsHint)) {
          listStatusInternal(context, rpcContext, childInodePath, auditContext, nextDescendantType,
              resultStream, depth + 1);
        } catch (InvalidPathException | FileDoesNotExistException e) {
          // A child may be concurrently removed between getChildren and lockChild; ignore it.
          LOG.debug("Path \"{}\" is invalid, has been ignored.",
              PathUtils.concatPath("/", childComponentsHint));
        }
      }
    }
    // Listing a directory should not emit item for the directory itself.
    if (depth != 0 || inode.isFile()) {
      resultStream.submit(getFileInfoInternal(currInodePath));
    }
  }
/**
* Checks the {@link LoadMetadataPType} to determine whether or not to proceed in loading
* metadata. This method assumes that the path does not exist in Alluxio namespace, and will
* throw an exception if metadata should not be loaded.
*
* @param loadMetadataType the {@link LoadMetadataPType} to check
* @param path the path that does not exist in Alluxio namespace (used for exception message)
*/
private void checkLoadMetadataOptions(LoadMetadataPType loadMetadataType, AlluxioURI path)
throws FileDoesNotExistException {
if (loadMetadataType == LoadMetadataPType.NEVER || (loadMetadataType == LoadMetadataPType.ONCE
&& mUfsAbsentPathCache.isAbsent(path))) {
throw new FileDoesNotExistException(ExceptionMessage.PATH_DOES_NOT_EXIST.getMessage(path));
}
}
private boolean areDescendantsLoaded(InodeDirectoryView inode) {
if (!inode.isDirectChildrenLoaded()) {
return false;
}
for (Inode child : mInodeStore.getChildren(inode)) {
if (child.isDirectory()) {
if (!areDescendantsLoaded(child.asDirectory())) {
return false;
}
}
}
return true;
}
  /**
   * Checks to see if the entire path exists in Alluxio. Updates the absent cache if it does not
   * exist.
   *
   * @param inodePath the path to ensure
   */
  private void ensureFullPathAndUpdateCache(LockedInodePath inodePath)
      throws InvalidPathException, FileDoesNotExistException {
    boolean exists = false;
    try {
      mInodeTree.ensureFullInodePath(inodePath);
      exists = true;
    } finally {
      // The flag-plus-finally pattern records the absence in the cache on ANY failure
      // (including the declared exceptions) while still propagating the original exception.
      if (!exists) {
        mUfsAbsentPathCache.process(inodePath.getUri(), inodePath.getInodeList());
      }
    }
  }
  @Override
  public FileSystemMasterView getFileSystemMasterView() {
    // A fresh read-only view over this master; cheap to construct on each call.
    return new FileSystemMasterView(this);
  }
  @Override
  public void checkAccess(AlluxioURI path, CheckAccessContext context)
      throws FileDoesNotExistException, InvalidPathException, AccessControlException, IOException {
    try (RpcContext rpcContext = createRpcContext(context);
        FileSystemMasterAuditContext auditContext =
            createAuditContext("checkAccess", path, null, null)) {
      Mode.Bits bits = Mode.Bits.fromProto(context.getOptions().getBits());
      // Sync with the UFS first so the permission check sees up-to-date metadata.
      syncMetadata(rpcContext,
          path,
          context.getOptions().getCommonOptions(),
          DescendantType.NONE,
          auditContext,
          LockedInodePath::getInodeOrNull,
          (inodePath, permChecker) -> permChecker.checkPermission(bits, inodePath)
      );
      LockingScheme lockingScheme =
          createLockingScheme(path, context.getOptions().getCommonOptions(),
              LockPattern.READ);
      try (LockedInodePath inodePath = mInodeTree.lockInodePath(lockingScheme)) {
        // Check permission first, then existence, matching the order callers expect.
        mPermissionChecker.checkPermission(bits, inodePath);
        if (!inodePath.fullPathExists()) {
          throw new FileDoesNotExistException(ExceptionMessage
              .PATH_DOES_NOT_EXIST.getMessage(path));
        }
        auditContext.setSucceeded(true);
      }
    }
  }
  @Override
  public List<AlluxioURI> checkConsistency(AlluxioURI path, CheckConsistencyContext context)
      throws AccessControlException, FileDoesNotExistException, InvalidPathException, IOException {
    List<AlluxioURI> inconsistentUris = new ArrayList<>();
    try (RpcContext rpcContext = createRpcContext(context);
        FileSystemMasterAuditContext auditContext =
            createAuditContext("checkConsistency", path, null, null)) {
      // Sync the whole subtree (DescendantType.ALL) before the recursive consistency walk.
      syncMetadata(rpcContext,
          path,
          context.getOptions().getCommonOptions(),
          DescendantType.ALL,
          auditContext,
          LockedInodePath::getInodeOrNull,
          (inodePath, permChecker) -> permChecker.checkPermission(Mode.Bits.READ, inodePath));
      LockingScheme lockingScheme =
          createLockingScheme(path, context.getOptions().getCommonOptions(), LockPattern.READ);
      try (LockedInodePath parent = mInodeTree.lockInodePath(
          lockingScheme.getPath(), lockingScheme.getPattern())) {
        auditContext.setSrcInode(parent.getInodeOrNull());
        try {
          mPermissionChecker.checkPermission(Mode.Bits.READ, parent);
        } catch (AccessControlException e) {
          auditContext.setAllowed(false);
          throw e;
        }
        // Accumulate inconsistent URIs into the result list while walking the subtree.
        checkConsistencyRecursive(parent, inconsistentUris);
        auditContext.setSucceeded(true);
      }
    }
    return inconsistentUris;
  }
  /**
   * Walks the subtree rooted at {@code inodePath}, appending every path that is inconsistent
   * with its under storage to {@code inconsistentUris}.
   *
   * @param inodePath the root of the subtree to check (must be locked)
   * @param inconsistentUris output list collecting inconsistent paths
   */
  private void checkConsistencyRecursive(LockedInodePath inodePath,
      List<AlluxioURI> inconsistentUris) throws IOException, FileDoesNotExistException {
    Inode inode = inodePath.getInode();
    try {
      if (!checkConsistencyInternal(inodePath)) {
        inconsistentUris.add(inodePath.getUri());
      }
      if (inode.isDirectory()) {
        InodeDirectory inodeDir = inode.asDirectory();
        for (Inode child : mInodeStore.getChildren(inodeDir)) {
          try (LockedInodePath childPath = inodePath.lockChild(child, LockPattern.READ)) {
            checkConsistencyRecursive(childPath, inconsistentUris);
          }
        }
      }
    } catch (InvalidPathException e) {
      // A concurrently-removed or invalid child is skipped rather than failing the whole check.
      LOG.debug("Path \"{}\" is invalid, has been ignored.",
          PathUtils.concatPath(inodePath.getUri().getPath()));
    }
  }
  /**
   * Checks if a path is consistent between Alluxio and the underlying storage.
   * <p>
   * A path without a backing under storage is always consistent.
   * <p>
   * A not persisted path is considered consistent if:
   * 1. It does not shadow an object in the underlying storage.
   * <p>
   * A persisted path is considered consistent if:
   * 1. An equivalent object exists for its under storage path.
   * 2. The metadata of the Alluxio and under storage object are equal.
   *
   * @param inodePath the path to check. This must exist and be read-locked
   * @return true if the path is consistent, false otherwise
   */
  private boolean checkConsistencyInternal(LockedInodePath inodePath) throws InvalidPathException,
      IOException {
    Inode inode;
    try {
      inode = inodePath.getInode();
    } catch (FileDoesNotExistException e) {
      throw new RuntimeException(e); // already checked existence when creating the inodePath
    }
    MountTable.Resolution resolution = mMountTable.resolve(inodePath.getUri());
    try (CloseableResource<UnderFileSystem> ufsResource = resolution.acquireUfsResource()) {
      UnderFileSystem ufs = ufsResource.get();
      String ufsPath = resolution.getUri().getPath();
      if (ufs == null) {
        // No backing under storage: trivially consistent.
        return true;
      }
      if (!inode.isPersisted()) {
        // A non-persisted inode is consistent only if it does not shadow a UFS object.
        return !ufs.exists(ufsPath);
      }
      UfsStatus ufsStatus;
      try {
        ufsStatus = ufs.getStatus(ufsPath);
      } catch (FileNotFoundException e) {
        // Persisted but missing in the UFS. Note: inode.isPersisted() is true here (checked
        // above), so this always evaluates to false, i.e. inconsistent.
        return !inode.isPersisted();
      }
      // TODO(calvin): Evaluate which other metadata fields should be validated.
      if (inode.isDirectory()) {
        return ufsStatus.isDirectory();
      } else {
        // Files additionally compare the UFS fingerprint against the one recorded in Alluxio.
        String ufsFingerprint = Fingerprint.create(ufs.getUnderFSType(), ufsStatus).serialize();
        return ufsStatus.isFile()
            && (ufsFingerprint.equals(inode.asFile().getUfsFingerprint()));
      }
    }
  }
  @Override
  public void completeFile(AlluxioURI path, CompleteFileContext context)
      throws BlockInfoException, FileDoesNotExistException, InvalidPathException,
      InvalidFileSizeException, FileAlreadyCompletedException, AccessControlException,
      UnavailableException {
    Metrics.COMPLETE_FILE_OPS.inc();
    // No need to syncMetadata before complete.
    try (RpcContext rpcContext = createRpcContext(context);
        LockedInodePath inodePath = mInodeTree.lockFullInodePath(path, LockPattern.WRITE_INODE);
        FileSystemMasterAuditContext auditContext =
            createAuditContext("completeFile", path, null, inodePath.getInodeOrNull())) {
      try {
        mPermissionChecker.checkPermission(Mode.Bits.WRITE, inodePath);
      } catch (AccessControlException e) {
        auditContext.setAllowed(false);
        throw e;
      }
      // Even readonly mount points should be able to complete a file, for UFS reads in CACHE mode.
      completeFileInternal(rpcContext, inodePath, context);
      // Schedule async persistence if requested.
      if (context.getOptions().hasAsyncPersistOptions()) {
        scheduleAsyncPersistenceInternal(inodePath, ScheduleAsyncPersistenceContext
            .create(context.getOptions().getAsyncPersistOptionsBuilder()), rpcContext);
      }
      auditContext.setSucceeded(true);
    }
  }
  /**
   * Completes a file. After a file is completed, it cannot be written to.
   *
   * @param rpcContext the rpc context
   * @param inodePath the {@link LockedInodePath} to complete
   * @param context the method context
   */
  void completeFileInternal(RpcContext rpcContext, LockedInodePath inodePath,
      CompleteFileContext context)
      throws InvalidPathException, FileDoesNotExistException, BlockInfoException,
      FileAlreadyCompletedException, InvalidFileSizeException, UnavailableException {
    Inode inode = inodePath.getInode();
    if (!inode.isFile()) {
      throw new FileDoesNotExistException(
          ExceptionMessage.PATH_MUST_BE_FILE.getMessage(inodePath.getUri()));
    }
    InodeFile fileInode = inode.asFile();
    List<Long> blockIdList = fileInode.getBlockIds();
    List<BlockInfo> blockInfoList = mBlockMaster.getBlockInfoList(blockIdList);
    // A non-persisted file must have every block committed before it can be completed.
    if (!fileInode.isPersisted() && blockInfoList.size() != blockIdList.size()) {
      throw new BlockInfoException("Cannot complete a file without all the blocks committed");
    }
    // Iterate over all file blocks committed to Alluxio, computing the length and verify that all
    // the blocks (except the last one) is the same size as the file block size.
    long inAlluxioLength = 0;
    long fileBlockSize = fileInode.getBlockSizeBytes();
    for (int i = 0; i < blockInfoList.size(); i++) {
      BlockInfo blockInfo = blockInfoList.get(i);
      inAlluxioLength += blockInfo.getLength();
      if (i < blockInfoList.size() - 1 && blockInfo.getLength() != fileBlockSize) {
        throw new BlockInfoException(
            "Block index " + i + " has a block size smaller than the file block size (" + fileInode
            .getBlockSizeBytes() + ")");
      }
    }
    // If the file is persisted, its length is determined by UFS. Otherwise, its length is
    // determined by its size in Alluxio.
    long length = fileInode.isPersisted() ? context.getOptions().getUfsLength() : inAlluxioLength;
    String ufsFingerprint = Constants.INVALID_UFS_FINGERPRINT;
    if (fileInode.isPersisted()) {
      UfsStatus ufsStatus = context.getUfsStatus();
      // Retrieve the UFS fingerprint for this file.
      MountTable.Resolution resolution = mMountTable.resolve(inodePath.getUri());
      AlluxioURI resolvedUri = resolution.getUri();
      try (CloseableResource<UnderFileSystem> ufsResource = resolution.acquireUfsResource()) {
        UnderFileSystem ufs = ufsResource.get();
        if (ufsStatus == null) {
          ufsFingerprint = ufs.getFingerprint(resolvedUri.toString());
        } else {
          ufsFingerprint = Fingerprint.create(ufs.getUnderFSType(), ufsStatus).serialize();
        }
      }
    }
    // Delegate the journal updates to the length/fingerprint overload.
    completeFileInternal(rpcContext, inodePath, length, context.getOperationTimeMs(),
        ufsFingerprint);
  }
/**
* @param rpcContext the rpc context
* @param inodePath the {@link LockedInodePath} to complete
* @param length the length to use
* @param opTimeMs the operation time (in milliseconds)
* @param ufsFingerprint the ufs fingerprint
*/
private void completeFileInternal(RpcContext rpcContext, LockedInodePath inodePath, long length,
long opTimeMs, String ufsFingerprint)
throws FileDoesNotExistException, InvalidPathException, InvalidFileSizeException,
FileAlreadyCompletedException, UnavailableException {
Preconditions.checkState(inodePath.getLockPattern().isWrite());
InodeFile inode = inodePath.getInodeFile();
if (inode.isCompleted() && inode.getLength() != Constants.UNKNOWN_SIZE) {
throw new FileAlreadyCompletedException("File " + getName() + " has already been completed.");
}
if (length < 0 && length != Constants.UNKNOWN_SIZE) {
throw new InvalidFileSizeException(
"File " + inode.getName() + " cannot have negative length: " + length);
}
Builder entry = UpdateInodeFileEntry.newBuilder()
.setId(inode.getId())
.setPath(inodePath.getUri().getPath())
.setCompleted(true)
.setLength(length);
if (length == Constants.UNKNOWN_SIZE) {
// TODO(gpang): allow unknown files to be multiple blocks.
// If the length of the file is unknown, only allow 1 block to the file.
length = inode.getBlockSizeBytes();
}
int sequenceNumber = 0;
long remainingBytes = length;
while (remainingBytes > 0) {
entry.addSetBlocks(BlockId.createBlockId(inode.getBlockContainerId(), sequenceNumber));
remainingBytes -= Math.min(remainingBytes, inode.getBlockSizeBytes());
sequenceNumber++;
}
if (inode.isPersisted()) {
// Commit all the file blocks (without locations) so the metadata for the block exists.
commitBlockInfosForFile(entry.getSetBlocksList(), length, inode.getBlockSizeBytes());
// The path exists in UFS, so it is no longer absent
mUfsAbsentPathCache.processExisting(inodePath.getUri());
}
// We could introduce a concept of composite entries, so that these two entries could
// be applied in a single call to applyAndJournal.
mInodeTree.updateInode(rpcContext, UpdateInodeEntry.newBuilder()
.setId(inode.getId())
.setUfsFingerprint(ufsFingerprint)
.setLastModificationTimeMs(opTimeMs)
.setLastAccessTimeMs(opTimeMs)
.setOverwriteModificationTime(true)
.build());
mInodeTree.updateInodeFile(rpcContext, entry.build());
Metrics.FILES_COMPLETED.inc();
}
/**
* Commits blocks to BlockMaster for given block list.
*
* @param blockIds the list of block ids
* @param fileLength length of the file in bytes
* @param blockSize the block size in bytes
*/
private void commitBlockInfosForFile(List<Long> blockIds, long fileLength, long blockSize)
throws UnavailableException {
long currLength = fileLength;
for (long blockId : blockIds) {
long currentBlockSize = Math.min(currLength, blockSize);
mBlockMaster.commitBlockInUFS(blockId, currentBlockSize);
currLength -= currentBlockSize;
}
}
  @Override
  public FileInfo createFile(AlluxioURI path, CreateFileContext context)
      throws AccessControlException, InvalidPathException, FileAlreadyExistsException,
      BlockInfoException, IOException, FileDoesNotExistException {
    Metrics.CREATE_FILES_OPS.inc();
    try (RpcContext rpcContext = createRpcContext(context);
        FileSystemMasterAuditContext auditContext =
            createAuditContext("createFile", path, null, null)) {
      // Sync first; for recursive creates the audited inode is the deepest existing ancestor.
      syncMetadata(rpcContext,
          path,
          context.getOptions().getCommonOptions(),
          DescendantType.ONE,
          auditContext,
          (inodePath) -> context.getOptions().getRecursive()
              ? inodePath.getLastExistingInode() : inodePath.getParentInodeOrNull(),
          (inodePath, permChecker) -> permChecker
              .checkParentPermission(Mode.Bits.WRITE, inodePath));
      LockingScheme lockingScheme =
          createLockingScheme(path, context.getOptions().getCommonOptions(),
              LockPattern.WRITE_EDGE);
      try (LockedInodePath inodePath = mInodeTree.lockInodePath(lockingScheme)) {
        auditContext.setSrcInode(inodePath.getParentInodeOrNull());
        if (context.getOptions().getRecursive()) {
          auditContext.setSrcInode(inodePath.getLastExistingInode());
        }
        try {
          mPermissionChecker.checkParentPermission(Mode.Bits.WRITE, inodePath);
        } catch (AccessControlException e) {
          auditContext.setAllowed(false);
          throw e;
        }
        mMountTable.checkUnderWritableMountPoint(path);
        if (context.isPersisted()) {
          // Check if ufs is writable
          checkUfsMode(path, OperationType.WRITE);
        }
        createFileInternal(rpcContext, inodePath, context);
        auditContext.setSrcInode(inodePath.getInode()).setSucceeded(true);
        return getFileInfoInternal(inodePath);
      }
    }
  }
  /**
   * @param rpcContext the rpc context
   * @param inodePath the path to be created
   * @param context the method context
   * @return the list of created inodes
   */
  List<Inode> createFileInternal(RpcContext rpcContext, LockedInodePath inodePath,
      CreateFileContext context)
      throws InvalidPathException, FileAlreadyExistsException, BlockInfoException, IOException,
      FileDoesNotExistException {
    // Whitelisted paths are forced cacheable regardless of the caller's options.
    if (mWhitelist.inList(inodePath.getUri().toString())) {
      context.setCacheable(true);
    }
    // If the create succeeded, the list of created inodes will not be empty.
    List<Inode> created = mInodeTree.createPath(rpcContext, inodePath, context);
    if (context.isPersisted()) {
      // The path exists in UFS, so it is no longer absent. The ancestors exist in UFS, but the
      // actual file does not exist in UFS yet.
      mUfsAbsentPathCache.processExisting(inodePath.getUri().getParent());
    } else {
      // Non-persisted creates only bump the per-UFS CREATE_FILE counter.
      MountTable.Resolution resolution = mMountTable.resolve(inodePath.getUri());
      Metrics.getUfsCounter(mMountTable.getMountInfo(resolution.getMountId())
          .getUfsUri().toString(), Metrics.UFSOps.CREATE_FILE).inc();
    }
    Metrics.FILES_CREATED.inc();
    return created;
  }
  @Override
  public long getNewBlockIdForFile(AlluxioURI path) throws FileDoesNotExistException,
      InvalidPathException, AccessControlException, UnavailableException {
    Metrics.GET_NEW_BLOCK_OPS.inc();
    try (RpcContext rpcContext = createRpcContext();
        LockedInodePath inodePath = mInodeTree.lockFullInodePath(path, LockPattern.WRITE_INODE);
        FileSystemMasterAuditContext auditContext =
            createAuditContext("getNewBlockIdForFile", path, null, inodePath.getInodeOrNull())) {
      try {
        mPermissionChecker.checkPermission(Mode.Bits.WRITE, inodePath);
      } catch (AccessControlException e) {
        auditContext.setAllowed(false);
        throw e;
      }
      Metrics.NEW_BLOCKS_GOT.inc();
      // Journal the new block against the file's inode and return the allocated block id.
      long blockId = mInodeTree.newBlock(rpcContext, NewBlockEntry.newBuilder()
          .setId(inodePath.getInode().getId())
          .build());
      auditContext.setSucceeded(true);
      return blockId;
    }
  }
@Override
public Map<String, MountPointInfo> getMountPointInfoSummary() {
SortedMap<String, MountPointInfo> mountPoints = new TreeMap<>();
for (Map.Entry<String, MountInfo> mountPoint : mMountTable.getMountTable().entrySet()) {
mountPoints.put(mountPoint.getKey(), getDisplayMountPointInfo(mountPoint.getValue()));
}
return mountPoints;
}
  @Override
  public MountPointInfo getDisplayMountPointInfo(AlluxioURI path) throws InvalidPathException {
    // Only exact mount points are accepted; arbitrary paths under a mount are rejected.
    if (!mMountTable.isMountPoint(path)) {
      throw new InvalidPathException(
          ExceptionMessage.PATH_MUST_BE_MOUNT_POINT.getMessage(path));
    }
    return getDisplayMountPointInfo(mMountTable.getMountTable().get(path.toString()));
  }
  /**
   * Gets the mount point information for display from a mount information.
   *
   * @param mountInfo the mount information to transform
   * @return the mount point information
   */
  private MountPointInfo getDisplayMountPointInfo(MountInfo mountInfo) {
    MountPointInfo info = mountInfo.toDisplayMountPointInfo();
    try (CloseableResource<UnderFileSystem> ufsResource =
        mUfsManager.get(mountInfo.getMountId()).acquireUfsResource()) {
      UnderFileSystem ufs = ufsResource.get();
      info.setUfsType(ufs.getUnderFSType());
      // Capacity and usage are best-effort: a failure is logged and the field left unset.
      try {
        info.setUfsCapacityBytes(
            ufs.getSpace(info.getUfsUri(), UnderFileSystem.SpaceType.SPACE_TOTAL));
      } catch (IOException e) {
        LOG.warn("Cannot get total capacity of {}", info.getUfsUri(), e);
      }
      try {
        info.setUfsUsedBytes(
            ufs.getSpace(info.getUfsUri(), UnderFileSystem.SpaceType.SPACE_USED));
      } catch (IOException e) {
        LOG.warn("Cannot get used capacity of {}", info.getUfsUri(), e);
      }
    } catch (UnavailableException | NotFoundException e) {
      // We should never reach here
      LOG.error("No UFS cached for {}", info, e);
    }
    return info;
  }
  /**
   * @return the total number of inodes tracked by the inode tree
   */
  @Override
  public long getInodeCount() {
    return mInodeTree.getInodeCount();
  }
  /**
   * @return the number of pinned files, as tracked by the inode tree
   */
  @Override
  public int getNumberOfPinnedFiles() {
    return mInodeTree.getPinnedSize();
  }
  @Override
  public void delete(AlluxioURI path, DeleteContext context)
      throws IOException, FileDoesNotExistException, DirectoryNotEmptyException,
      InvalidPathException, AccessControlException {
    Metrics.DELETE_PATHS_OPS.inc();
    try (RpcContext rpcContext = createRpcContext(context);
         FileSystemMasterAuditContext auditContext =
             createAuditContext("delete", path, null, null)) {
      // Sync metadata with the UFS first so the delete operates on a current view of the
      // subtree (ALL descendants for recursive deletes, just the target otherwise).
      syncMetadata(rpcContext,
          path,
          context.getOptions().getCommonOptions(),
          context.getOptions().getRecursive() ? DescendantType.ALL : DescendantType.ONE,
          auditContext,
          LockedInodePath::getInodeOrNull,
          (inodePath, permChecker) -> permChecker.checkParentPermission(Mode.Bits.WRITE, inodePath)
      );
      LockingScheme lockingScheme =
          createLockingScheme(path, context.getOptions().getCommonOptions(),
              LockPattern.WRITE_EDGE);
      try (LockedInodePath inodePath = mInodeTree
          .lockInodePath(lockingScheme)) {
        mPermissionChecker.checkParentPermission(Mode.Bits.WRITE, inodePath);
        if (context.getOptions().getRecursive()) {
          // For a recursive delete, pre-check write permission on every descendant so the
          // operation is rejected up front rather than partially deleting the subtree.
          List<String> failedChildren = new ArrayList<>();
          try (LockedInodePathList descendants = mInodeTree.getDescendants(inodePath)) {
            for (LockedInodePath childPath : descendants) {
              try {
                mPermissionChecker.checkPermission(Mode.Bits.WRITE, childPath);
                if (mMountTable.isMountPoint(childPath.getUri())) {
                  mMountTable.checkUnderWritableMountPoint(childPath.getUri());
                }
              } catch (AccessControlException e) {
                // Collect every failure so the error message lists all offending children.
                failedChildren.add(e.getMessage());
              }
            }
            if (failedChildren.size() > 0) {
              throw new AccessControlException(ExceptionMessage.DELETE_FAILED_DIR_CHILDREN
                  .getMessage(path, StringUtils.join(failedChildren, ",")));
            }
          } catch (AccessControlException e) {
            auditContext.setAllowed(false);
            throw e;
          }
        }
        mMountTable.checkUnderWritableMountPoint(path);
        if (!inodePath.fullPathExists()) {
          throw new FileDoesNotExistException(ExceptionMessage.PATH_DOES_NOT_EXIST
              .getMessage(path));
        }
        deleteInternal(rpcContext, inodePath, context);
        auditContext.setSucceeded(true);
      }
    }
  }
  /**
   * Implements file deletion.
   * <p>
   * This method does not delete blocks. Instead, it returns deleted inodes so that their blocks can
   * be deleted after the inode deletion journal entry has been written. We cannot delete blocks
   * earlier because the inode deletion may fail, leaving us with inode containing deleted blocks.
   *
   * @param rpcContext the rpc context
   * @param inodePath the file {@link LockedInodePath}
   * @param deleteContext the method options
   */
  @VisibleForTesting
  public void deleteInternal(RpcContext rpcContext, LockedInodePath inodePath,
      DeleteContext deleteContext) throws FileDoesNotExistException, IOException,
      DirectoryNotEmptyException, InvalidPathException {
    Preconditions.checkState(inodePath.getLockPattern() == LockPattern.WRITE_EDGE);
    // TODO(jiri): A crash after any UFS object is deleted and before the delete operation is
    // journaled will result in an inconsistency between Alluxio and UFS.
    if (!inodePath.fullPathExists()) {
      // Nothing to do if the path no longer exists.
      return;
    }
    long opTimeMs = System.currentTimeMillis();
    Inode inode = inodePath.getInode();
    if (inode == null) {
      return;
    }
    boolean recursive = deleteContext.getOptions().getRecursive();
    if (inode.isDirectory() && !recursive && mInodeStore.hasChildren(inode.asDirectory())) {
      // inode is nonempty, and we don't want to delete a nonempty directory unless recursive is
      // true
      throw new DirectoryNotEmptyException(ExceptionMessage.DELETE_NONEMPTY_DIRECTORY_NONRECURSIVE,
          inode.getName());
    }
    if (mInodeTree.isRootId(inode.getId())) {
      // The root cannot be deleted.
      throw new InvalidPathException(ExceptionMessage.DELETE_ROOT_DIRECTORY.getMessage());
    }
    // Inodes for which deletion will be attempted
    List<Pair<AlluxioURI, LockedInodePath>> inodesToDelete = new ArrayList<>();
    // Add root of sub-tree to delete
    inodesToDelete.add(new Pair<>(inodePath.getUri(), inodePath));
    try (LockedInodePathList descendants = mInodeTree.getDescendants(inodePath)) {
      for (LockedInodePath childPath : descendants) {
        inodesToDelete.add(new Pair<>(mInodeTree.getPath(childPath.getInode()), childPath));
      }
      // Prepare to delete persisted inodes
      UfsDeleter ufsDeleter = NoopUfsDeleter.INSTANCE;
      if (!deleteContext.getOptions().getAlluxioOnly()) {
        ufsDeleter = new SafeUfsDeleter(mMountTable, mInodeStore, inodesToDelete,
            deleteContext.getOptions().build());
      }
      // Inodes to delete from tree after attempting to delete from UFS
      List<Pair<AlluxioURI, LockedInodePath>> revisedInodesToDelete = new ArrayList<>();
      // Inodes that are not safe for recursive deletes
      Set<Long> unsafeInodes = new HashSet<>();
      // Alluxio URIs (and the reason for failure) which could not be deleted
      List<Pair<String, String>> failedUris = new ArrayList<>();
      // We go through each inode, removing it from its parent set and from mDelInodes. If it's a
      // file, we deal with the checkpoints and blocks as well.
      // Iterate in reverse (leaves first) so children are handled before their parents.
      for (int i = inodesToDelete.size() - 1; i >= 0; i--) {
        rpcContext.throwIfCancelled();
        Pair<AlluxioURI, LockedInodePath> inodePairToDelete = inodesToDelete.get(i);
        AlluxioURI alluxioUriToDelete = inodePairToDelete.getFirst();
        Inode inodeToDelete = inodePairToDelete.getSecond().getInode();
        String failureReason = null;
        if (unsafeInodes.contains(inodeToDelete.getId())) {
          // A descendant failed to delete, so this directory is no longer empty.
          failureReason = ExceptionMessage.DELETE_FAILED_DIR_NONEMPTY.getMessage();
        } else if (inodeToDelete.isPersisted()) {
          // If this is a mount point, we have deleted all the children and can unmount it
          // TODO(calvin): Add tests (ALLUXIO-1831)
          if (mMountTable.isMountPoint(alluxioUriToDelete)) {
            mMountTable.delete(rpcContext, alluxioUriToDelete, true);
          } else {
            if (!deleteContext.getOptions().getAlluxioOnly()) {
              try {
                checkUfsMode(alluxioUriToDelete, OperationType.WRITE);
                // Attempt to delete node if all children were deleted successfully
                ufsDeleter.delete(alluxioUriToDelete, inodeToDelete);
              } catch (AccessControlException e) {
                // In case ufs is not writable, we will still attempt to delete other entries
                // if any as they may be from a different mount point
                LOG.warn(e.getMessage());
                failureReason = e.getMessage();
              } catch (IOException e) {
                LOG.warn(e.getMessage());
                failureReason = e.getMessage();
              }
            }
          }
        }
        if (failureReason == null) {
          if (inodeToDelete.isFile()) {
            long fileId = inodeToDelete.getId();
            // Remove the file from the set of files to persist.
            mPersistRequests.remove(fileId);
            // Cancel any ongoing jobs.
            PersistJob job = mPersistJobs.get(fileId);
            if (job != null) {
              job.setCancelState(PersistJob.CancelState.TO_BE_CANCELED);
            }
          }
          revisedInodesToDelete.add(new Pair<>(alluxioUriToDelete, inodePairToDelete.getSecond()));
        } else {
          unsafeInodes.add(inodeToDelete.getId());
          // Propagate 'unsafe-ness' to parent as one of its descendants can't be deleted
          unsafeInodes.add(inodeToDelete.getParentId());
          failedUris.add(new Pair<>(alluxioUriToDelete.toString(), failureReason));
        }
      }
      if (mSyncManager.isSyncPoint(inodePath.getUri())) {
        mSyncManager.stopSyncAndJournal(RpcContext.NOOP, inodePath.getUri());
      }
      // Delete Inodes
      for (Pair<AlluxioURI, LockedInodePath> delInodePair : revisedInodesToDelete) {
        LockedInodePath tempInodePath = delInodePair.getSecond();
        MountTable.Resolution resolution = mMountTable.resolve(tempInodePath.getUri());
        mInodeTree.deleteInode(rpcContext, tempInodePath, opTimeMs);
        // NOTE(review): this increments the UFS DELETE_FILE counter only on the alluxioOnly
        // branch, where no UFS delete is performed above -- confirm the condition should not
        // be inverted.
        if (deleteContext.getOptions().getAlluxioOnly()) {
          Metrics.getUfsCounter(mMountTable.getMountInfo(resolution.getMountId())
              .getUfsUri().toString(), Metrics.UFSOps.DELETE_FILE).inc();
        }
      }
      if (!failedUris.isEmpty()) {
        Collection<String> messages = failedUris.stream()
            .map(pair -> String.format("%s (%s)", pair.getFirst(), pair.getSecond()))
            .collect(Collectors.toList());
        throw new FailedPreconditionException(
            ExceptionMessage.DELETE_FAILED_UFS.getMessage(StringUtils.join(messages, ", ")));
      }
    }
    Metrics.PATHS_DELETED.inc(inodesToDelete.size());
  }
  @Override
  public List<FileBlockInfo> getFileBlockInfoList(AlluxioURI path)
      throws FileDoesNotExistException, InvalidPathException, AccessControlException,
      UnavailableException {
    Metrics.GET_FILE_BLOCK_INFO_OPS.inc();
    try (LockedInodePath inodePath = mInodeTree.lockFullInodePath(path, LockPattern.READ);
        FileSystemMasterAuditContext auditContext =
            createAuditContext("getFileBlockInfoList", path, null, inodePath.getInodeOrNull())) {
      try {
        mPermissionChecker.checkPermission(Mode.Bits.READ, inodePath);
      } catch (AccessControlException e) {
        // Record the denied access in the audit log before propagating.
        auditContext.setAllowed(false);
        throw e;
      }
      List<FileBlockInfo> ret = getFileBlockInfoListInternal(inodePath);
      Metrics.FILE_BLOCK_INFOS_GOT.inc();
      auditContext.setSucceeded(true);
      return ret;
    }
  }
/**
* @param inodePath the {@link LockedInodePath} to get the info for
* @return a list of {@link FileBlockInfo} for all the blocks of the given inode
*/
private List<FileBlockInfo> getFileBlockInfoListInternal(LockedInodePath inodePath)
throws InvalidPathException, FileDoesNotExistException, UnavailableException {
InodeFile file = inodePath.getInodeFile();
List<BlockInfo> blockInfoList = mBlockMaster.getBlockInfoList(file.getBlockIds());
List<FileBlockInfo> ret = new ArrayList<>();
for (BlockInfo blockInfo : blockInfoList) {
ret.add(generateFileBlockInfo(inodePath, blockInfo));
}
return ret;
}
  /**
   * Generates a {@link FileBlockInfo} object from internal metadata. This adds file information to
   * the block, such as the file offset, and additional UFS locations for the block.
   *
   * @param inodePath the file the block is a part of
   * @param blockInfo the {@link BlockInfo} to generate the {@link FileBlockInfo} from
   * @return a new {@link FileBlockInfo} for the block
   */
  private FileBlockInfo generateFileBlockInfo(LockedInodePath inodePath, BlockInfo blockInfo)
      throws FileDoesNotExistException {
    InodeFile file = inodePath.getInodeFile();
    FileBlockInfo fileBlockInfo = new FileBlockInfo();
    fileBlockInfo.setBlockInfo(blockInfo);
    fileBlockInfo.setUfsLocations(new ArrayList<>());
    // The sequence number part of the block id is the block index.
    long offset = file.getBlockSizeBytes() * BlockId.getSequenceNumber(blockInfo.getBlockId());
    fileBlockInfo.setOffset(offset);
    if (fileBlockInfo.getBlockInfo().getLocations().isEmpty() && file.isPersisted()) {
      // No alluxio locations, but there is a checkpoint in the under storage system. Add the
      // locations from the under storage system.
      long blockId = fileBlockInfo.getBlockInfo().getBlockId();
      List<String> locations = mUfsBlockLocationCache.get(blockId, inodePath.getUri(),
          fileBlockInfo.getOffset());
      // The cache may return null when it has no location data for this block.
      if (locations != null) {
        fileBlockInfo.setUfsLocations(locations);
      }
    }
    return fileBlockInfo;
  }
  /**
   * Returns whether the inodeFile is fully in Alluxio or not. The file is fully in Alluxio only if
   * all the blocks of the file are in Alluxio, in other words, the in-Alluxio percentage is 100.
   *
   * @param inode the file inode to check
   * @return true if the file is fully in Alluxio, false otherwise
   */
  private boolean isFullyInAlluxio(InodeFile inode) throws UnavailableException {
    return getInAlluxioPercentage(inode) == 100;
  }
  /**
   * Returns whether the inodeFile is fully in memory or not. The file is fully in memory only if
   * all the blocks of the file are in memory, in other words, the in-memory percentage is 100.
   *
   * @param inode the file inode to check
   * @return true if the file is fully in memory, false otherwise
   */
  private boolean isFullyInMemory(InodeFile inode) throws UnavailableException {
    return getInMemoryPercentage(inode) == 100;
  }
  @Override
  public List<AlluxioURI> getInAlluxioFiles() throws UnavailableException {
    List<AlluxioURI> files = new ArrayList<>();
    LockedInodePath rootPath;
    try {
      rootPath =
          mInodeTree.lockFullInodePath(new AlluxioURI(AlluxioURI.SEPARATOR), LockPattern.READ);
    } catch (FileDoesNotExistException | InvalidPathException e) {
      // Root should always exist.
      throw new RuntimeException(e);
    }
    // The try-with-resources ensures the root lock is released after the traversal.
    try (LockedInodePath inodePath = rootPath) {
      getInAlluxioFilesInternal(inodePath, files);
    }
    return files;
  }
  @Override
  public List<AlluxioURI> getInMemoryFiles() throws UnavailableException {
    List<AlluxioURI> files = new ArrayList<>();
    LockedInodePath rootPath;
    try {
      rootPath =
          mInodeTree.lockFullInodePath(new AlluxioURI(AlluxioURI.SEPARATOR), LockPattern.READ);
    } catch (FileDoesNotExistException | InvalidPathException e) {
      // Root should always exist.
      throw new RuntimeException(e);
    }
    // The try-with-resources ensures the root lock is released after the traversal.
    try (LockedInodePath inodePath = rootPath) {
      getInMemoryFilesInternal(inodePath, files);
    }
    return files;
  }
/**
* Adds in-Alluxio files to the array list passed in. This method assumes the inode passed in is
* already read locked.
*
* @param inodePath the inode path to search
* @param files the list to accumulate the results in
*/
private void getInAlluxioFilesInternal(LockedInodePath inodePath, List<AlluxioURI> files)
throws UnavailableException {
Inode inode = inodePath.getInodeOrNull();
if (inode == null) {
return;
}
if (inode.isFile()) {
if (isFullyInAlluxio(inode.asFile())) {
files.add(inodePath.getUri());
}
} else {
// This inode is a directory.
for (Inode child : mInodeStore.getChildren(inode.asDirectory())) {
try (LockedInodePath childPath = inodePath.lockChild(child, LockPattern.READ)) {
getInAlluxioFilesInternal(childPath, files);
} catch (InvalidPathException e) {
// Inode is no longer a child, continue.
continue;
}
}
}
}
/**
* Adds in-memory files to the array list passed in. This method assumes the inode passed in is
* already read locked.
*
* @param inodePath the inode path to search
* @param files the list to accumulate the results in
*/
private void getInMemoryFilesInternal(LockedInodePath inodePath, List<AlluxioURI> files)
throws UnavailableException {
Inode inode = inodePath.getInodeOrNull();
if (inode == null) {
return;
}
if (inode.isFile()) {
if (isFullyInMemory(inode.asFile())) {
files.add(inodePath.getUri());
}
} else {
// This inode is a directory.
for (Inode child : mInodeStore.getChildren(inode.asDirectory())) {
try (LockedInodePath childPath = inodePath.lockChild(child, LockPattern.READ)) {
getInMemoryFilesInternal(childPath, files);
} catch (InvalidPathException e) {
// Inode is no longer a child, continue.
continue;
}
}
}
}
  /**
   * Gets the in-memory percentage of an Inode. For a file that has all blocks in memory, it
   * returns 100; for a file that has no block in memory, it returns 0. Returns 0 for a directory.
   *
   * @param inode the inode
   * @return the in memory percentage
   */
  private int getInMemoryPercentage(Inode inode) throws UnavailableException {
    if (!inode.isFile()) {
      return 0;
    }
    InodeFile inodeFile = inode.asFile();
    long length = inodeFile.getLength();
    if (length == 0) {
      // An empty file is trivially fully in memory.
      return 100;
    }
    long inMemoryLength = 0;
    // Sum the lengths of the blocks that have a replica on a top-tier worker.
    for (BlockInfo info : mBlockMaster.getBlockInfoList(inodeFile.getBlockIds())) {
      if (isInTopStorageTier(info)) {
        inMemoryLength += info.getLength();
      }
    }
    return (int) (inMemoryLength * 100 / length);
  }
/**
* Gets the in-Alluxio percentage of an Inode. For a file that has all blocks in Alluxio, it
* returns 100; for a file that has no block in Alluxio, it returns 0. Returns 0 for a directory.
*
* @param inode the inode
* @return the in alluxio percentage
*/
private int getInAlluxioPercentage(Inode inode) throws UnavailableException {
if (!inode.isFile()) {
return 0;
}
InodeFile inodeFile = inode.asFile();
long length = inodeFile.getLength();
if (length == 0) {
return 100;
}
long inAlluxioLength = 0;
for (BlockInfo info : mBlockMaster.getBlockInfoList(inodeFile.getBlockIds())) {
if (!info.getLocations().isEmpty()) {
inAlluxioLength += info.getLength();
}
}
return (int) (inAlluxioLength * 100 / length);
}
/**
* @return true if the given block is in the top storage level in some worker, false otherwise
*/
private boolean isInTopStorageTier(BlockInfo blockInfo) {
for (BlockLocation location : blockInfo.getLocations()) {
if (mBlockMaster.getGlobalStorageTierAssoc().getOrdinal(location.getTierAlias()) == 0) {
return true;
}
}
return false;
}
  @Override
  public long createDirectory(AlluxioURI path, CreateDirectoryContext context)
      throws InvalidPathException, FileAlreadyExistsException, IOException, AccessControlException,
      FileDoesNotExistException {
    Metrics.CREATE_DIRECTORIES_OPS.inc();
    try (RpcContext rpcContext = createRpcContext(context);
         FileSystemMasterAuditContext auditContext =
             createAuditContext("mkdir", path, null, null)) {
      // Sync metadata first so the create sees the current UFS state of the target path.
      syncMetadata(rpcContext,
          path,
          context.getOptions().getCommonOptions(),
          DescendantType.ONE,
          auditContext,
          inodePath -> context.getOptions().getRecursive()
              ? inodePath.getLastExistingInode() : inodePath.getParentInodeOrNull(),
          (inodePath, permChecker) -> permChecker.checkParentPermission(Mode.Bits.WRITE, inodePath)
      );
      LockingScheme lockingScheme =
          createLockingScheme(path, context.getOptions().getCommonOptions(),
              LockPattern.WRITE_EDGE);
      try (LockedInodePath inodePath = mInodeTree.lockInodePath(lockingScheme)) {
        auditContext.setSrcInode(inodePath.getParentInodeOrNull());
        if (context.getOptions().getRecursive()) {
          // With recursive create, the closest existing ancestor is the relevant audit source.
          auditContext.setSrcInode(inodePath.getLastExistingInode());
        }
        try {
          mPermissionChecker.checkParentPermission(Mode.Bits.WRITE, inodePath);
        } catch (AccessControlException e) {
          auditContext.setAllowed(false);
          throw e;
        }
        mMountTable.checkUnderWritableMountPoint(path);
        if (context.isPersisted()) {
          // Persisted directories require the UFS to be writable.
          checkUfsMode(path, OperationType.WRITE);
        }
        createDirectoryInternal(rpcContext, inodePath, context);
        auditContext.setSrcInode(inodePath.getInode()).setSucceeded(true);
        return inodePath.getInode().getId();
      }
    }
  }
  /**
   * Implementation of directory creation for a given path.
   *
   * @param rpcContext the rpc context
   * @param inodePath the path of the directory
   * @param context method context
   * @return a list of created inodes
   */
  List<Inode> createDirectoryInternal(RpcContext rpcContext, LockedInodePath inodePath,
      CreateDirectoryContext context) throws InvalidPathException, FileAlreadyExistsException,
      IOException, FileDoesNotExistException {
    Preconditions.checkState(inodePath.getLockPattern() == LockPattern.WRITE_EDGE);
    try {
      List<Inode> createResult = mInodeTree.createPath(rpcContext, inodePath, context);
      InodeDirectory inodeDirectory = inodePath.getInode().asDirectory();
      String ufsFingerprint = Constants.INVALID_UFS_FINGERPRINT;
      if (inodeDirectory.isPersisted()) {
        UfsStatus ufsStatus = context.getUfsStatus();
        // Retrieve the UFS fingerprint for this file.
        MountTable.Resolution resolution = mMountTable.resolve(inodePath.getUri());
        AlluxioURI resolvedUri = resolution.getUri();
        try (CloseableResource<UnderFileSystem> ufsResource = resolution.acquireUfsResource()) {
          UnderFileSystem ufs = ufsResource.get();
          if (ufsStatus == null) {
            // No cached UFS status; ask the UFS directly.
            ufsFingerprint = ufs.getFingerprint(resolvedUri.toString());
          } else {
            ufsFingerprint = Fingerprint.create(ufs.getUnderFSType(), ufsStatus).serialize();
          }
        }
      }
      // Record the fingerprint on the new directory inode.
      mInodeTree.updateInode(rpcContext, UpdateInodeEntry.newBuilder()
          .setId(inodeDirectory.getId())
          .setUfsFingerprint(ufsFingerprint)
          .build());
      if (context.isPersisted()) {
        // The path exists in UFS, so it is no longer absent.
        mUfsAbsentPathCache.processExisting(inodePath.getUri());
      }
      Metrics.DIRECTORIES_CREATED.inc();
      return createResult;
    } catch (BlockInfoException e) {
      // Since we are creating a directory, the block size is ignored, no such exception should
      // happen.
      throw new RuntimeException(e);
    }
  }
  @Override
  public void rename(AlluxioURI srcPath, AlluxioURI dstPath, RenameContext context)
      throws FileAlreadyExistsException, FileDoesNotExistException, InvalidPathException,
      IOException, AccessControlException {
    Metrics.RENAME_PATH_OPS.inc();
    try (RpcContext rpcContext = createRpcContext(context);
         FileSystemMasterAuditContext auditContext =
             createAuditContext("rename", srcPath, dstPath, null)) {
      // Sync metadata for both endpoints of the rename before taking write locks.
      syncMetadata(rpcContext,
          srcPath,
          context.getOptions().getCommonOptions(),
          DescendantType.ONE,
          auditContext,
          LockedInodePath::getParentInodeOrNull,
          (inodePath, permChecker) -> permChecker.checkParentPermission(Mode.Bits.WRITE, inodePath)
      );
      syncMetadata(rpcContext,
          dstPath,
          context.getOptions().getCommonOptions(),
          DescendantType.ONE,
          auditContext,
          LockedInodePath::getParentInodeOrNull,
          (inodePath, permChecker) -> permChecker.checkParentPermission(Mode.Bits.WRITE, inodePath)
      );
      LockingScheme srcLockingScheme =
          createLockingScheme(srcPath, context.getOptions().getCommonOptions(),
              LockPattern.WRITE_EDGE);
      LockingScheme dstLockingScheme =
          createLockingScheme(dstPath, context.getOptions().getCommonOptions(),
              LockPattern.WRITE_EDGE);
      // Lock both paths together to avoid deadlock and to keep the pair consistent.
      try (InodePathPair inodePathPair = mInodeTree
          .lockInodePathPair(srcLockingScheme.getPath(), srcLockingScheme.getPattern(),
              dstLockingScheme.getPath(), dstLockingScheme.getPattern())) {
        LockedInodePath srcInodePath = inodePathPair.getFirst();
        LockedInodePath dstInodePath = inodePathPair.getSecond();
        auditContext.setSrcInode(srcInodePath.getParentInodeOrNull());
        try {
          mPermissionChecker.checkParentPermission(Mode.Bits.WRITE, srcInodePath);
          mPermissionChecker.checkParentPermission(Mode.Bits.WRITE, dstInodePath);
        } catch (AccessControlException e) {
          auditContext.setAllowed(false);
          throw e;
        }
        mMountTable.checkUnderWritableMountPoint(srcPath);
        mMountTable.checkUnderWritableMountPoint(dstPath);
        renameInternal(rpcContext, srcInodePath, dstInodePath, context);
        auditContext.setSrcInode(srcInodePath.getInode()).setSucceeded(true);
        LOG.debug("Renamed {} to {}", srcPath, dstPath);
      }
    }
  }
private boolean shouldPersistPath(String path) {
for (String pattern : mPersistBlacklist) {
if (path.contains(pattern)) {
LOG.debug("Not persisting path {} because it is in {}: {}", path,
PropertyKey.Name.MASTER_PERSISTENCE_BLACKLIST, mPersistBlacklist);
return false;
}
}
return true;
}
  /**
   * Renames a file to a destination.
   *
   * <p>Validates the rename (existence, mount boundaries, destination absence), performs it, and
   * optionally schedules async persistence when the context requests persist-on-rename.
   *
   * @param rpcContext the rpc context
   * @param srcInodePath the source path to rename
   * @param dstInodePath the destination path to rename the file to
   * @param context method options
   */
  private void renameInternal(RpcContext rpcContext, LockedInodePath srcInodePath,
      LockedInodePath dstInodePath, RenameContext context) throws InvalidPathException,
      FileDoesNotExistException, FileAlreadyExistsException, IOException, AccessControlException {
    if (!srcInodePath.fullPathExists()) {
      throw new FileDoesNotExistException(
          ExceptionMessage.PATH_DOES_NOT_EXIST.getMessage(srcInodePath.getUri()));
    }
    Inode srcInode = srcInodePath.getInode();
    // Renaming path to itself is a no-op.
    if (srcInodePath.getUri().equals(dstInodePath.getUri())) {
      return;
    }
    // Renaming the root is not allowed.
    if (srcInodePath.getUri().isRoot()) {
      throw new InvalidPathException(ExceptionMessage.ROOT_CANNOT_BE_RENAMED.getMessage());
    }
    if (dstInodePath.getUri().isRoot()) {
      throw new InvalidPathException(ExceptionMessage.RENAME_CANNOT_BE_TO_ROOT.getMessage());
    }
    // Renaming across mount points is not allowed.
    String srcMount = mMountTable.getMountPoint(srcInodePath.getUri());
    String dstMount = mMountTable.getMountPoint(dstInodePath.getUri());
    if ((srcMount == null && dstMount != null) || (srcMount != null && dstMount == null) || (
        srcMount != null && dstMount != null && !srcMount.equals(dstMount))) {
      throw new InvalidPathException(ExceptionMessage.RENAME_CANNOT_BE_ACROSS_MOUNTS
          .getMessage(srcInodePath.getUri(), dstInodePath.getUri()));
    }
    // Renaming onto a mount point is not allowed.
    if (mMountTable.isMountPoint(dstInodePath.getUri())) {
      throw new InvalidPathException(
          ExceptionMessage.RENAME_CANNOT_BE_ONTO_MOUNT_POINT.getMessage(dstInodePath.getUri()));
    }
    // Renaming a path to one of its subpaths is not allowed. Check for that, by making sure
    // srcComponents isn't a prefix of dstComponents.
    if (PathUtils.hasPrefix(dstInodePath.getUri().getPath(), srcInodePath.getUri().getPath())) {
      throw new InvalidPathException(ExceptionMessage.RENAME_CANNOT_BE_TO_SUBDIRECTORY
          .getMessage(srcInodePath.getUri(), dstInodePath.getUri()));
    }
    // Get the inodes of the src and dst parents.
    Inode srcParentInode = srcInodePath.getParentInodeDirectory();
    if (!srcParentInode.isDirectory()) {
      throw new InvalidPathException(
          ExceptionMessage.PATH_MUST_HAVE_VALID_PARENT.getMessage(srcInodePath.getUri()));
    }
    Inode dstParentInode = dstInodePath.getParentInodeDirectory();
    if (!dstParentInode.isDirectory()) {
      throw new InvalidPathException(
          ExceptionMessage.PATH_MUST_HAVE_VALID_PARENT.getMessage(dstInodePath.getUri()));
    }
    // Make sure destination path does not exist
    if (dstInodePath.fullPathExists()) {
      throw new FileAlreadyExistsException(String
          .format("Cannot rename because destination already exists. src: %s dst: %s",
              srcInodePath.getUri(), dstInodePath.getUri()));
    }
    // Now we remove srcInode from its parent and insert it into dstPath's parent
    renameInternal(rpcContext, srcInodePath, dstInodePath, false, context);
    // Check options and determine if we should schedule async persist. This is helpful for compute
    // frameworks that use rename as a commit operation.
    if (context.getPersist() && srcInode.isFile() && !srcInode.isPersisted()
        && shouldPersistPath(dstInodePath.toString())) {
      LOG.debug("Schedule Async Persist on rename for File {}", srcInodePath);
      mInodeTree.updateInode(rpcContext, UpdateInodeEntry.newBuilder()
          .setId(srcInode.getId())
          .setPersistenceState(PersistenceState.TO_BE_PERSISTED.name())
          .build());
      long shouldPersistTime = srcInode.asFile().getShouldPersistTime();
      long persistenceWaitTime = shouldPersistTime == Constants.NO_AUTO_PERSIST ? 0
          : getPersistenceWaitTime(shouldPersistTime);
      mPersistRequests.put(srcInode.getId(), new alluxio.time.ExponentialTimer(
          ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_INITIAL_INTERVAL_MS),
          ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_MAX_INTERVAL_MS),
          persistenceWaitTime,
          ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_MAX_TOTAL_WAIT_TIME_MS)));
    }
    // If a directory is being renamed with persist on rename, attempt to persist children
    if (srcInode.isDirectory() && context.getPersist()
        && shouldPersistPath(dstInodePath.toString())) {
      LOG.debug("Schedule Async Persist on rename for Dir: {}", dstInodePath);
      try (LockedInodePathList descendants = mInodeTree.getDescendants(srcInodePath)) {
        for (LockedInodePath childPath : descendants) {
          Inode childInode = childPath.getInode();
          // TODO(apc999): Resolve the child path legitimately
          // Each unpersisted descendant file gets the same TO_BE_PERSISTED scheduling as a
          // single-file rename, with its path checked against the blacklist.
          if (childInode.isFile() && !childInode.isPersisted()
              && shouldPersistPath(
                  childPath.toString().substring(srcInodePath.toString().length()))) {
            LOG.debug("Schedule Async Persist on rename for Child File: {}", childPath);
            mInodeTree.updateInode(rpcContext, UpdateInodeEntry.newBuilder()
                .setId(childInode.getId())
                .setPersistenceState(PersistenceState.TO_BE_PERSISTED.name())
                .build());
            long shouldPersistTime = childInode.asFile().getShouldPersistTime();
            long persistenceWaitTime = shouldPersistTime == Constants.NO_AUTO_PERSIST ? 0
                : getPersistenceWaitTime(shouldPersistTime);
            mPersistRequests.put(childInode.getId(), new alluxio.time.ExponentialTimer(
                ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_INITIAL_INTERVAL_MS),
                ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_MAX_INTERVAL_MS),
                persistenceWaitTime,
                ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_MAX_TOTAL_WAIT_TIME_MS)));
          }
        }
      }
    }
  }
  /**
   * Implements renaming.
   *
   * @param rpcContext the rpc context
   * @param srcInodePath the path of the rename source
   * @param dstInodePath the path to the rename destination
   * @param replayed whether the operation is a result of replaying the journal
   * @param context method options
   */
  private void renameInternal(RpcContext rpcContext, LockedInodePath srcInodePath,
      LockedInodePath dstInodePath, boolean replayed, RenameContext context)
      throws FileDoesNotExistException, InvalidPathException, IOException, AccessControlException {
    // Rename logic:
    // 1. Change the source inode name to the destination name.
    // 2. Insert the source inode into the destination parent.
    // 3. Do UFS operations if necessary.
    // 4. Remove the source inode (reverting the name) from the source parent.
    // 5. Set the last modification times for both source and destination parent inodes.
    Inode srcInode = srcInodePath.getInode();
    AlluxioURI srcPath = srcInodePath.getUri();
    AlluxioURI dstPath = dstInodePath.getUri();
    InodeDirectory srcParentInode = srcInodePath.getParentInodeDirectory();
    InodeDirectory dstParentInode = dstInodePath.getParentInodeDirectory();
    String srcName = srcPath.getName();
    String dstName = dstPath.getName();
    LOG.debug("Renaming {} to {}", srcPath, dstPath);
    if (dstInodePath.fullPathExists()) {
      throw new InvalidPathException("Destination path: " + dstPath + " already exists.");
    }
    // Journal and apply the inode-tree rename before touching the UFS.
    mInodeTree.rename(rpcContext, RenameEntry.newBuilder()
        .setId(srcInode.getId())
        .setOpTimeMs(context.getOperationTimeMs())
        .setNewParentId(dstParentInode.getId())
        .setNewName(dstName)
        .setPath(srcPath.getPath())
        .setNewPath(dstPath.getPath())
        .build());
    // 3. Do UFS operations if necessary.
    // If the source file is persisted, rename it in the UFS.
    try {
      if (!replayed && srcInode.isPersisted()) {
        // Check if ufs is writable
        checkUfsMode(srcPath, OperationType.WRITE);
        checkUfsMode(dstPath, OperationType.WRITE);
        MountTable.Resolution resolution = mMountTable.resolve(srcPath);
        // Persist ancestor directories from top to the bottom. We cannot use recursive create
        // parents here because the permission for the ancestors can be different.
        // inodes from the same mount point as the dst
        Stack<InodeDirectory> sameMountDirs = new Stack<>();
        List<Inode> dstInodeList = dstInodePath.getInodeList();
        for (int i = dstInodeList.size() - 1; i >= 0; i--) {
          // Since dstInodePath is guaranteed not to be a full path, all inodes in the incomplete
          // path are guaranteed to be a directory.
          InodeDirectory dir = dstInodeList.get(i).asDirectory();
          sameMountDirs.push(dir);
          if (dir.isMountPoint()) {
            break;
          }
        }
        while (!sameMountDirs.empty()) {
          InodeDirectory dir = sameMountDirs.pop();
          if (!dir.isPersisted()) {
            mInodeTree.syncPersistExistingDirectory(rpcContext, dir);
          }
        }
        String ufsSrcPath = resolution.getUri().toString();
        try (CloseableResource<UnderFileSystem> ufsResource = resolution.acquireUfsResource()) {
          UnderFileSystem ufs = ufsResource.get();
          String ufsDstUri = mMountTable.resolve(dstPath).getUri().toString();
          boolean success;
          if (srcInode.isFile()) {
            success = ufs.renameRenamableFile(ufsSrcPath, ufsDstUri);
          } else {
            success = ufs.renameRenamableDirectory(ufsSrcPath, ufsDstUri);
          }
          if (!success) {
            throw new IOException(
                ExceptionMessage.FAILED_UFS_RENAME.getMessage(ufsSrcPath, ufsDstUri));
          }
        }
        // The destination was persisted in ufs.
        mUfsAbsentPathCache.processExisting(dstPath);
      }
    } catch (Throwable t) {
      // On failure, revert changes and throw exception.
      // The compensating rename moves the inode back under its original parent and name.
      mInodeTree.rename(rpcContext, RenameEntry.newBuilder()
          .setId(srcInode.getId())
          .setOpTimeMs(context.getOperationTimeMs())
          .setNewName(srcName)
          .setNewParentId(srcParentInode.getId())
          .setPath(dstPath.getPath())
          .setNewPath(srcPath.getPath())
          .build());
      throw t;
    }
    Metrics.PATHS_RENAMED.inc();
  }
/**
* Propagates the persisted status to all parents of the given inode in the same mount partition.
*
* @param journalContext the journal context
* @param inodePath the inode to start the propagation at
* @return list of inodes which were marked as persisted
*/
private void propagatePersistedInternal(Supplier<JournalContext> journalContext,
LockedInodePath inodePath) throws FileDoesNotExistException {
Inode inode = inodePath.getInode();
List<Inode> inodes = inodePath.getInodeList();
// Traverse the inodes from target inode to the root.
Collections.reverse(inodes);
// Skip the first, to not examine the target inode itself.
inodes = inodes.subList(1, inodes.size());
List<Inode> persistedInodes = new ArrayList<>();
for (Inode ancestor : inodes) {
// the path is already locked.
AlluxioURI path = mInodeTree.getPath(ancestor);
if (mMountTable.isMountPoint(path)) {
// Stop propagating the persisted status at mount points.
break;
}
if (ancestor.isPersisted()) {
// Stop if a persisted directory is encountered.
break;
}
mInodeTree.updateInode(journalContext, UpdateInodeEntry.newBuilder()
.setId(ancestor.getId())
.setPersistenceState(PersistenceState.PERSISTED.name())
.build());
}
}
  @Override
  public void free(AlluxioURI path, FreeContext context)
      throws FileDoesNotExistException, InvalidPathException, AccessControlException,
      UnexpectedAlluxioException, IOException {
    Metrics.FREE_FILE_OPS.inc();
    // No need to syncMetadata before free.
    try (RpcContext rpcContext = createRpcContext(context);
         LockedInodePath inodePath = mInodeTree.lockFullInodePath(path, LockPattern.WRITE_INODE);
         FileSystemMasterAuditContext auditContext =
             createAuditContext("free", path, null, inodePath.getInodeOrNull())) {
      try {
        // Free only requires READ permission on the path.
        mPermissionChecker.checkPermission(Mode.Bits.READ, inodePath);
      } catch (AccessControlException e) {
        auditContext.setAllowed(false);
        throw e;
      }
      freeInternal(rpcContext, inodePath, context);
      auditContext.setSucceeded(true);
    }
  }
/**
* Implements free operation.
*
* @param rpcContext the rpc context
* @param inodePath inode of the path to free
* @param context context to free method
*/
private void freeInternal(RpcContext rpcContext, LockedInodePath inodePath, FreeContext context)
throws FileDoesNotExistException, UnexpectedAlluxioException,
IOException, InvalidPathException, AccessControlException {
Inode inode = inodePath.getInode();
if (inode.isDirectory() && !context.getOptions().getRecursive()
&& mInodeStore.hasChildren(inode.asDirectory())) {
// inode is nonempty, and we don't free a nonempty directory unless recursive is true
throw new UnexpectedAlluxioException(
ExceptionMessage.CANNOT_FREE_NON_EMPTY_DIR.getMessage(mInodeTree.getPath(inode)));
}
long opTimeMs = System.currentTimeMillis();
List<Inode> freeInodes = new ArrayList<>();
freeInodes.add(inode);
try (LockedInodePathList descendants = mInodeTree.getDescendants(inodePath)) {
for (LockedInodePath descedant : Iterables.concat(descendants,
Collections.singleton(inodePath))) {
Inode freeInode = descedant.getInodeOrNull();
if (freeInode != null && freeInode.isFile()) {
if (freeInode.getPersistenceState() != PersistenceState.PERSISTED) {
throw new UnexpectedAlluxioException(ExceptionMessage.CANNOT_FREE_NON_PERSISTED_FILE
.getMessage(mInodeTree.getPath(freeInode)));
}
if (freeInode.isPinned()) {
if (!context.getOptions().getForced()) {
throw new UnexpectedAlluxioException(ExceptionMessage.CANNOT_FREE_PINNED_FILE
.getMessage(mInodeTree.getPath(freeInode)));
}
SetAttributeContext setAttributeContext = SetAttributeContext
.mergeFrom(SetAttributePOptions.newBuilder().setRecursive(false).setPinned(false));
setAttributeSingleFile(rpcContext, descedant, true, opTimeMs, setAttributeContext);
}
// Remove corresponding blocks from workers.
mBlockMaster.removeBlocks(freeInode.asFile().getBlockIds(), false /* delete */);
}
}
}
Metrics.FILES_FREED.inc(freeInodes.size());
}
@Override
public AlluxioURI getPath(long fileId) throws FileDoesNotExistException {
try (LockedInodePath inodePath = mInodeTree.lockFullInodePath(fileId, LockPattern.READ)) {
// the path is already locked.
return mInodeTree.getPath(inodePath.getInode());
}
}
@Override
public Set<Long> getPinIdList() {
// return both the explicitly pinned inodes and not persisted inodes which should not be evicted
return Sets.union(mInodeTree.getPinIdSet(), mInodeTree.getToBePersistedIds());
}
  @Override
  public String getUfsAddress() {
    // The root UFS address comes from the master's mount-table root configuration property.
    return ServerConfiguration.get(PropertyKey.MASTER_MOUNT_TABLE_ROOT_UFS);
  }
@Override
public UfsInfo getUfsInfo(long mountId) {
MountInfo info = mMountTable.getMountInfo(mountId);
if (info == null) {
return new UfsInfo();
}
MountPOptions options = info.getOptions();
return new UfsInfo().setUri(info.getUfsUri())
.setMountOptions(MountContext
.mergeFrom(MountPOptions.newBuilder().putAllProperties(options.getPropertiesMap())
.setReadOnly(options.getReadOnly()).setShared(options.getShared()))
.getOptions().build());
}
  @Override
  public List<String> getWhiteList() {
    // Delegates directly to the whitelist of cacheable path prefixes.
    return mWhitelist.getList();
  }
@Override
public List<Long> getLostFiles() {
Set<Long> lostFiles = new HashSet<>();
for (long blockId : mBlockMaster.getLostBlocks()) {
// the file id is the container id of the block id
long containerId = BlockId.getContainerId(blockId);
long fileId = IdUtils.createFileId(containerId);
lostFiles.add(fileId);
}
return new ArrayList<>(lostFiles);
}
/**
* Loads metadata for the path if it is (non-existing || load direct children is set).
*
* See {@link #shouldLoadMetadataIfNotExists(LockedInodePath, LoadMetadataContext)}.
*
* @param rpcContext the rpc context
* @param path the path to load metadata for
* @param context the {@link LoadMetadataContext}
* @param isGetFileInfo whether this is loading for a {@link #getFileInfo} call
*/
private void loadMetadataIfNotExist(RpcContext rpcContext, AlluxioURI path,
LoadMetadataContext context, boolean isGetFileInfo)
throws InvalidPathException, AccessControlException {
DescendantType syncDescendantType =
GrpcUtils.fromProto(context.getOptions().getLoadDescendantType());
FileSystemMasterCommonPOptions commonOptions =
context.getOptions().getCommonOptions();
// load metadata only and force sync
InodeSyncStream sync = new InodeSyncStream(new LockingScheme(path, LockPattern.READ, false),
this, rpcContext, syncDescendantType, commonOptions, isGetFileInfo, true, true);
if (!sync.sync()) {
LOG.debug("Failed to load metadata for path from UFS: {}", path);
}
}
boolean shouldLoadMetadataIfNotExists(LockedInodePath inodePath, LoadMetadataContext context) {
boolean inodeExists = inodePath.fullPathExists();
boolean loadDirectChildren = false;
if (inodeExists) {
try {
Inode inode = inodePath.getInode();
loadDirectChildren = inode.isDirectory()
&& (context.getOptions().getLoadDescendantType() != LoadDescendantPType.NONE);
} catch (FileDoesNotExistException e) {
// This should never happen.
throw new RuntimeException(e);
}
}
return !inodeExists || loadDirectChildren;
}
private void prepareForMount(AlluxioURI ufsPath, long mountId, MountContext context)
throws IOException {
MountPOptions.Builder mountOption = context.getOptions();
try (CloseableResource<UnderFileSystem> ufsResource =
mUfsManager.get(mountId).acquireUfsResource()) {
UnderFileSystem ufs = ufsResource.get();
// Check that the ufsPath exists and is a directory
if (!ufs.isDirectory(ufsPath.toString())) {
throw new IOException(
ExceptionMessage.UFS_PATH_DOES_NOT_EXIST.getMessage(ufsPath.toString()));
}
if (UnderFileSystemUtils.isWeb(ufs)) {
mountOption.setReadOnly(true);
}
}
}
  /**
   * Replaces an existing mount entry with one carrying new options.
   *
   * A new mount id and UFS client are registered before the mount table is updated; on any
   * failure the newly registered client is removed again. The sync manager lock is held for the
   * whole operation so no active-sync point can be added while the old entry is being replaced.
   *
   * @param journalContext the journal context
   * @param inodePath the Alluxio mount point being updated
   * @param ufsPath the UFS path of the mount
   * @param mountInfo the existing mount information
   * @param context the mount context carrying the new options
   */
  private void updateMountInternal(Supplier<JournalContext> journalContext,
      LockedInodePath inodePath, AlluxioURI ufsPath, MountInfo mountInfo, MountContext context)
      throws FileAlreadyExistsException, InvalidPathException, IOException {
    long newMountId = IdUtils.createMountId();
    // lock sync manager to ensure no sync point is added before the mount point is removed
    try (LockResource r = new LockResource(mSyncManager.getLock())) {
      List<AlluxioURI> syncPoints = mSyncManager.getFilterList(mountInfo.getMountId());
      if (syncPoints != null && !syncPoints.isEmpty()) {
        throw new InvalidArgumentException("Updating a mount point with ActiveSync enabled is not"
            + " supported. Please remove all sync'ed paths from the mount point and try again.");
      }
      AlluxioURI alluxioPath = inodePath.getUri();
      // validate new UFS client before updating the mount table
      mUfsManager.addMount(newMountId, new AlluxioURI(ufsPath.toString()),
          UnderFileSystemConfiguration.defaults(ServerConfiguration.global())
              .setReadOnly(context.getOptions().getReadOnly())
              .setShared(context.getOptions().getShared())
              .createMountSpecificConf(context.getOptions().getPropertiesMap()));
      prepareForMount(ufsPath, newMountId, context);
      // old ufsClient is removed as part of the mount table update process
      mMountTable.update(journalContext, alluxioPath, newMountId, context.getOptions().build());
    } catch (FileAlreadyExistsException | InvalidPathException | IOException e) {
      // revert everything
      mUfsManager.removeMount(newMountId);
      throw e;
    }
  }
  @Override
  public void updateMount(AlluxioURI alluxioPath, MountContext context)
      throws FileAlreadyExistsException, FileDoesNotExistException, InvalidPathException,
      IOException, AccessControlException {
    // Hold a write-edge lock on the mount point while its options are swapped.
    LockingScheme lockingScheme = createLockingScheme(alluxioPath,
        context.getOptions().getCommonOptions(), LockPattern.WRITE_EDGE);
    try (RpcContext rpcContext = createRpcContext(context);
        LockedInodePath inodePath = mInodeTree
            .lockInodePath(lockingScheme.getPath(), lockingScheme.getPattern());
        FileSystemMasterAuditContext auditContext = createAuditContext(
            "updateMount", alluxioPath, null, inodePath.getParentInodeOrNull())) {
      // Updating a mount requires write permission on the parent; record denials for audit.
      try {
        mPermissionChecker.checkParentPermission(Mode.Bits.WRITE, inodePath);
      } catch (AccessControlException e) {
        auditContext.setAllowed(false);
        throw e;
      }
      MountInfo mountInfo = mMountTable.getMountTable().get(alluxioPath.getPath());
      if (mountInfo == null) {
        throw new InvalidPathException("Failed to update mount properties for "
            + inodePath.getUri() + ". Please ensure the path is an existing mount point.");
      }
      updateMountInternal(rpcContext, inodePath, mountInfo.getUfsUri(), mountInfo, context);
      auditContext.setSucceeded(true);
    }
  }
  @Override
  public void mount(AlluxioURI alluxioPath, AlluxioURI ufsPath, MountContext context)
      throws FileAlreadyExistsException, FileDoesNotExistException, InvalidPathException,
      IOException, AccessControlException {
    Metrics.MOUNT_OPS.inc();
    try (RpcContext rpcContext = createRpcContext(context);
        FileSystemMasterAuditContext auditContext =
            createAuditContext("mount", alluxioPath, null, null)) {
      // Normalize the UFS path before mounting.
      ufsPath = new AlluxioURI(PathUtils.normalizePath(ufsPath.toString(), AlluxioURI.SEPARATOR));
      // Sync the parent of the mount point with the UFS before taking the write-edge lock.
      syncMetadata(rpcContext,
          alluxioPath,
          context.getOptions().getCommonOptions(),
          DescendantType.ONE,
          auditContext,
          LockedInodePath::getParentInodeOrNull,
          (inodePath, permChecker) -> permChecker.checkParentPermission(Mode.Bits.WRITE, inodePath)
      );
      LockingScheme lockingScheme =
          createLockingScheme(alluxioPath, context.getOptions().getCommonOptions(),
              LockPattern.WRITE_EDGE);
      try (LockedInodePath inodePath = mInodeTree.lockInodePath(lockingScheme)) {
        auditContext.setSrcInode(inodePath.getParentInodeOrNull());
        // Re-check parent write permission under the write-edge lock; record denials for audit.
        try {
          mPermissionChecker.checkParentPermission(Mode.Bits.WRITE, inodePath);
        } catch (AccessControlException e) {
          auditContext.setAllowed(false);
          throw e;
        }
        mMountTable.checkUnderWritableMountPoint(alluxioPath);
        mountInternal(rpcContext, inodePath, ufsPath, context);
        auditContext.setSucceeded(true);
        Metrics.PATHS_MOUNTED.inc();
      }
    }
  }
  /**
   * Mounts a UFS path onto an Alluxio path.
   *
   * The mount table entry is added first; the directory inode for the mount point is then
   * created by loading its metadata. If the metadata load fails, the mount table entry is
   * deleted again so that no half-created mount remains.
   *
   * @param rpcContext the rpc context
   * @param inodePath the Alluxio path to mount to
   * @param ufsPath the UFS path to mount
   * @param context the mount context
   */
  private void mountInternal(RpcContext rpcContext, LockedInodePath inodePath, AlluxioURI ufsPath,
      MountContext context) throws InvalidPathException, FileAlreadyExistsException,
      FileDoesNotExistException, IOException, AccessControlException {
    // Check that the Alluxio Path does not exist
    if (inodePath.fullPathExists()) {
      // TODO(calvin): Add a test to validate this (ALLUXIO-1831)
      throw new InvalidPathException(
          ExceptionMessage.MOUNT_POINT_ALREADY_EXISTS.getMessage(inodePath.getUri()));
    }
    long mountId = IdUtils.createMountId();
    mountInternal(rpcContext, inodePath, ufsPath, mountId, context);
    boolean loadMetadataSucceeded = false;
    try {
      // This will create the directory at alluxioPath
      InodeSyncStream.loadDirectoryMetadata(rpcContext,
          inodePath,
          LoadMetadataContext.mergeFrom(
              LoadMetadataPOptions.newBuilder().setCreateAncestors(false)),
          mMountTable,
          this);
      loadMetadataSucceeded = true;
    } finally {
      if (!loadMetadataSucceeded) {
        // Roll back the mount table entry added by the mountInternal call above.
        mMountTable.delete(rpcContext, inodePath.getUri(), true);
      }
    }
  }
  /**
   * Updates the mount table with the specified mount point. The mount options may be updated during
   * this method.
   *
   * The UFS client is registered first (without connecting); validation and the mount table
   * insertion follow, and any failure removes the registered client again.
   *
   * @param journalContext the journal context
   * @param inodePath the Alluxio mount point
   * @param ufsPath the UFS endpoint to mount
   * @param mountId the mount id
   * @param context the mount context (may be updated)
   */
  private void mountInternal(Supplier<JournalContext> journalContext, LockedInodePath inodePath,
      AlluxioURI ufsPath, long mountId, MountContext context)
      throws FileAlreadyExistsException, InvalidPathException, IOException {
    AlluxioURI alluxioPath = inodePath.getUri();
    // Adding the mount point will not create the UFS instance and thus not connect to UFS
    mUfsManager.addMount(mountId, new AlluxioURI(ufsPath.toString()),
        UnderFileSystemConfiguration.defaults(ServerConfiguration.global())
            .setReadOnly(context.getOptions().getReadOnly())
            .setShared(context.getOptions().getShared())
            .createMountSpecificConf(context.getOptions().getPropertiesMap()));
    try {
      prepareForMount(ufsPath, mountId, context);
      // Check that the alluxioPath we're creating doesn't shadow a path in the parent UFS
      MountTable.Resolution resolution = mMountTable.resolve(alluxioPath);
      try (CloseableResource<UnderFileSystem> ufsResource =
          resolution.acquireUfsResource()) {
        String ufsResolvedPath = resolution.getUri().getPath();
        if (ufsResource.get().exists(ufsResolvedPath)) {
          throw new IOException(
              ExceptionMessage.MOUNT_PATH_SHADOWS_PARENT_UFS.getMessage(alluxioPath,
                  ufsResolvedPath));
        }
      }
      // Add the mount point. This will only succeed if we are not mounting a prefix of an existing
      // mount.
      mMountTable.add(journalContext, alluxioPath, ufsPath, mountId, context.getOptions().build());
    } catch (Exception e) {
      // Roll back the UFS client registration on any failure.
      mUfsManager.removeMount(mountId);
      throw e;
    }
  }
  @Override
  public void unmount(AlluxioURI alluxioPath) throws FileDoesNotExistException,
      InvalidPathException, IOException, AccessControlException {
    Metrics.UNMOUNT_OPS.inc();
    // Unmount should lock the parent to remove the child inode.
    try (RpcContext rpcContext = createRpcContext();
        LockedInodePath inodePath = mInodeTree
            .lockInodePath(alluxioPath, LockPattern.WRITE_EDGE);
        FileSystemMasterAuditContext auditContext =
            createAuditContext("unmount", alluxioPath, null, inodePath.getInodeOrNull())) {
      // Unmounting requires write permission on the parent; record denials for audit.
      try {
        mPermissionChecker.checkParentPermission(Mode.Bits.WRITE, inodePath);
      } catch (AccessControlException e) {
        auditContext.setAllowed(false);
        throw e;
      }
      unmountInternal(rpcContext, inodePath);
      auditContext.setSucceeded(true);
      Metrics.PATHS_UNMOUNTED.inc();
    }
  }
  /**
   * Unmounts a UFS path previously mounted onto an Alluxio path.
   *
   * This method does not delete blocks. Instead, it adds them to the passed-in block deletion
   * context so that the blocks can be deleted after the inode deletion journal entry has been
   * written. We cannot delete blocks earlier because the inode deletion may fail, leaving us
   * with an inode containing deleted blocks.
   *
   * @param rpcContext the rpc context
   * @param inodePath the Alluxio path to unmount, must be a mount point
   */
  private void unmountInternal(RpcContext rpcContext, LockedInodePath inodePath)
      throws InvalidPathException, FileDoesNotExistException, IOException {
    if (!inodePath.fullPathExists()) {
      throw new FileDoesNotExistException(
          "Failed to unmount: Path " + inodePath.getUri() + " does not exist");
    }
    MountInfo mountInfo = mMountTable.getMountTable().get(inodePath.getUri().getPath());
    if (mountInfo == null) {
      throw new InvalidPathException("Failed to unmount " + inodePath.getUri() + ". Please ensure"
          + " the path is an existing mount point.");
    }
    // Stop active sync before the mount table entry disappears.
    mSyncManager.stopSyncForMount(mountInfo.getMountId());
    if (!mMountTable.delete(rpcContext, inodePath.getUri(), true)) {
      throw new InvalidPathException("Failed to unmount " + inodePath.getUri() + ". Please ensure"
          + " the path is an existing mount point and not root.");
    }
    try {
      // Use the internal delete API, setting {@code alluxioOnly} to true to prevent the delete
      // operations from being persisted in the UFS.
      deleteInternal(rpcContext, inodePath, DeleteContext
          .mergeFrom(DeletePOptions.newBuilder().setRecursive(true).setAlluxioOnly(true)));
    } catch (DirectoryNotEmptyException e) {
      throw new RuntimeException(String.format(
          "We should never see this exception because %s should never be thrown when recursive "
              + "is true.",
          e.getClass()));
    }
  }
@Override
public void setAcl(AlluxioURI path, SetAclAction action, List<AclEntry> entries,
SetAclContext context)
throws FileDoesNotExistException, AccessControlException, InvalidPathException, IOException {
Metrics.SET_ACL_OPS.inc();
try (RpcContext rpcContext = createRpcContext(context);
FileSystemMasterAuditContext auditContext =
createAuditContext("setAcl", path, null, null)) {
syncMetadata(rpcContext,
path,
context.getOptions().getCommonOptions(),
context.getOptions().getRecursive() ? DescendantType.ALL : DescendantType.NONE,
auditContext,
LockedInodePath::getInodeOrNull,
(inodePath, permChecker) ->
permChecker.checkSetAttributePermission(inodePath, false, true, false)
);
LockingScheme lockingScheme =
createLockingScheme(path, context.getOptions().getCommonOptions(),
LockPattern.WRITE_INODE);
try (LockedInodePath inodePath = mInodeTree.lockInodePath(lockingScheme)) {
mPermissionChecker.checkSetAttributePermission(inodePath, false, true, false);
if (context.getOptions().getRecursive()) {
try (LockedInodePathList descendants = mInodeTree.getDescendants(inodePath)) {
for (LockedInodePath child : descendants) {
mPermissionChecker.checkSetAttributePermission(child, false, true, false);
}
} catch (AccessControlException e) {
auditContext.setAllowed(false);
throw e;
}
}
if (!inodePath.fullPathExists()) {
throw new FileDoesNotExistException(ExceptionMessage
.PATH_DOES_NOT_EXIST.getMessage(path));
}
setAclInternal(rpcContext, action, inodePath, entries, context);
auditContext.setSucceeded(true);
}
}
}
private void setAclInternal(RpcContext rpcContext, SetAclAction action, LockedInodePath inodePath,
List<AclEntry> entries, SetAclContext context)
throws IOException, FileDoesNotExistException {
Preconditions.checkState(inodePath.getLockPattern().isWrite());
long opTimeMs = System.currentTimeMillis();
// Check inputs for setAcl
switch (action) {
case REPLACE:
Set<AclEntryType> types =
entries.stream().map(AclEntry::getType).collect(Collectors.toSet());
Set<AclEntryType> requiredTypes =
Sets.newHashSet(AclEntryType.OWNING_USER, AclEntryType.OWNING_GROUP,
AclEntryType.OTHER);
requiredTypes.removeAll(types);
// make sure the required entries are present
if (!requiredTypes.isEmpty()) {
throw new IOException(ExceptionMessage.ACL_BASE_REQUIRED.getMessage(
String.join(", ", requiredTypes.stream().map(AclEntryType::toString).collect(
Collectors.toList()))));
}
break;
case MODIFY: // fall through
case REMOVE:
if (entries.isEmpty()) {
// Nothing to do.
return;
}
break;
case REMOVE_ALL:
break;
case REMOVE_DEFAULT:
break;
default:
}
setAclRecursive(rpcContext, action, inodePath, entries, false, opTimeMs, context);
}
  /**
   * Mirrors the inode's ACL (and, for directories, its default ACL) to the UFS file.
   *
   * @param inodePath the locked path whose ACL should be written to the UFS
   */
  private void setUfsAcl(LockedInodePath inodePath)
      throws InvalidPathException, AccessControlException {
    // NOTE(review): getInodeOrNull may return null; the dereferences below assume the inode
    // exists — confirm callers only invoke this on existing paths.
    Inode inode = inodePath.getInodeOrNull();
    checkUfsMode(inodePath.getUri(), OperationType.WRITE);
    MountTable.Resolution resolution = mMountTable.resolve(inodePath.getUri());
    String ufsUri = resolution.getUri().toString();
    try (CloseableResource<UnderFileSystem> ufsResource = resolution.acquireUfsResource()) {
      UnderFileSystem ufs = ufsResource.get();
      if (ufs.isObjectStorage()) {
        // Object stores have no POSIX ACLs; warn instead of failing.
        LOG.warn("SetACL is not supported to object storage UFS via Alluxio. "
            + "UFS: " + ufsUri + ". This has no effect on the underlying object.");
      } else {
        try {
          // Push both the access ACL and, for directories, the default ACL.
          List<AclEntry> entries = new ArrayList<>(inode.getACL().getEntries());
          if (inode.isDirectory()) {
            entries.addAll(inode.asDirectory().getDefaultACL().getEntries());
          }
          ufs.setAclEntries(ufsUri, entries);
        } catch (IOException e) {
          // NOTE(review): the original IOException cause is dropped here — consider chaining e.
          throw new AccessControlException("Could not setAcl for UFS file: " + ufsUri);
        }
      }
    }
  }
  /**
   * Applies an ACL action to a single inode, journals the change, and — when the inode is
   * persisted and this is not a replay — mirrors the change to the UFS.
   *
   * @param rpcContext the rpc context
   * @param action the ACL action to perform
   * @param inodePath the locked inode to modify (must hold a write lock)
   * @param entries the ACL entries for the action
   * @param replay whether this call replays a journal entry (skips the UFS update)
   * @param opTimeMs the operation timestamp in milliseconds
   */
  private void setAclSingleInode(RpcContext rpcContext, SetAclAction action,
      LockedInodePath inodePath, List<AclEntry> entries, boolean replay, long opTimeMs)
      throws IOException, FileDoesNotExistException {
    Preconditions.checkState(inodePath.getLockPattern().isWrite());
    Inode inode = inodePath.getInode();
    // Check that we are not removing an extended mask.
    if (action == SetAclAction.REMOVE) {
      for (AclEntry entry : entries) {
        if ((entry.isDefault() && inode.getDefaultACL().hasExtended())
            || (!entry.isDefault() && inode.getACL().hasExtended())) {
          if (entry.getType() == AclEntryType.MASK) {
            throw new InvalidArgumentException(
                "Deleting the mask for an extended ACL is not allowed. entry: " + entry);
          }
        }
      }
    }
    // Check that we are not setting default ACL to a file
    if (inode.isFile()) {
      for (AclEntry entry : entries) {
        if (entry.isDefault()) {
          throw new UnsupportedOperationException("Can not set default ACL for a file");
        }
      }
    }
    mInodeTree.setAcl(rpcContext, SetAclEntry.newBuilder()
        .setId(inode.getId())
        .setOpTimeMs(opTimeMs)
        .setAction(ProtoUtils.toProto(action))
        .addAllEntries(entries.stream().map(ProtoUtils::toProto).collect(Collectors.toList()))
        .build());
    try {
      if (!replay && inode.isPersisted()) {
        setUfsAcl(inodePath);
      }
    } catch (InvalidPathException | AccessControlException e) {
      LOG.warn("Setting ufs ACL failed for path: {}", inodePath.getUri(), e);
      // TODO(david): revert the acl and default acl to the initial state if writing to ufs failed.
    }
  }
private void setAclRecursive(RpcContext rpcContext, SetAclAction action,
LockedInodePath inodePath, List<AclEntry> entries, boolean replay, long opTimeMs,
SetAclContext context) throws IOException, FileDoesNotExistException {
Preconditions.checkState(inodePath.getLockPattern().isWrite());
setAclSingleInode(rpcContext, action, inodePath, entries, replay, opTimeMs);
if (context.getOptions().getRecursive()) {
try (LockedInodePathList descendants = mInodeTree.getDescendants(inodePath)) {
for (LockedInodePath childPath : descendants) {
rpcContext.throwIfCancelled();
setAclSingleInode(rpcContext, action, childPath, entries, replay, opTimeMs);
}
}
}
}
  @Override
  public void setAttribute(AlluxioURI path, SetAttributeContext context)
      throws FileDoesNotExistException, AccessControlException, InvalidPathException, IOException {
    SetAttributePOptions.Builder options = context.getOptions();
    Metrics.SET_ATTRIBUTE_OPS.inc();
    // for chown
    boolean rootRequired = options.hasOwner();
    // for chgrp, chmod
    boolean ownerRequired = (options.hasGroup()) || (options.hasMode());
    // for other attributes
    boolean writeRequired = !rootRequired && !ownerRequired;
    if (options.hasOwner() && options.hasGroup()) {
      // Changing both owner and group: verify the membership up front for a clearer error.
      try {
        checkUserBelongsToGroup(options.getOwner(), options.getGroup());
      } catch (IOException e) {
        throw new IOException(String.format("Could not update owner:group for %s to %s:%s. %s",
            path.toString(), options.getOwner(), options.getGroup(), e.toString()), e);
      }
    }
    // Pick the audit-log command name matching the requested attribute change.
    String commandName;
    boolean checkWritableMountPoint = false;
    if (options.hasOwner()) {
      commandName = "chown";
      checkWritableMountPoint = true;
    } else if (options.hasGroup()) {
      commandName = "chgrp";
      checkWritableMountPoint = true;
    } else if (options.hasMode()) {
      commandName = "chmod";
      checkWritableMountPoint = true;
    } else {
      commandName = "setAttribute";
    }
    try (RpcContext rpcContext = createRpcContext(context);
        FileSystemMasterAuditContext auditContext =
            createAuditContext(commandName, path, null, null)) {
      // Force recursive sync metadata if it is a pinning and unpinning operation
      boolean recursiveSync = options.hasPinned() || options.getRecursive();
      syncMetadata(rpcContext,
          path,
          context.getOptions().getCommonOptions(),
          recursiveSync ? DescendantType.ALL : DescendantType.ONE,
          auditContext,
          LockedInodePath::getInodeOrNull,
          (inodePath, permChecker) -> permChecker.checkSetAttributePermission(
              inodePath, rootRequired, ownerRequired, writeRequired)
      );
      LockingScheme lockingScheme = createLockingScheme(path, options.getCommonOptions(),
          LockPattern.WRITE_INODE);
      try (LockedInodePath inodePath = mInodeTree.lockInodePath(lockingScheme)) {
        auditContext.setSrcInode(inodePath.getInodeOrNull());
        if (checkWritableMountPoint) {
          // chown/chgrp/chmod must not target a path under a read-only mount.
          mMountTable.checkUnderWritableMountPoint(path);
        }
        if (!inodePath.fullPathExists()) {
          throw new FileDoesNotExistException(ExceptionMessage
              .PATH_DOES_NOT_EXIST.getMessage(path));
        }
        // Check permission on the target and, for recursive calls, on every descendant;
        // denials are recorded in the audit log before rethrowing.
        try {
          mPermissionChecker
              .checkSetAttributePermission(inodePath, rootRequired, ownerRequired, writeRequired);
          if (context.getOptions().getRecursive()) {
            try (LockedInodePathList descendants = mInodeTree.getDescendants(inodePath)) {
              for (LockedInodePath childPath : descendants) {
                mPermissionChecker
                    .checkSetAttributePermission(childPath, rootRequired, ownerRequired,
                        writeRequired);
              }
            }
          }
        } catch (AccessControlException e) {
          auditContext.setAllowed(false);
          throw e;
        }
        setAttributeInternal(rpcContext, inodePath, context);
        auditContext.setSucceeded(true);
      }
    }
  }
/**
* Checks whether the owner belongs to the group.
*
* @param owner the owner to check
* @param group the group to check
* @throws FailedPreconditionException if owner does not belong to group
*/
private void checkUserBelongsToGroup(String owner, String group)
throws IOException {
List<String> groups = CommonUtils.getGroups(owner, ServerConfiguration.global());
if (groups == null || !groups.contains(group)) {
throw new FailedPreconditionException("Owner " + owner
+ " does not belong to the group " + group);
}
}
/**
* Sets the file attribute.
*
* @param rpcContext the rpc context
* @param inodePath the {@link LockedInodePath} to set attribute for
* @param context attributes to be set, see {@link SetAttributePOptions}
*/
private void setAttributeInternal(RpcContext rpcContext, LockedInodePath inodePath,
SetAttributeContext context)
throws InvalidPathException, FileDoesNotExistException, AccessControlException, IOException {
Inode targetInode = inodePath.getInode();
long opTimeMs = System.currentTimeMillis();
if (context.getOptions().getRecursive() && targetInode.isDirectory()) {
try (LockedInodePathList descendants = mInodeTree.getDescendants(inodePath)) {
for (LockedInodePath childPath : descendants) {
rpcContext.throwIfCancelled();
setAttributeSingleFile(rpcContext, childPath, true, opTimeMs, context);
}
}
}
setAttributeSingleFile(rpcContext, inodePath, true, opTimeMs, context);
}
  @Override
  public void scheduleAsyncPersistence(AlluxioURI path, ScheduleAsyncPersistenceContext context)
      throws AlluxioException, UnavailableException {
    // Lock the full path with a write-inode lock for the duration of the scheduling.
    try (RpcContext rpcContext = createRpcContext(context);
        LockedInodePath inodePath = mInodeTree.lockFullInodePath(path, LockPattern.WRITE_INODE)) {
      scheduleAsyncPersistenceInternal(inodePath, context, rpcContext);
    }
  }
  /**
   * Marks a completed file TO_BE_PERSISTED and registers a retry timer for the persistence job.
   *
   * @param inodePath the locked path of the file to persist
   * @param context the scheduling context carrying the persistence wait time
   * @param rpcContext the rpc context used to journal the inode update
   */
  private void scheduleAsyncPersistenceInternal(LockedInodePath inodePath,
      ScheduleAsyncPersistenceContext context, RpcContext rpcContext)
      throws InvalidPathException, FileDoesNotExistException {
    InodeFile inode = inodePath.getInodeFile();
    // Only completed files can be persisted.
    if (!inode.isCompleted()) {
      throw new InvalidPathException(
          "Cannot persist an incomplete Alluxio file: " + inodePath.getUri());
    }
    // NOTE(review): this passes inodePath.toString() rather than inodePath.getUri() — confirm
    // shouldPersistPath expects the LockedInodePath string form.
    if (shouldPersistPath(inodePath.toString())) {
      mInodeTree.updateInode(rpcContext, UpdateInodeEntry.newBuilder().setId(inode.getId())
          .setPersistenceState(PersistenceState.TO_BE_PERSISTED.name()).build());
      // The exponential timer governs how often the persistence of this inode is retried.
      mPersistRequests.put(inode.getId(),
          new alluxio.time.ExponentialTimer(
              ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_INITIAL_INTERVAL_MS),
              ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_MAX_INTERVAL_MS),
              context.getPersistenceWaitTime(),
              ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_MAX_TOTAL_WAIT_TIME_MS)));
    }
  }
  /**
   * Actively sync metadata, based on a list of changed files.
   *
   * @param path the path to sync
   * @param changedFiles collection of files that are changed under the path to sync, if this is
   *        null, force sync the entire directory
   * @param executorService executor to execute the parallel incremental sync
   */
  public void activeSyncMetadata(AlluxioURI path, Collection<AlluxioURI> changedFiles,
      ExecutorService executorService) throws IOException {
    if (changedFiles == null) {
      LOG.info("Start an active full sync of {}", path.toString());
    } else {
      LOG.info("Start an active incremental sync of {} files", changedFiles.size());
    }
    long start = System.currentTimeMillis();
    if (changedFiles != null && changedFiles.isEmpty()) {
      // Incremental sync with no changed files is a no-op.
      return;
    }
    try (RpcContext rpcContext = createRpcContext()) {
      if (changedFiles == null) {
        // full sync
        // Set sync interval to 0 to force a sync.
        FileSystemMasterCommonPOptions options =
            FileSystemMasterCommonPOptions.newBuilder().setSyncIntervalMs(0).build();
        LockingScheme scheme = createSyncLockingScheme(path, options, false);
        InodeSyncStream sync = new InodeSyncStream(scheme, this, rpcContext,
            DescendantType.ALL, options, false, false, false);
        if (!sync.sync()) {
          LOG.debug("Active full sync on {} didn't sync any paths.", path);
        }
        long end = System.currentTimeMillis();
        LOG.info("Ended an active full sync of {} in {}ms", path.toString(), end - start);
        return;
      } else {
        // incremental sync
        // One task per changed file, so the executor can run them in parallel.
        Set<Callable<Void>> callables = new HashSet<>();
        for (AlluxioURI changedFile : changedFiles) {
          callables.add(() -> {
            // Set sync interval to 0 to force a sync.
            FileSystemMasterCommonPOptions options =
                FileSystemMasterCommonPOptions.newBuilder().setSyncIntervalMs(0).build();
            LockingScheme scheme = createSyncLockingScheme(changedFile, options, false);
            InodeSyncStream sync = new InodeSyncStream(scheme,
                this, rpcContext,
                DescendantType.ONE, options, false, false, false);
            if (!sync.sync()) {
              // Use debug because this can be a noisy log
              LOG.debug("Incremental sync on {} didn't sync any paths.", path);
            }
            return null;
          });
        }
        executorService.invokeAll(callables);
      }
    } catch (InterruptedException e) {
      LOG.warn("InterruptedException during active sync: {}", e.toString());
      // Preserve the interrupt status for callers further up the stack.
      Thread.currentThread().interrupt();
      return;
    } catch (InvalidPathException | AccessControlException e) {
      LogUtils.warnWithException(LOG, "Failed to active sync on path {}", path, e);
    }
    if (changedFiles != null) {
      long end = System.currentTimeMillis();
      LOG.info("Ended an active incremental sync of {} files in {}ms", changedFiles.size(),
          end - start);
    }
  }
@Override
public boolean recordActiveSyncTxid(long txId, long mountId) {
MountInfo mountInfo = mMountTable.getMountInfo(mountId);
if (mountInfo == null) {
return false;
}
AlluxioURI mountPath = mountInfo.getAlluxioUri();
try (RpcContext rpcContext = createRpcContext();
LockedInodePath inodePath = mInodeTree
.lockFullInodePath(mountPath, LockPattern.READ)) {
File.ActiveSyncTxIdEntry txIdEntry =
File.ActiveSyncTxIdEntry.newBuilder().setTxId(txId).setMountId(mountId).build();
rpcContext.journal(JournalEntry.newBuilder().setActiveSyncTxId(txIdEntry).build());
} catch (UnavailableException | InvalidPathException | FileDoesNotExistException e) {
LOG.warn("Exception when recording activesync txid, path {}, exception {}",
mountPath, e);
return false;
}
return true;
}
  /**
   * Convenience overload of the full syncMetadata method with {@code isGetFileInfo} set to false.
   *
   * @param rpcContext the current RPC context
   * @param path the path to sync
   * @param options options included with the RPC
   * @param syncDescendantType how deep the sync should be performed
   * @param auditContext the audit context of the enclosing operation, or null if none
   * @param auditContextSrcInodeFunc supplies the src inode for the audit context, or null
   * @param permissionCheckOperation the permission check to run per path, or null to skip
   * @return the result of the underlying sync call
   */
  private boolean syncMetadata(RpcContext rpcContext, AlluxioURI path,
      FileSystemMasterCommonPOptions options, DescendantType syncDescendantType,
      @Nullable FileSystemMasterAuditContext auditContext,
      @Nullable Function<LockedInodePath, Inode> auditContextSrcInodeFunc,
      @Nullable PermissionCheckFunction permissionCheckOperation) throws AccessControlException,
      InvalidPathException {
    return syncMetadata(rpcContext, path, options, syncDescendantType, auditContext,
        auditContextSrcInodeFunc, permissionCheckOperation, false);
  }
  /**
   * Sync metadata for an Alluxio path with the UFS.
   *
   * @param rpcContext the current RPC context
   * @param path the path to sync
   * @param options options included with the RPC
   * @param syncDescendantType how deep the sync should be performed
   * @param auditContext the audit context of the enclosing operation, or null if none
   * @param auditContextSrcInodeFunc the src inode for the audit context, if null, no source inode
   *        is set on the audit context
   * @param permissionCheckOperation a consumer that accepts a locked inode path and a
   *        {@link PermissionChecker}. The consumer is expected to call one
   *        of the permission checkers functions with the given inode path.
   *        If null, no permission checking is performed
   * @param isGetFileInfo true if syncing for a getFileInfo operation
   * @return false if the locking scheme determined that no sync was necessary, otherwise the
   *         result of running the inode sync stream
   */
  private boolean syncMetadata(RpcContext rpcContext, AlluxioURI path,
      FileSystemMasterCommonPOptions options, DescendantType syncDescendantType,
      @Nullable FileSystemMasterAuditContext auditContext,
      @Nullable Function<LockedInodePath, Inode> auditContextSrcInodeFunc,
      @Nullable PermissionCheckFunction permissionCheckOperation,
      boolean isGetFileInfo) throws AccessControlException, InvalidPathException {
    LockingScheme syncScheme = createSyncLockingScheme(path, options, isGetFileInfo);
    if (!syncScheme.shouldSync()) {
      return false;
    }
    InodeSyncStream sync = new InodeSyncStream(syncScheme, this, rpcContext, syncDescendantType,
        options, auditContext, auditContextSrcInodeFunc, permissionCheckOperation, isGetFileInfo,
        false, false);
    return sync.sync();
  }
  /**
   * A permission-checking callback invoked while the inode path is locked. Implementations are
   * expected to call one of {@link PermissionChecker}'s check methods and throw if the check
   * fails.
   */
  @FunctionalInterface
  interface PermissionCheckFunction {
    /**
     * Performs this operation on the given arguments.
     *
     * @param l the first input argument
     * @param c the second input argument
     * @throws AccessControlException if the permission check fails
     * @throws InvalidPathException if the locked path is invalid
     */
    void accept(LockedInodePath l, PermissionChecker c) throws AccessControlException,
        InvalidPathException;
  }
  /** @return the read-only inode store */
  ReadOnlyInodeStore getInodeStore() {
    return mInodeStore;
  }
  /** @return the inode tree */
  InodeTree getInodeTree() {
    return mInodeTree;
  }
  /** @return the inode lock manager */
  InodeLockManager getInodeLockManager() {
    return mInodeLockManager;
  }
  /** @return the mount table */
  MountTable getMountTable() {
    return mMountTable;
  }
  /** @return the UFS sync path cache */
  UfsSyncPathCache getSyncPathCache() {
    return mUfsSyncPathCache;
  }
  /** @return the permission checker */
  PermissionChecker getPermissionChecker() {
    return mPermissionChecker;
  }
@Override
public FileSystemCommand workerHeartbeat(long workerId, List<Long> persistedFiles,
WorkerHeartbeatContext context) throws IOException {
List<String> persistedUfsFingerprints = context.getOptions().getPersistedFileFingerprintsList();
boolean hasPersistedFingerprints = persistedUfsFingerprints.size() == persistedFiles.size();
for (int i = 0; i < persistedFiles.size(); i++) {
long fileId = persistedFiles.get(i);
String ufsFingerprint = hasPersistedFingerprints ? persistedUfsFingerprints.get(i) :
Constants.INVALID_UFS_FINGERPRINT;
try {
// Permission checking for each file is performed inside setAttribute
setAttribute(getPath(fileId),
SetAttributeContext
.mergeFrom(SetAttributePOptions.newBuilder().setPersisted(true))
.setUfsFingerprint(ufsFingerprint));
} catch (FileDoesNotExistException | AccessControlException | InvalidPathException e) {
LOG.error("Failed to set file {} as persisted, because {}", fileId, e);
}
}
// TODO(zac) Clean up master and worker code since this is taken care of by job service now.
// Worker should not persist any files. Instead, files are persisted through job service.
List<PersistFile> filesToPersist = new ArrayList<>();
FileSystemCommandOptions commandOptions = new FileSystemCommandOptions();
commandOptions.setPersistOptions(new PersistCommandOptions(filesToPersist));
return new FileSystemCommand(CommandType.Persist, commandOptions);
}
/**
* @param inodePath the {@link LockedInodePath} to use
* @param updateUfs whether to update the UFS with the attribute change
* @param opTimeMs the operation time (in milliseconds)
* @param context the method context
*/
protected void setAttributeSingleFile(RpcContext rpcContext, LockedInodePath inodePath,
boolean updateUfs, long opTimeMs, SetAttributeContext context)
throws FileDoesNotExistException, InvalidPathException, AccessControlException {
Inode inode = inodePath.getInode();
SetAttributePOptions.Builder protoOptions = context.getOptions();
if (protoOptions.hasPinned()) {
mInodeTree.setPinned(rpcContext, inodePath, context.getOptions().getPinned(),
context.getOptions().getPinnedMediaList(), opTimeMs);
}
UpdateInodeEntry.Builder entry = UpdateInodeEntry.newBuilder().setId(inode.getId());
if (protoOptions.hasReplicationMax() || protoOptions.hasReplicationMin()) {
Integer replicationMax =
protoOptions.hasReplicationMax() ? protoOptions.getReplicationMax() : null;
Integer replicationMin =
protoOptions.hasReplicationMin() ? protoOptions.getReplicationMin() : null;
mInodeTree.setReplication(rpcContext, inodePath, replicationMax, replicationMin, opTimeMs);
}
// protoOptions may not have both fields set
if (protoOptions.hasCommonOptions()) {
FileSystemMasterCommonPOptions commonOpts = protoOptions.getCommonOptions();
TtlAction action = commonOpts.hasTtlAction() ? commonOpts.getTtlAction() : null;
Long ttl = commonOpts.hasTtl() ? commonOpts.getTtl() : null;
boolean modified = false;
if (ttl != null && inode.getTtl() != ttl) {
entry.setTtl(ttl);
modified = true;
}
if (action != null && inode.getTtlAction() != action) {
entry.setTtlAction(ProtobufUtils.toProtobuf(action));
modified = true;
}
if (modified) {
entry.setLastModificationTimeMs(opTimeMs);
}
}
if (protoOptions.hasPersisted()) {
Preconditions.checkArgument(inode.isFile(), PreconditionMessage.PERSIST_ONLY_FOR_FILE);
Preconditions.checkArgument(inode.asFile().isCompleted(),
PreconditionMessage.FILE_TO_PERSIST_MUST_BE_COMPLETE);
// TODO(manugoyal) figure out valid behavior in the un-persist case
Preconditions
.checkArgument(protoOptions.getPersisted(), PreconditionMessage.ERR_SET_STATE_UNPERSIST);
if (!inode.asFile().isPersisted()) {
entry.setPersistenceState(PersistenceState.PERSISTED.name());
entry.setLastModificationTimeMs(context.getOperationTimeMs());
propagatePersistedInternal(rpcContext, inodePath);
Metrics.FILES_PERSISTED.inc();
}
}
boolean ownerGroupChanged = (protoOptions.hasOwner()) || (protoOptions.hasGroup());
boolean modeChanged = protoOptions.hasMode();
// If the file is persisted in UFS, also update corresponding owner/group/permission.
if ((ownerGroupChanged || modeChanged) && updateUfs && inode.isPersisted()) {
if ((inode instanceof InodeFile) && !inode.asFile().isCompleted()) {
LOG.debug("Alluxio does not propagate chown/chgrp/chmod to UFS for incomplete files.");
} else {
checkUfsMode(inodePath.getUri(), OperationType.WRITE);
MountTable.Resolution resolution = mMountTable.resolve(inodePath.getUri());
String ufsUri = resolution.getUri().toString();
try (CloseableResource<UnderFileSystem> ufsResource = resolution.acquireUfsResource()) {
UnderFileSystem ufs = ufsResource.get();
if (ufs.isObjectStorage()) {
LOG.debug("setOwner/setMode is not supported to object storage UFS via Alluxio. "
+ "UFS: " + ufsUri + ". This has no effect on the underlying object.");
} else {
String owner = null;
String group = null;
String mode = null;
if (ownerGroupChanged) {
try {
owner =
protoOptions.getOwner() != null ? protoOptions.getOwner() : inode.getOwner();
group =
protoOptions.getGroup() != null ? protoOptions.getGroup() : inode.getGroup();
ufs.setOwner(ufsUri, owner, group);
} catch (IOException e) {
throw new AccessControlException("Could not setOwner for UFS file " + ufsUri
+ " . Aborting the setAttribute operation in Alluxio.", e);
}
}
if (modeChanged) {
try {
mode = String.valueOf(protoOptions.getMode());
ufs.setMode(ufsUri, ModeUtils.protoToShort(protoOptions.getMode()));
} catch (IOException e) {
throw new AccessControlException("Could not setMode for UFS file " + ufsUri
+ " . Aborting the setAttribute operation in Alluxio.", e);
}
}
// Retrieve the ufs fingerprint after the ufs changes.
String existingFingerprint = inode.getUfsFingerprint();
if (!existingFingerprint.equals(Constants.INVALID_UFS_FINGERPRINT)) {
// Update existing fingerprint, since contents did not change
Fingerprint fp = Fingerprint.parse(existingFingerprint);
fp.putTag(Fingerprint.Tag.OWNER, owner);
fp.putTag(Fingerprint.Tag.GROUP, group);
fp.putTag(Fingerprint.Tag.MODE, mode);
context.setUfsFingerprint(fp.serialize());
} else {
// Need to retrieve the fingerprint from ufs.
context.setUfsFingerprint(ufs.getFingerprint(ufsUri));
}
}
}
}
}
if (!context.getUfsFingerprint().equals(Constants.INVALID_UFS_FINGERPRINT)) {
entry.setUfsFingerprint(context.getUfsFingerprint());
}
// Only commit the set permission to inode after the propagation to UFS succeeded.
if (protoOptions.hasOwner()) {
entry.setOwner(protoOptions.getOwner());
}
if (protoOptions.hasGroup()) {
entry.setGroup(protoOptions.getGroup());
}
if (modeChanged) {
entry.setMode(ModeUtils.protoToShort(protoOptions.getMode()));
}
mInodeTree.updateInode(rpcContext, entry.build());
}
  @Override
  public List<SyncPointInfo> getSyncPathList() {
    // Delegates directly to the active-sync manager.
    return mSyncManager.getSyncPathList();
  }
  @Override
  public void startSync(AlluxioURI syncPoint)
      throws IOException, InvalidPathException, AccessControlException, ConnectionFailedException {
    // WRITE_EDGE lock: the sync point's edge is locked for the duration of the registration.
    LockingScheme lockingScheme = new LockingScheme(syncPoint, LockPattern.WRITE_EDGE, true);
    try (RpcContext rpcContext = createRpcContext();
         LockedInodePath inodePath = mInodeTree
             .lockInodePath(lockingScheme.getPath(), lockingScheme.getPattern());
         FileSystemMasterAuditContext auditContext =
             createAuditContext("startSync", syncPoint, null,
                 inodePath.getParentInodeOrNull())) {
      try {
        // Caller must have write permission on the parent of the sync point.
        mPermissionChecker.checkParentPermission(Mode.Bits.WRITE, inodePath);
      } catch (AccessControlException e) {
        // Record the denial in the audit log before propagating.
        auditContext.setAllowed(false);
        throw e;
      }
      mSyncManager.startSyncAndJournal(rpcContext, syncPoint);
      auditContext.setSucceeded(true);
    }
  }
  @Override
  public void stopSync(AlluxioURI syncPoint)
      throws IOException, InvalidPathException, AccessControlException {
    try (RpcContext rpcContext = createRpcContext()) {
      boolean isSuperUser = true;
      try {
        mPermissionChecker.checkSuperUser();
      } catch (AccessControlException e) {
        // Not a super user: the stop will happen later, under the inode lock.
        isSuperUser = false;
      }
      if (isSuperUser) {
        // TODO(AM): Remove once we don't require a write lock on the sync point during a full sync
        // Stop sync w/o acquiring an inode lock to terminate an initial full scan (if running)
        mSyncManager.stopSyncAndJournal(rpcContext, syncPoint);
      }
      LockingScheme lockingScheme = new LockingScheme(syncPoint, LockPattern.READ, false);
      try (LockedInodePath inodePath =
               mInodeTree.lockInodePath(lockingScheme.getPath(), lockingScheme.getPattern());
           FileSystemMasterAuditContext auditContext =
               createAuditContext("stopSync", syncPoint, null,
                   inodePath.getParentInodeOrNull())) {
        try {
          // Permission is checked even for the super-user path, so the audit entry is accurate.
          mPermissionChecker.checkParentPermission(Mode.Bits.WRITE, inodePath);
        } catch (AccessControlException e) {
          auditContext.setAllowed(false);
          throw e;
        }
        if (!isSuperUser) {
          // Stop sync here only if not terminated w/o holding the inode lock
          mSyncManager.stopSyncAndJournal(rpcContext, syncPoint);
        }
        auditContext.setSucceeded(true);
      }
    }
  }
  @Override
  public List<WorkerInfo> getWorkerInfoList() throws UnavailableException {
    // Worker membership is owned by the block master.
    return mBlockMaster.getWorkerInfoList();
  }
/**
* @param fileId file ID
* @param jobId persist job ID
* @param persistenceWaitTime persistence initial wait time
* @param uri Alluxio Uri of the file
* @param tempUfsPath temp UFS path
*/
private void addPersistJob(long fileId, long jobId, long persistenceWaitTime, AlluxioURI uri,
String tempUfsPath) {
alluxio.time.ExponentialTimer timer = mPersistRequests.remove(fileId);
if (timer == null) {
timer = new alluxio.time.ExponentialTimer(
ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_INITIAL_INTERVAL_MS),
ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_MAX_INTERVAL_MS),
persistenceWaitTime,
ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_MAX_TOTAL_WAIT_TIME_MS));
}
mPersistJobs.put(fileId, new PersistJob(jobId, fileId, uri, tempUfsPath, timer));
}
private long getPersistenceWaitTime(long shouldPersistTime) {
long currentTime = System.currentTimeMillis();
if (shouldPersistTime >= currentTime) {
return shouldPersistTime - currentTime;
} else {
return 0;
}
}
  /**
   * Periodically schedules jobs to persist files and updates metadata accordingly.
   */
  @NotThreadSafe
  private final class PersistenceScheduler implements alluxio.heartbeat.HeartbeatExecutor {
    // Upper bound for the exponential backoff applied when the job service is saturated.
    private static final long MAX_QUIET_PERIOD_SECONDS = 64;
    /**
     * Quiet period for job service flow control (in seconds). When job service refuses starting new
     * jobs, we use exponential backoff to alleviate the job service pressure.
     */
    private long mQuietPeriodSeconds;
    /**
     * Creates a new instance of {@link PersistenceScheduler}.
     */
    PersistenceScheduler() {
      mQuietPeriodSeconds = 0;
    }
    @Override
    public void close() {} // Nothing to clean up
    /**
     * Updates the file system metadata to reflect the fact that the persist file request expired.
     *
     * @param fileId the file ID
     */
    private void handleExpired(long fileId) throws AlluxioException, UnavailableException {
      try (JournalContext journalContext = createJournalContext();
           LockedInodePath inodePath = mInodeTree
               .lockFullInodePath(fileId, LockPattern.WRITE_INODE)) {
        InodeFile inode = inodePath.getInodeFile();
        switch (inode.getPersistenceState()) {
          case LOST:
            // fall through
          case NOT_PERSISTED:
            // fall through
          case PERSISTED:
            LOG.warn("File {} (id={}) persistence state is {} and will not be changed.",
                inodePath.getUri(), fileId, inode.getPersistenceState());
            return;
          case TO_BE_PERSISTED:
            // Roll the inode back to NOT_PERSISTED and clear the job id / temp path, journaled.
            mInodeTree.updateInode(journalContext, UpdateInodeEntry.newBuilder()
                .setId(inode.getId())
                .setPersistenceState(PersistenceState.NOT_PERSISTED.name())
                .build());
            mInodeTree.updateInodeFile(journalContext, UpdateInodeFileEntry.newBuilder()
                .setId(inode.getId())
                .setPersistJobId(Constants.PERSISTENCE_INVALID_JOB_ID)
                .setTempUfsPath(Constants.PERSISTENCE_INVALID_UFS_PATH)
                .build());
            break;
          default:
            throw new IllegalStateException(
                "Unrecognized persistence state: " + inode.getPersistenceState());
        }
      }
    }
    /**
     * Attempts to schedule a persist job and updates the file system metadata accordingly.
     *
     * @param fileId the file ID
     */
    private void handleReady(long fileId) throws AlluxioException, IOException {
      alluxio.time.ExponentialTimer timer = mPersistRequests.get(fileId);
      // Lookup relevant file information.
      AlluxioURI uri;
      String tempUfsPath;
      // First pass: read the inode state under a READ lock only.
      try (LockedInodePath inodePath = mInodeTree.lockFullInodePath(fileId, LockPattern.READ)) {
        InodeFile inode = inodePath.getInodeFile();
        uri = inodePath.getUri();
        switch (inode.getPersistenceState()) {
          case LOST:
            // fall through
          case NOT_PERSISTED:
            // fall through
          case PERSISTED:
            LOG.warn("File {} (id={}) persistence state is {} and will not be changed.",
                inodePath.getUri(), fileId, inode.getPersistenceState());
            return;
          case TO_BE_PERSISTED:
            tempUfsPath = inodePath.getInodeFile().getTempUfsPath();
            break;
          default:
            throw new IllegalStateException(
                "Unrecognized persistence state: " + inode.getPersistenceState());
        }
      }
      MountTable.Resolution resolution = mMountTable.resolve(uri);
      try (CloseableResource<UnderFileSystem> ufsResource = resolution.acquireUfsResource()) {
        // If previous persist job failed, clean up the temporary file.
        cleanup(ufsResource.get(), tempUfsPath);
        // Generate a temporary path to be used by the persist job.
        // If the persist destination is on object store, let persist job copy files to destination
        // directly
        if (ServerConfiguration.getBoolean(PropertyKey.MASTER_UNSAFE_DIRECT_PERSIST_OBJECT_ENABLED)
            && ufsResource.get().isObjectStorage()) {
          tempUfsPath = resolution.getUri().toString();
        } else {
          tempUfsPath = PathUtils.temporaryFileName(
              System.currentTimeMillis(), resolution.getUri().toString());
        }
      }
      PersistConfig config =
          new PersistConfig(uri.getPath(), resolution.getMountId(), false, tempUfsPath);
      // Schedule the persist job.
      long jobId;
      JobMasterClient client = mJobMasterClientPool.acquire();
      try {
        jobId = client.run(config);
      } finally {
        mJobMasterClientPool.release(client);
      }
      // A successful submission halves the backoff quiet period.
      mQuietPeriodSeconds /= 2;
      mPersistJobs.put(fileId, new PersistJob(jobId, fileId, uri, tempUfsPath, timer));
      // Update the inode and journal the change.
      try (JournalContext journalContext = createJournalContext();
           LockedInodePath inodePath = mInodeTree
               .lockFullInodePath(fileId, LockPattern.WRITE_INODE)) {
        InodeFile inode = inodePath.getInodeFile();
        mInodeTree.updateInodeFile(journalContext, UpdateInodeFileEntry.newBuilder()
            .setId(inode.getId())
            .setPersistJobId(jobId)
            .setTempUfsPath(tempUfsPath)
            .build());
      }
    }
    /**
     * {@inheritDoc}
     *
     * The method iterates through the set of files to be persisted (identified by their ID) and
     * attempts to schedule a file persist job. Each iteration removes the file ID from the set
     * of files to be persisted unless the execution sets the {@code remove} flag to false.
     *
     * @throws InterruptedException if the thread is interrupted
     */
    @Override
    public void heartbeat() throws InterruptedException {
      // Backoff sleep; zero unless the job service previously reported exhaustion.
      java.util.concurrent.TimeUnit.SECONDS.sleep(mQuietPeriodSeconds);
      // Process persist requests.
      for (long fileId : mPersistRequests.keySet()) {
        // Throw if interrupted.
        if (Thread.interrupted()) {
          throw new InterruptedException("PersistenceScheduler interrupted.");
        }
        // Whether to drop this request in the finally block; cleared on retryable failures.
        boolean remove = true;
        alluxio.time.ExponentialTimer timer = mPersistRequests.get(fileId);
        if (timer == null) {
          // This could occur if a key is removed from mPersistRequests while we are iterating.
          continue;
        }
        alluxio.time.ExponentialTimer.Result timerResult = timer.tick();
        if (timerResult == alluxio.time.ExponentialTimer.Result.NOT_READY) {
          // operation is not ready to be scheduled
          continue;
        }
        AlluxioURI uri = null;
        try {
          try (LockedInodePath inodePath = mInodeTree
              .lockFullInodePath(fileId, LockPattern.READ)) {
            uri = inodePath.getUri();
          }
          try {
            checkUfsMode(uri, OperationType.WRITE);
          } catch (Exception e) {
            LOG.warn("Unable to schedule persist request for path {}: {}", uri, e.getMessage());
            // Retry when ufs mode permits operation
            remove = false;
            continue;
          }
          switch (timerResult) {
            case EXPIRED:
              handleExpired(fileId);
              break;
            case READY:
              handleReady(fileId);
              break;
            default:
              throw new IllegalStateException("Unrecognized timer state: " + timerResult);
          }
        } catch (FileDoesNotExistException | InvalidPathException e) {
          LOG.warn("The file {} (id={}) to be persisted was not found : {}", uri, fileId,
              e.getMessage());
          LOG.debug("Exception: ", e);
        } catch (UnavailableException e) {
          LOG.warn("Failed to persist file {}, will retry later: {}", uri, e.toString());
          remove = false;
        } catch (ResourceExhaustedException e) {
          LOG.warn("The job service is busy, will retry later: {}", e.toString());
          LOG.debug("Exception: ", e);
          // Double the quiet period (capped) so the next heartbeat backs off.
          mQuietPeriodSeconds = (mQuietPeriodSeconds == 0) ? 1 :
              Math.min(MAX_QUIET_PERIOD_SECONDS, mQuietPeriodSeconds * 2);
          remove = false;
          // End the method here until the next heartbeat. No more jobs should be scheduled during
          // the current heartbeat if the job master is at full capacity.
          return;
        } catch (Exception e) {
          LOG.warn("Unexpected exception encountered when scheduling the persist job for file {} "
              + "(id={}) : {}", uri, fileId, e.getMessage());
          LOG.debug("Exception: ", e);
        } finally {
          if (remove) {
            mPersistRequests.remove(fileId);
          }
        }
      }
    }
  }
  /**
   * Periodically polls for the result of the jobs and updates metadata accordingly.
   */
  @NotThreadSafe
  private final class PersistenceChecker implements alluxio.heartbeat.HeartbeatExecutor {
    /**
     * Creates a new instance of {@link PersistenceChecker}.
     */
    PersistenceChecker() {}
    @Override
    public void close() {} // nothing to clean up
    /**
     * Updates the file system metadata to reflect the fact that the persist job succeeded.
     *
     * NOTE: It is the responsibility of the caller to update {@link #mPersistJobs}.
     *
     * @param job the successful job
     */
    private void handleSuccess(PersistJob job) {
      long fileId = job.getFileId();
      String tempUfsPath = job.getTempUfsPath();
      List<Long> blockIds = new ArrayList<>();
      UfsManager.UfsClient ufsClient = null;
      try (JournalContext journalContext = createJournalContext();
           LockedInodePath inodePath = mInodeTree
               .lockFullInodePath(fileId, LockPattern.WRITE_INODE)) {
        InodeFile inode = inodePath.getInodeFile();
        MountTable.Resolution resolution = mMountTable.resolve(inodePath.getUri());
        ufsClient = mUfsManager.get(resolution.getMountId());
        switch (inode.getPersistenceState()) {
          case LOST:
            // fall through
          case NOT_PERSISTED:
            // fall through
          case PERSISTED:
            LOG.warn("File {} (id={}) persistence state is {}. Successful persist has no effect.",
                job.getUri(), fileId, inode.getPersistenceState());
            break;
          case TO_BE_PERSISTED:
            UpdateInodeEntry.Builder builder = UpdateInodeEntry.newBuilder();
            try (CloseableResource<UnderFileSystem> ufsResource = resolution.acquireUfsResource()) {
              UnderFileSystem ufs = ufsResource.get();
              String ufsPath = resolution.getUri().toString();
              // Align the persisted copy's ownership/mode with the inode before the rename.
              ufs.setOwner(tempUfsPath, inode.getOwner(), inode.getGroup());
              ufs.setMode(tempUfsPath, inode.getMode());
              if (!ufsPath.equals(tempUfsPath)) {
                // Make rename only when tempUfsPath is different from final ufsPath. Note that,
                // on object store, we take the optimization to skip the rename by having
                // tempUfsPath the same as final ufsPath.
                if (!ufs.renameRenamableFile(tempUfsPath, ufsPath)) {
                  throw new IOException(
                      String.format("Failed to rename %s to %s.", tempUfsPath, ufsPath));
                }
              }
              builder.setUfsFingerprint(ufs.getFingerprint(ufsPath));
            }
            // Clear the job id / temp path and mark the inode PERSISTED, all journaled.
            mInodeTree.updateInodeFile(journalContext, UpdateInodeFileEntry.newBuilder()
                .setId(inode.getId())
                .setPersistJobId(Constants.PERSISTENCE_INVALID_JOB_ID)
                .setTempUfsPath(Constants.PERSISTENCE_INVALID_UFS_PATH)
                .build());
            mInodeTree.updateInode(journalContext, builder
                .setId(inode.getId())
                .setPersistenceState(PersistenceState.PERSISTED.name())
                .build());
            propagatePersistedInternal(journalContext, inodePath);
            Metrics.FILES_PERSISTED.inc();
            // Save state for possible cleanup
            blockIds.addAll(inode.getBlockIds());
            break;
          default:
            throw new IllegalStateException(
                "Unrecognized persistence state: " + inode.getPersistenceState());
        }
      } catch (FileDoesNotExistException | InvalidPathException e) {
        LOG.warn("The file {} (id={}) to be persisted was not found: {}", job.getUri(), fileId,
            e.getMessage());
        LOG.debug("Exception: ", e);
        // Cleanup the temporary file.
        if (ufsClient != null) {
          try (CloseableResource<UnderFileSystem> ufsResource = ufsClient.acquireUfsResource()) {
            cleanup(ufsResource.get(), tempUfsPath);
          }
        }
      } catch (Exception e) {
        LOG.warn(
            "Unexpected exception encountered when trying to complete persistence of a file {} "
                + "(id={}) : {}",
            job.getUri(), fileId, e.getMessage());
        LOG.debug("Exception: ", e);
        if (ufsClient != null) {
          try (CloseableResource<UnderFileSystem> ufsResource = ufsClient.acquireUfsResource()) {
            cleanup(ufsResource.get(), tempUfsPath);
          }
        }
        // Re-queue the request so the scheduler retries persistence later.
        mPersistRequests.put(fileId, job.getTimer());
      }
      // Cleanup possible staging UFS blocks files due to fast durable write fallback.
      // Note that this is best effort
      if (ufsClient != null) {
        for (long blockId : blockIds) {
          String ufsBlockPath = alluxio.worker.BlockUtils.getUfsBlockPath(ufsClient, blockId);
          try (CloseableResource<UnderFileSystem> ufsResource = ufsClient.acquireUfsResource()) {
            alluxio.util.UnderFileSystemUtils.deleteFileIfExists(ufsResource.get(), ufsBlockPath);
          } catch (Exception e) {
            LOG.warn("Failed to clean up staging UFS block file {}: {}",
                ufsBlockPath, e.toString());
          }
        }
      }
    }
    @Override
    public void heartbeat() throws InterruptedException {
      // Sampled once per heartbeat: if the checker pool has queued work, skip scheduling more.
      boolean queueEmpty = mPersistCheckerPool.getQueue().isEmpty();
      // Check the progress of persist jobs.
      for (long fileId : mPersistJobs.keySet()) {
        // Throw if interrupted.
        if (Thread.interrupted()) {
          throw new InterruptedException("PersistenceChecker interrupted.");
        }
        final PersistJob job = mPersistJobs.get(fileId);
        if (job == null) {
          // This could happen if a key is removed from mPersistJobs while we are iterating.
          continue;
        }
        // Cancel any jobs marked as canceled
        switch (job.getCancelState()) {
          case NOT_CANCELED:
            break;
          case TO_BE_CANCELED:
            // Send the message to cancel this job
            JobMasterClient client = mJobMasterClientPool.acquire();
            try {
              client.cancel(job.getId());
              job.setCancelState(PersistJob.CancelState.CANCELING);
            } catch (alluxio.exception.status.NotFoundException e) {
              LOG.warn("Persist job (id={}) for file {} (id={}) to cancel was not found: {}",
                  job.getId(), job.getUri(), fileId, e.getMessage());
              LOG.debug("Exception: ", e);
              mPersistJobs.remove(fileId);
              continue;
            } catch (Exception e) {
              LOG.warn("Unexpected exception encountered when cancelling a persist job (id={}) for "
                  + "file {} (id={}) : {}", job.getId(), job.getUri(), fileId, e.getMessage());
              LOG.debug("Exception: ", e);
            } finally {
              mJobMasterClientPool.release(client);
            }
            continue;
          case CANCELING:
            break;
          default:
            throw new IllegalStateException("Unrecognized cancel state: " + job.getCancelState());
        }
        if (!queueEmpty) {
          // There are tasks waiting in the queue, so do not try to schedule anything
          continue;
        }
        long jobId = job.getId();
        JobMasterClient client = mJobMasterClientPool.acquire();
        try {
          JobInfo jobInfo = client.getJobStatus(jobId);
          switch (jobInfo.getStatus()) {
            case RUNNING:
              // fall through
            case CREATED:
              break;
            case FAILED:
              // Failed jobs are re-queued as persist requests for retry.
              LOG.warn("The persist job (id={}) for file {} (id={}) failed: {}", jobId,
                  job.getUri(), fileId, jobInfo.getErrorMessage());
              mPersistJobs.remove(fileId);
              mPersistRequests.put(fileId, job.getTimer());
              break;
            case CANCELED:
              mPersistJobs.remove(fileId);
              break;
            case COMPLETED:
              // Finalize metadata asynchronously on the checker pool.
              mPersistJobs.remove(fileId);
              mPersistCheckerPool.execute(() -> handleSuccess(job));
              break;
            default:
              throw new IllegalStateException("Unrecognized job status: " + jobInfo.getStatus());
          }
        } catch (Exception e) {
          LOG.warn("Exception encountered when trying to retrieve the status of a "
                  + " persist job (id={}) for file {} (id={}): {}.", jobId, job.getUri(), fileId,
              e.getMessage());
          LOG.debug("Exception: ", e);
          mPersistJobs.remove(fileId);
          mPersistRequests.put(fileId, job.getTimer());
        } finally {
          mJobMasterClientPool.release(client);
        }
      }
    }
  }
  /**
   * Periodically samples cluster metric gauges and records them into the time-series store for
   * the web UI.
   */
  @NotThreadSafe
  private final class TimeSeriesRecorder implements alluxio.heartbeat.HeartbeatExecutor {
    @Override
    public void heartbeat() throws InterruptedException {
      // TODO(calvin): Provide a better way to keep track of metrics collected as time series
      MetricRegistry registry = MetricsSystem.METRIC_REGISTRY;
      SortedMap<String, Gauge> gauges = registry.getGauges();
      // % Alluxio space used
      Long masterCapacityTotal = (Long) gauges
          .get(MetricKey.CLUSTER_CAPACITY_TOTAL.getName()).getValue();
      Long masterCapacityUsed = (Long) gauges
          .get(MetricKey.CLUSTER_CAPACITY_USED.getName()).getValue();
      // Guard against division by zero when the cluster reports no capacity.
      int percentAlluxioSpaceUsed =
          (masterCapacityTotal > 0) ? (int) (100L * masterCapacityUsed / masterCapacityTotal) : 0;
      mTimeSeriesStore.record("% Alluxio Space Used", percentAlluxioSpaceUsed);
      // % UFS space used
      Long masterUnderfsCapacityTotal = (Long) gauges
          .get(MetricKey.CLUSTER_ROOT_UFS_CAPACITY_TOTAL.getName()).getValue();
      Long masterUnderfsCapacityUsed =
          (Long) gauges
              .get(MetricKey.CLUSTER_ROOT_UFS_CAPACITY_USED.getName()).getValue();
      int percentUfsSpaceUsed =
          (masterUnderfsCapacityTotal > 0) ? (int) (100L * masterUnderfsCapacityUsed
              / masterUnderfsCapacityTotal) : 0;
      mTimeSeriesStore.record("% UFS Space Used", percentUfsSpaceUsed);
      // Bytes read
      Long bytesReadLocalThroughput = (Long) gauges.get(
          MetricKey.CLUSTER_BYTES_READ_LOCAL_THROUGHPUT.getName()).getValue();
      Long bytesReadDomainSocketThroughput = (Long) gauges
          .get(MetricKey.CLUSTER_BYTES_READ_DOMAIN_THROUGHPUT.getName()).getValue();
      Long bytesReadRemoteThroughput = (Long) gauges
          .get(MetricKey.CLUSTER_BYTES_READ_ALLUXIO_THROUGHPUT.getName()).getValue();
      Long bytesReadUfsThroughput = (Long) gauges
          .get(MetricKey.CLUSTER_BYTES_READ_UFS_THROUGHPUT.getName()).getValue();
      mTimeSeriesStore.record(MetricKey.CLUSTER_BYTES_READ_LOCAL_THROUGHPUT.getName(),
          bytesReadLocalThroughput);
      mTimeSeriesStore.record(MetricKey.CLUSTER_BYTES_READ_DOMAIN_THROUGHPUT.getName(),
          bytesReadDomainSocketThroughput);
      mTimeSeriesStore.record(MetricKey.CLUSTER_BYTES_READ_ALLUXIO_THROUGHPUT.getName(),
          bytesReadRemoteThroughput);
      mTimeSeriesStore.record(MetricKey.CLUSTER_BYTES_READ_UFS_THROUGHPUT.getName(),
          bytesReadUfsThroughput);
      // Bytes written
      Long bytesWrittenLocalThroughput = (Long) gauges
          .get(MetricKey.CLUSTER_BYTES_WRITTEN_LOCAL_THROUGHPUT.getName())
          .getValue();
      Long bytesWrittenAlluxioThroughput = (Long) gauges
          .get(MetricKey.CLUSTER_BYTES_WRITTEN_ALLUXIO_THROUGHPUT.getName()).getValue();
      Long bytesWrittenDomainSocketThroughput = (Long) gauges.get(
          MetricKey.CLUSTER_BYTES_WRITTEN_DOMAIN_THROUGHPUT.getName()).getValue();
      Long bytesWrittenUfsThroughput = (Long) gauges
          .get(MetricKey.CLUSTER_BYTES_WRITTEN_UFS_THROUGHPUT.getName()).getValue();
      mTimeSeriesStore.record(MetricKey.CLUSTER_BYTES_WRITTEN_LOCAL_THROUGHPUT.getName(),
          bytesWrittenLocalThroughput);
      mTimeSeriesStore.record(MetricKey.CLUSTER_BYTES_WRITTEN_ALLUXIO_THROUGHPUT.getName(),
          bytesWrittenAlluxioThroughput);
      mTimeSeriesStore.record(MetricKey.CLUSTER_BYTES_WRITTEN_DOMAIN_THROUGHPUT.getName(),
          bytesWrittenDomainSocketThroughput);
      mTimeSeriesStore.record(MetricKey.CLUSTER_BYTES_WRITTEN_UFS_THROUGHPUT.getName(),
          bytesWrittenUfsThroughput);
    }
    @Override
    public void close() {} // Nothing to clean up.
  }
private static void cleanup(UnderFileSystem ufs, String ufsPath) {
final String errMessage = "Failed to delete UFS file {}.";
if (!ufsPath.isEmpty()) {
try {
if (!ufs.deleteExistingFile(ufsPath)) {
LOG.warn(errMessage, ufsPath);
}
} catch (IOException e) {
LOG.warn(errMessage, ufsPath, e);
}
}
}
  @Override
  public void updateUfsMode(AlluxioURI ufsPath, UfsMode ufsMode) throws InvalidPathException,
      InvalidArgumentException, UnavailableException, AccessControlException {
    // TODO(adit): Create new fsadmin audit context
    try (RpcContext rpcContext = createRpcContext();
         FileSystemMasterAuditContext auditContext =
             createAuditContext("updateUfsMode", ufsPath, null, null)) {
      // The mode change is journaled by the UFS manager under this RPC context.
      mUfsManager.setUfsMode(rpcContext, ufsPath, ufsMode);
      auditContext.setSucceeded(true);
    }
  }
  /**
   * Check if the specified operation type is allowed to the ufs.
   *
   * @param alluxioPath the Alluxio path
   * @param opType the operation type
   * @throws AccessControlException if the UFS mode disallows the operation (NO_ACCESS, or a write
   *         against a READ_ONLY UFS)
   * @throws InvalidPathException if the path cannot be resolved in the mount table
   */
  private void checkUfsMode(AlluxioURI alluxioPath, OperationType opType)
      throws AccessControlException, InvalidPathException {
    MountTable.Resolution resolution = mMountTable.resolve(alluxioPath);
    try (CloseableResource<UnderFileSystem> ufsResource = resolution.acquireUfsResource()) {
      UnderFileSystem ufs = ufsResource.get();
      // The effective mode is derived from the state of the UFS's physical stores.
      UfsMode ufsMode =
          ufs.getOperationMode(mUfsManager.getPhysicalUfsState(ufs.getPhysicalStores()));
      switch (ufsMode) {
        case NO_ACCESS:
          throw new AccessControlException(ExceptionMessage.UFS_OP_NOT_ALLOWED.getMessage(opType,
              resolution.getUri(), UfsMode.NO_ACCESS));
        case READ_ONLY:
          if (opType == OperationType.WRITE) {
            throw new AccessControlException(ExceptionMessage.UFS_OP_NOT_ALLOWED.getMessage(opType,
                resolution.getUri(), UfsMode.READ_ONLY));
          }
          break;
        default:
          // All operations are allowed
          break;
      }
    }
  }
/**
* The operation type. This class is used to check if an operation to the under storage is allowed
* during maintenance.
*/
enum OperationType {
READ,
WRITE,
}
/**
* Class that contains metrics for FileSystemMaster.
* This class is public because the counter names are referenced in
* {@link alluxio.web.WebInterfaceAbstractMetricsServlet}.
*/
public static final class Metrics {
private static final Counter DIRECTORIES_CREATED
= MetricsSystem.counter(MetricKey.MASTER_DIRECTORIES_CREATED.getName());
private static final Counter FILE_BLOCK_INFOS_GOT
= MetricsSystem.counter(MetricKey.MASTER_FILE_BLOCK_INFOS_GOT.getName());
private static final Counter FILE_INFOS_GOT
= MetricsSystem.counter(MetricKey.MASTER_FILE_INFOS_GOT.getName());
// Per-master counters for file-system events. Each is registered with the
// MetricsSystem under the corresponding MetricKey name.
private static final Counter FILES_COMPLETED
= MetricsSystem.counter(MetricKey.MASTER_FILES_COMPLETED.getName());
private static final Counter FILES_CREATED
= MetricsSystem.counter(MetricKey.MASTER_FILES_CREATED.getName());
private static final Counter FILES_FREED
= MetricsSystem.counter(MetricKey.MASTER_FILES_FREED.getName());
private static final Counter FILES_PERSISTED
= MetricsSystem.counter(MetricKey.MASTER_FILES_PERSISTED.getName());
private static final Counter NEW_BLOCKS_GOT
= MetricsSystem.counter(MetricKey.MASTER_NEW_BLOCKS_GOT.getName());
private static final Counter PATHS_DELETED
= MetricsSystem.counter(MetricKey.MASTER_PATHS_DELETED.getName());
private static final Counter PATHS_MOUNTED
= MetricsSystem.counter(MetricKey.MASTER_PATHS_MOUNTED.getName());
private static final Counter PATHS_RENAMED
= MetricsSystem.counter(MetricKey.MASTER_PATHS_RENAMED.getName());
private static final Counter PATHS_UNMOUNTED
= MetricsSystem.counter(MetricKey.MASTER_PATHS_UNMOUNTED.getName());
// Counters for individual RPC operation types.
// TODO(peis): Increment the RPCs OPs at the place where we receive the RPCs.
private static final Counter COMPLETE_FILE_OPS
= MetricsSystem.counter(MetricKey.MASTER_COMPLETE_FILE_OPS.getName());
private static final Counter CREATE_DIRECTORIES_OPS
= MetricsSystem.counter(MetricKey.MASTER_CREATE_DIRECTORIES_OPS.getName());
private static final Counter CREATE_FILES_OPS
= MetricsSystem.counter(MetricKey.MASTER_CREATE_FILES_OPS.getName());
private static final Counter DELETE_PATHS_OPS
= MetricsSystem.counter(MetricKey.MASTER_DELETE_PATHS_OPS.getName());
private static final Counter FREE_FILE_OPS
= MetricsSystem.counter(MetricKey.MASTER_FREE_FILE_OPS.getName());
private static final Counter GET_FILE_BLOCK_INFO_OPS
= MetricsSystem.counter(MetricKey.MASTER_GET_FILE_BLOCK_INFO_OPS.getName());
private static final Counter GET_FILE_INFO_OPS
= MetricsSystem.counter(MetricKey.MASTER_GET_FILE_INFO_OPS.getName());
private static final Counter GET_NEW_BLOCK_OPS
= MetricsSystem.counter(MetricKey.MASTER_GET_NEW_BLOCK_OPS.getName());
private static final Counter MOUNT_OPS
= MetricsSystem.counter(MetricKey.MASTER_MOUNT_OPS.getName());
private static final Counter RENAME_PATH_OPS
= MetricsSystem.counter(MetricKey.MASTER_RENAME_PATH_OPS.getName());
private static final Counter SET_ACL_OPS
= MetricsSystem.counter(MetricKey.MASTER_SET_ACL_OPS.getName());
private static final Counter SET_ATTRIBUTE_OPS
= MetricsSystem.counter(MetricKey.MASTER_SET_ATTRIBUTE_OPS.getName());
private static final Counter UNMOUNT_OPS
= MetricsSystem.counter(MetricKey.MASTER_UNMOUNT_OPS.getName());
// Lazily-populated counters of "saved" UFS operations, keyed first by UFS
// path, then by operation type; populated on demand by getUfsCounter().
private static final Map<String, Map<UFSOps, Counter>> SAVED_UFS_OPS
= new ConcurrentHashMap<>();
/**
 * UFS operations enum. Used as the second-level key of {@code SAVED_UFS_OPS}
 * in {@link #getUfsCounter} to count, per UFS path, operations against the
 * under file system that were recorded as "saved" (see
 * {@code UFS_OP_SAVED_PREFIX}).
 */
public enum UFSOps {
CREATE_FILE, GET_FILE_INFO, DELETE_FILE, LIST_STATUS
}
/**
 * Gets (lazily creating and registering) the counter that tracks saved
 * operations of the given type against the given UFS path.
 *
 * <p>Uses {@link java.util.Map#computeIfAbsent} instead of the previous
 * {@code compute(..., v != null ? v : new ...)} pattern: it expresses
 * get-or-create directly and does not re-store the existing value on every
 * lookup.
 *
 * @param ufsPath ufsPath
 * @param ufsOp ufs operation
 * @return the counter object
 */
@VisibleForTesting
public static Counter getUfsCounter(String ufsPath, UFSOps ufsOp) {
  return SAVED_UFS_OPS
      .computeIfAbsent(ufsPath, path -> new ConcurrentHashMap<>())
      .computeIfAbsent(ufsOp, op -> MetricsSystem.counter(
          Metric.getMetricNameWithTags(UFS_OP_SAVED_PREFIX + op.name(),
              MetricInfo.TAG_UFS, MetricsSystem.escape(new AlluxioURI(ufsPath)))));
}
/**
 * Register some file system master related gauges.
 *
 * @param master the file system master
 * @param ufsManager the under filesystem manager
 */
@VisibleForTesting
public static void registerGauges(
    final FileSystemMaster master, final UfsManager ufsManager) {
  MetricsSystem.registerGaugeIfAbsent(MetricKey.MASTER_FILES_PINNED.getName(),
      master::getNumberOfPinnedFiles);
  MetricsSystem.registerGaugeIfAbsent(MetricKey.MASTER_TOTAL_PATHS.getName(),
      () -> master.getInodeCount());
  final String ufsDataFolder = ServerConfiguration.get(PropertyKey.MASTER_MOUNT_TABLE_ROOT_UFS);
  // The three root-UFS capacity gauges differ only in metric name and space
  // type. On IOException each now reports 0; previously the TOTAL and USED
  // gauges returned Stream.empty(), which is not a numeric gauge value and
  // was inconsistent with the FREE gauge's 0L fallback.
  registerRootUfsSpaceGauge(ufsManager, ufsDataFolder,
      MetricKey.CLUSTER_ROOT_UFS_CAPACITY_TOTAL.getName(),
      UnderFileSystem.SpaceType.SPACE_TOTAL);
  registerRootUfsSpaceGauge(ufsManager, ufsDataFolder,
      MetricKey.CLUSTER_ROOT_UFS_CAPACITY_USED.getName(),
      UnderFileSystem.SpaceType.SPACE_USED);
  registerRootUfsSpaceGauge(ufsManager, ufsDataFolder,
      MetricKey.CLUSTER_ROOT_UFS_CAPACITY_FREE.getName(),
      UnderFileSystem.SpaceType.SPACE_FREE);
}

/**
 * Registers a gauge reporting the given space statistic of the root UFS.
 * Reports 0 and logs the error if the UFS cannot be queried.
 *
 * @param ufsManager the under filesystem manager
 * @param ufsDataFolder the root UFS data folder to query
 * @param metricName the gauge's metric name
 * @param spaceType which space statistic (total/used/free) to report
 */
private static void registerRootUfsSpaceGauge(UfsManager ufsManager, String ufsDataFolder,
    String metricName, UnderFileSystem.SpaceType spaceType) {
  MetricsSystem.registerGaugeIfAbsent(metricName, () -> {
    long space = 0L;
    try (CloseableResource<UnderFileSystem> ufsResource =
        ufsManager.getRoot().acquireUfsResource()) {
      space = ufsResource.get().getSpace(ufsDataFolder, spaceType);
    } catch (IOException e) {
      LOG.error(e.getMessage(), e);
    }
    return space;
  });
}
/** Static-only metrics holder; never instantiated. */
private Metrics() {} // prevent instantiation
}
/**
 * Creates a {@link FileSystemMasterAuditContext} instance.
 *
 * <p>When audit logging is enabled (an async audit log writer is present),
 * the context is pre-populated with the caller's user/group identity, the
 * authentication type, client IP, and the command's source/destination.
 *
 * @param command the command to be logged by this {@link AuditContext}
 * @param srcPath the source path of this command
 * @param dstPath the destination path of this command
 * @param srcInode the source inode of this command
 * @return newly-created {@link FileSystemMasterAuditContext} instance
 */
private FileSystemMasterAuditContext createAuditContext(String command, AlluxioURI srcPath,
    @Nullable AlluxioURI dstPath, @Nullable Inode srcInode) {
  FileSystemMasterAuditContext auditContext =
      new FileSystemMasterAuditContext(mAsyncAuditLogWriter);
  if (mAsyncAuditLogWriter == null) {
    // Audit logging disabled: return the bare context without populating it.
    return auditContext;
  }
  String user = null;
  String ugi = "";
  try {
    user = AuthenticatedClientUser.getClientUser(ServerConfiguration.global());
  } catch (AccessControlException e) {
    ugi = "N/A";
  }
  if (user != null) {
    // "user,primaryGroup", with "N/A" as the group when lookup fails.
    String group;
    try {
      group = CommonUtils.getPrimaryGroupName(user, ServerConfiguration.global());
    } catch (IOException e) {
      LOG.debug("Failed to get primary group for user {}.", user);
      group = "N/A";
    }
    ugi = user + "," + group;
  }
  AuthType authType =
      ServerConfiguration.getEnum(PropertyKey.SECURITY_AUTHENTICATION_TYPE, AuthType.class);
  auditContext.setUgi(ugi)
      .setAuthType(authType)
      .setIp(ClientIpAddressInjector.getIpAddress())
      .setCommand(command).setSrcPath(srcPath).setDstPath(dstPath)
      .setSrcInode(srcInode).setAllowed(true);
  return auditContext;
}
/**
 * @return a block-deletion context that removes the blocks from the block
 *         master and invalidates their cached UFS locations
 */
private BlockDeletionContext createBlockDeletionContext() {
  return new DefaultBlockDeletionContext(
      this::removeBlocks,
      blocks -> blocks.forEach(block -> mUfsBlockLocationCache.invalidate(block)));
}
/**
 * Removes the given deleted blocks from the block master, retrying a few
 * times on transient unavailability.
 *
 * @param blocks the block ids to remove; a no-op when empty
 * @throws IOException if the block master remains unavailable after all
 *         retries (the last {@link UnavailableException} is the cause)
 */
private void removeBlocks(List<Long> blocks) throws IOException {
  if (blocks.isEmpty()) {
    return;
  }
  IOException lastThrown = null;
  for (RetryPolicy retry = new CountingRetry(3); retry.attempt(); ) {
    try {
      mBlockMaster.removeBlocks(blocks, true);
      return;
    } catch (UnavailableException e) {
      lastThrown = e;
    }
  }
  throw new IOException("Failed to remove deleted blocks from block master", lastThrown);
}
/**
 * Creates a context for executing an RPC on behalf of an internal
 * (non-user) operation.
 *
 * @return a context for executing an RPC
 * @throws UnavailableException if the context cannot be created
 */
@VisibleForTesting
public RpcContext createRpcContext() throws UnavailableException {
return createRpcContext(new InternalOperationContext());
}
/**
 * Creates a context for executing an RPC, combining a fresh block-deletion
 * context, a fresh journal context, and the given operation context tracked
 * against the master state lock.
 *
 * @param operationContext the operation context
 * @return a context for executing an RPC
 * @throws UnavailableException if the context cannot be created
 */
@VisibleForTesting
public RpcContext createRpcContext(OperationContext operationContext)
throws UnavailableException {
return new RpcContext(createBlockDeletionContext(), createJournalContext(),
operationContext.withTracker(mStateLockCallTracker));
}
/**
 * Creates a locking scheme for the given path with the desired lock pattern.
 * The final {@code false} argument disables the get-file-info flag passed to
 * {@link LockingScheme}.
 *
 * @param path the path to lock
 * @param options common options forwarded to the locking scheme
 * @param desiredLockMode the lock pattern to use
 * @return the locking scheme
 */
private LockingScheme createLockingScheme(AlluxioURI path, FileSystemMasterCommonPOptions options,
LockPattern desiredLockMode) {
return new LockingScheme(path, desiredLockMode, options, mUfsSyncPathCache, false);
}
/**
 * Creates a locking scheme for a sync, always using the READ lock pattern.
 *
 * @param path the path to lock
 * @param options common options forwarded to the locking scheme
 * @param isGetFileInfo whether the sync is on behalf of a get-file-info call
 * @return the locking scheme
 */
private LockingScheme createSyncLockingScheme(AlluxioURI path,
FileSystemMasterCommonPOptions options, boolean isGetFileInfo) {
return new LockingScheme(path, LockPattern.READ, options, mUfsSyncPathCache, isGetFileInfo);
}
/**
 * @return whether authorization permission checking is enabled in the server
 *         configuration
 */
boolean isAclEnabled() {
return ServerConfiguration.getBoolean(PropertyKey.SECURITY_AUTHORIZATION_PERMISSION_ENABLED);
}
// Exposes the master's recorded time series by delegating to the store.
@Override
public List<TimeSeries> getTimeSeries() {
return mTimeSeriesStore.getTimeSeries();
}
/**
 * Maps a UFS URI back to its Alluxio URI via the mount table.
 *
 * @param ufsUri the UFS uri to resolve
 * @return the corresponding Alluxio uri
 * @throws InvalidPathException if the uri does not fall under any mount point
 */
@Override
public AlluxioURI reverseResolve(AlluxioURI ufsUri) throws InvalidPathException {
  MountTable.ReverseResolution resolution = mMountTable.reverseResolve(ufsUri);
  if (resolution != null) {
    return resolution.getUri();
  }
  throw new InvalidPathException(ufsUri.toString() + " is not a valid ufs uri");
}
@Override
@Nullable
public String getRootInodeOwner() {
// Owner (user name) recorded on the root inode; may be null per @Nullable.
return mInodeTree.getRootUserName();
}
}
| EvilMcJerkface/alluxio | core/server/master/src/main/java/alluxio/master/file/DefaultFileSystemMaster.java | Java | apache-2.0 | 193,983 |
// Intercept every AJAX request and answer it immediately with the bundled
// example data, so the demo works without a real backend.
$.mockjax({
    url: "*",
    responseTime: 0,
    response: function (settings) {
        this.responseText = ExampleData.exampleData;
    }
});

// Initialize the tree widget once the DOM is ready.
$(document).ready(function () {
    $("#tree1").tree();
});
| mbraak/jqTree | static/examples/load_json_data_from_server.js | JavaScript | apache-2.0 | 188 |
using System;
using System.Configuration;
using MyWpfFramework.Common.MVVM;
namespace MyWpfFramework.Common.Config
{
/// <summary>
/// Reads and writes settings in the application's config file.
/// </summary>
public class ConfigSetGet : IConfigSetGet
{
    /// <summary>
    /// Reads a setting from the app.config file.
    /// </summary>
    /// <param name="key">The setting's key.</param>
    /// <returns>The value stored under the key.</returns>
    public string GetConfigData(string key)
    {
        // Don't load configuration at design time (e.g. inside the designer).
        if (Designer.IsInDesignModeStatic)
            return "0";
        var configuration = ConfigurationManager.OpenExeConfiguration(ConfigurationUserLevel.None);
        var appSettings = configuration.AppSettings;
        // Settings[key] returns null for a missing key, so the element itself
        // must be checked. The previous code dereferenced .Value first, which
        // threw NullReferenceException instead of the intended exception.
        var setting = appSettings.Settings[key];
        if (setting == null) throw new Exception("Undefined: " + key);
        return setting.Value;
    }

    /// <summary>
    /// Stores a setting in the application's config file.
    /// </summary>
    /// <param name="key">The setting's key.</param>
    /// <param name="data">The value to store.</param>
    public void SetConfigData(string key, string data)
    {
        var config = ConfigurationManager.OpenExeConfiguration(ConfigurationUserLevel.None);
        var setting = config.AppSettings.Settings[key];
        if (setting == null)
        {
            // Create the entry instead of throwing NullReferenceException
            // when the key does not exist yet.
            config.AppSettings.Settings.Add(key, data);
        }
        else
        {
            setting.Value = data;
        }
        config.Save(ConfigurationSaveMode.Modified);
        ConfigurationManager.RefreshSection("appSettings");
    }
}
} | VahidN/WpfFramework | MyWpfFramework.Common/Config/ConfigSetGet.cs | C# | apache-2.0 | 1,554 |
namespace GraphSharp.Algorithms.Layout.Simple.FDP
{
    /// <summary>
    /// Cooling-schedule shapes for the FR (Fruchterman-Reingold style)
    /// force-directed layout. Presumably selects whether the layout's
    /// temperature decays linearly or exponentially per iteration —
    /// confirm against the FR layout algorithm implementation.
    /// </summary>
    public enum FRCoolingFunction
    {
        Linear,
        Exponential
    }
}
| FTSRG/seviz | Source/Graph#/Algorithms/Layout/Simple/FDP/FRCoolingFunction.cs | C# | apache-2.0 | 139 |
package main

import "fmt"

//var (
//	samples = []int{}
//	b = 1
//)

// main exercises appending to a slice initialized from an empty composite
// literal. NOTE(review): this looks like an interpreter test fixture — the
// trailing "// Output:" comment is matched against stdout, so it must not
// be altered.
func main() {
	var samples = []int{}
	samples = append(samples, 1)
	fmt.Println(samples)
}

// Output:
// [1]
| containous/yaegi | _test/a9.go | GO | apache-2.0 | 187 |
/**
* @author Hincu Andrei (andreih1981@gmail.com)on 01.12.2017.
* @version $Id$.
* @since 0.1.
*/
package ru.job4j.jdbc; | andreiHi/hincuA | chapter_008/src/main/java/ru/job4j/jdbc/package-info.java | Java | apache-2.0 | 125 |
//
// Copyright 2017 Bryan T. Meyers <bmeyers@datadrake.com>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package types
import (
"fmt"
"github.com/DataDrake/csv-analyze/tests"
"io"
)
// groupFormat renders a group heading (bright-cyan, underlined ANSI codes)
// followed by a newline.
const groupFormat = "\t\033[96m\033[4m%s\033[0m\n"
// Group is a set of Type tests to run for a set of related values
type Group struct {
// tests maps a category name to the tests run for that category.
tests map[string][]tests.Test
// names fixes the display order of the categories in tests.
names []string
}
// NewGroup creates a new test group for the type tests, with the category
// tests keyed by name and a fixed display order.
func NewGroup() tests.Group {
	byCategory := map[string][]tests.Test{
		"Numerical": {
			NewUnsignedTest(),
			NewSignedTest(),
			NewFloatTest(),
		},
		"Logical":  {NewBooleanTest()},
		"DateTime": {NewTimeTest()},
		"String":   {NewStringTest()},
	}
	order := []string{"Numerical", "Logical", "DateTime", "String"}
	return &Group{tests: byCategory, names: order}
}
// Run hands the same cell value to every test in every category.
func (g *Group) Run(cell string) {
	for _, categoryTests := range g.tests {
		for _, test := range categoryTests {
			test.Run(cell)
		}
	}
}
// PrintResult writes out a heading for each category (in declaration order)
// followed by the results of that category's tests.
func (g *Group) PrintResult(dst io.Writer) {
	for _, category := range g.names {
		fmt.Fprintf(dst, groupFormat, category)
		for _, test := range g.tests[category] {
			test.PrintResult(dst)
		}
	}
}
| DataDrake/csv-analyze | tests/types/group.go | GO | apache-2.0 | 1,759 |
#
# Copyright 2013-2016, Noah Kantrowitz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'poise_boiler/spec_helper'

# Outside Windows there is no real win32/process, so put our fake
# implementation at the front of the load path.
on_windows = RbConfig::CONFIG['host_os'] =~ /mswin|mingw|cygwin/
unless on_windows
  $LOAD_PATH.unshift(File.expand_path('../utils/win32_helper', __FILE__))
end

require 'poise'
| poise/poise | test/spec/spec_helper.rb | Ruby | apache-2.0 | 843 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.drill.exec;
import org.apache.drill.exec.physical.impl.common.HashTable;
import org.apache.drill.exec.rpc.user.InboundImpersonationManager;
import org.apache.drill.exec.server.options.OptionValidator;
import org.apache.drill.exec.server.options.TypeValidators.BooleanValidator;
import org.apache.drill.exec.server.options.TypeValidators.DoubleValidator;
import org.apache.drill.exec.server.options.TypeValidators.EnumeratedStringValidator;
import org.apache.drill.exec.server.options.TypeValidators.LongValidator;
import org.apache.drill.exec.server.options.TypeValidators.MaxWidthValidator;
import org.apache.drill.exec.server.options.TypeValidators.PositiveLongValidator;
import org.apache.drill.exec.server.options.TypeValidators.PowerOfTwoLongValidator;
import org.apache.drill.exec.server.options.TypeValidators.RangeDoubleValidator;
import org.apache.drill.exec.server.options.TypeValidators.RangeLongValidator;
import org.apache.drill.exec.server.options.TypeValidators.StringValidator;
import org.apache.drill.exec.server.options.TypeValidators.AdminUsersValidator;
import org.apache.drill.exec.server.options.TypeValidators.AdminUserGroupsValidator;
import org.apache.drill.exec.testing.ExecutionControls;
public final class ExecConstants {
private ExecConstants() {
// Don't allow instantiation
}
public static final String ZK_RETRY_TIMES = "drill.exec.zk.retry.count";
public static final String ZK_RETRY_DELAY = "drill.exec.zk.retry.delay";
public static final String ZK_CONNECTION = "drill.exec.zk.connect";
public static final String ZK_TIMEOUT = "drill.exec.zk.timeout";
public static final String ZK_ROOT = "drill.exec.zk.root";
public static final String ZK_REFRESH = "drill.exec.zk.refresh";
public static final String BIT_RETRY_TIMES = "drill.exec.rpc.bit.server.retry.count";
public static final String BIT_RETRY_DELAY = "drill.exec.rpc.bit.server.retry.delay";
public static final String BIT_TIMEOUT = "drill.exec.bit.timeout" ;
public static final String SERVICE_NAME = "drill.exec.cluster-id";
public static final String INITIAL_BIT_PORT = "drill.exec.rpc.bit.server.port";
public static final String INITIAL_DATA_PORT = "drill.exec.rpc.bit.server.dataport";
public static final String BIT_RPC_TIMEOUT = "drill.exec.rpc.bit.timeout";
public static final String INITIAL_USER_PORT = "drill.exec.rpc.user.server.port";
public static final String USER_RPC_TIMEOUT = "drill.exec.rpc.user.timeout";
public static final String METRICS_CONTEXT_NAME = "drill.exec.metrics.context";
public static final String USE_IP_ADDRESS = "drill.exec.rpc.use.ip";
public static final String CLIENT_RPC_THREADS = "drill.exec.rpc.user.client.threads";
public static final String BIT_SERVER_RPC_THREADS = "drill.exec.rpc.bit.server.threads";
public static final String USER_SERVER_RPC_THREADS = "drill.exec.rpc.user.server.threads";
public static final String FRAG_RUNNER_RPC_TIMEOUT = "drill.exec.rpc.fragrunner.timeout";
public static final PositiveLongValidator FRAG_RUNNER_RPC_TIMEOUT_VALIDATOR = new PositiveLongValidator(FRAG_RUNNER_RPC_TIMEOUT, Long.MAX_VALUE);
public static final String TRACE_DUMP_DIRECTORY = "drill.exec.trace.directory";
public static final String TRACE_DUMP_FILESYSTEM = "drill.exec.trace.filesystem";
public static final String TEMP_DIRECTORIES = "drill.exec.tmp.directories";
public static final String TEMP_FILESYSTEM = "drill.exec.tmp.filesystem";
public static final String INCOMING_BUFFER_IMPL = "drill.exec.buffer.impl";
/** incoming buffer size (number of batches) */
public static final String INCOMING_BUFFER_SIZE = "drill.exec.buffer.size";
public static final String SPOOLING_BUFFER_DELETE = "drill.exec.buffer.spooling.delete";
public static final String SPOOLING_BUFFER_MEMORY = "drill.exec.buffer.spooling.size";
public static final String BATCH_PURGE_THRESHOLD = "drill.exec.sort.purge.threshold";
// Spill boot-time Options common to all spilling operators
// (Each individual operator may override the common options)
public static final String SPILL_FILESYSTEM = "drill.exec.spill.fs";
public static final String SPILL_DIRS = "drill.exec.spill.directories";
public static final String OUTPUT_BATCH_SIZE = "drill.exec.memory.operator.output_batch_size";
// Output Batch Size in Bytes. We have a small lower bound so we can test with unit tests without the
// need to produce very large batches that take up lot of memory.
public static final LongValidator OUTPUT_BATCH_SIZE_VALIDATOR = new RangeLongValidator(OUTPUT_BATCH_SIZE, 128, 512 * 1024 * 1024);
// External Sort Boot configuration
public static final String EXTERNAL_SORT_TARGET_SPILL_BATCH_SIZE = "drill.exec.sort.external.spill.batch.size";
public static final String EXTERNAL_SORT_SPILL_GROUP_SIZE = "drill.exec.sort.external.spill.group.size";
public static final String EXTERNAL_SORT_SPILL_THRESHOLD = "drill.exec.sort.external.spill.threshold";
public static final String EXTERNAL_SORT_SPILL_DIRS = "drill.exec.sort.external.spill.directories";
public static final String EXTERNAL_SORT_SPILL_FILESYSTEM = "drill.exec.sort.external.spill.fs";
public static final String EXTERNAL_SORT_SPILL_FILE_SIZE = "drill.exec.sort.external.spill.file_size";
public static final String EXTERNAL_SORT_MSORT_MAX_BATCHSIZE = "drill.exec.sort.external.msort.batch.maxsize";
public static final String EXTERNAL_SORT_DISABLE_MANAGED = "drill.exec.sort.external.disable_managed";
public static final String EXTERNAL_SORT_MERGE_LIMIT = "drill.exec.sort.external.merge_limit";
public static final String EXTERNAL_SORT_SPILL_BATCH_SIZE = "drill.exec.sort.external.spill.spill_batch_size";
public static final String EXTERNAL_SORT_MERGE_BATCH_SIZE = "drill.exec.sort.external.spill.merge_batch_size";
public static final String EXTERNAL_SORT_MAX_MEMORY = "drill.exec.sort.external.mem_limit";
public static final String EXTERNAL_SORT_BATCH_LIMIT = "drill.exec.sort.external.batch_limit";
// External Sort Runtime options
public static final BooleanValidator EXTERNAL_SORT_DISABLE_MANAGED_OPTION = new BooleanValidator("exec.sort.disable_managed");
// Hash Aggregate Options
public static final String HASHAGG_NUM_PARTITIONS_KEY = "exec.hashagg.num_partitions";
public static final LongValidator HASHAGG_NUM_PARTITIONS_VALIDATOR = new RangeLongValidator(HASHAGG_NUM_PARTITIONS_KEY, 1, 128); // 1 means - no spilling
public static final String HASHAGG_MAX_MEMORY_KEY = "exec.hashagg.mem_limit";
public static final LongValidator HASHAGG_MAX_MEMORY_VALIDATOR = new RangeLongValidator(HASHAGG_MAX_MEMORY_KEY, 0, Integer.MAX_VALUE);
// min batches is used for tuning (each partition needs so many batches when planning the number of partitions,
// or reserve this number when calculating whether the remaining available memory is too small and requires a spill.)
// Low value may OOM (e.g., when incoming rows become wider), higher values use fewer partitions but are safer
public static final String HASHAGG_MIN_BATCHES_PER_PARTITION_KEY = "exec.hashagg.min_batches_per_partition";
public static final LongValidator HASHAGG_MIN_BATCHES_PER_PARTITION_VALIDATOR = new RangeLongValidator(HASHAGG_MIN_BATCHES_PER_PARTITION_KEY, 1, 5);
// Can be turned off mainly for testing. Memory prediction is used to decide on when to spill to disk; with this option off,
// spill would be triggered only by another mechanism -- "catch OOMs and then spill".
public static final String HASHAGG_USE_MEMORY_PREDICTION_KEY = "exec.hashagg.use_memory_prediction";
public static final BooleanValidator HASHAGG_USE_MEMORY_PREDICTION_VALIDATOR = new BooleanValidator(HASHAGG_USE_MEMORY_PREDICTION_KEY);
public static final String HASHAGG_SPILL_DIRS = "drill.exec.hashagg.spill.directories";
public static final String HASHAGG_SPILL_FILESYSTEM = "drill.exec.hashagg.spill.fs";
public static final String HASHAGG_FALLBACK_ENABLED_KEY = "drill.exec.hashagg.fallback.enabled";
public static final BooleanValidator HASHAGG_FALLBACK_ENABLED_VALIDATOR = new BooleanValidator(HASHAGG_FALLBACK_ENABLED_KEY);
public static final String SSL_PROVIDER = "drill.exec.ssl.provider"; // valid values are "JDK", "OPENSSL" // default JDK
public static final String SSL_PROTOCOL = "drill.exec.ssl.protocol"; // valid values are SSL, SSLV2, SSLV3, TLS, TLSV1, TLSv1.1, TLSv1.2(default)
public static final String SSL_KEYSTORE_TYPE = "drill.exec.ssl.keyStoreType";
public static final String SSL_KEYSTORE_PATH = "drill.exec.ssl.keyStorePath"; // path to keystore. default : $JRE_HOME/lib/security/keystore.jks
public static final String SSL_KEYSTORE_PASSWORD = "drill.exec.ssl.keyStorePassword"; // default: changeit
public static final String SSL_KEY_PASSWORD = "drill.exec.ssl.keyPassword"; //
public static final String SSL_TRUSTSTORE_TYPE = "drill.exec.ssl.trustStoreType"; // valid values are jks(default), jceks, pkcs12
public static final String SSL_TRUSTSTORE_PATH = "drill.exec.ssl.trustStorePath"; // path to keystore. default : $JRE_HOME/lib/security/cacerts.jks
public static final String SSL_TRUSTSTORE_PASSWORD = "drill.exec.ssl.trustStorePassword"; // default: changeit
public static final String SSL_USE_HADOOP_CONF = "drill.exec.ssl.useHadoopConfig"; // Initialize ssl params from hadoop if not provided by drill. default: true
public static final String SSL_HANDSHAKE_TIMEOUT = "drill.exec.security.user.encryption.ssl.handshakeTimeout"; // Default 10 seconds
public static final String TEXT_LINE_READER_BATCH_SIZE = "drill.exec.storage.file.text.batch.size";
public static final String TEXT_LINE_READER_BUFFER_SIZE = "drill.exec.storage.file.text.buffer.size";
public static final String HAZELCAST_SUBNETS = "drill.exec.cache.hazel.subnets";
public static final String HTTP_ENABLE = "drill.exec.http.enabled";
public static final String HTTP_MAX_PROFILES = "drill.exec.http.max_profiles";
public static final String HTTP_PORT = "drill.exec.http.port";
public static final String HTTP_PORT_HUNT = "drill.exec.http.porthunt";
public static final String HTTP_ENABLE_SSL = "drill.exec.http.ssl_enabled";
public static final String HTTP_CORS_ENABLED = "drill.exec.http.cors.enabled";
public static final String HTTP_CORS_ALLOWED_ORIGINS = "drill.exec.http.cors.allowedOrigins";
public static final String HTTP_CORS_ALLOWED_METHODS = "drill.exec.http.cors.allowedMethods";
public static final String HTTP_CORS_ALLOWED_HEADERS = "drill.exec.http.cors.allowedHeaders";
public static final String HTTP_CORS_CREDENTIALS = "drill.exec.http.cors.credentials";
public static final String HTTP_SESSION_MEMORY_RESERVATION = "drill.exec.http.session.memory.reservation";
public static final String HTTP_SESSION_MEMORY_MAXIMUM = "drill.exec.http.session.memory.maximum";
public static final String HTTP_SESSION_MAX_IDLE_SECS = "drill.exec.http.session_max_idle_secs";
public static final String HTTP_KEYSTORE_PATH = SSL_KEYSTORE_PATH;
public static final String HTTP_KEYSTORE_PASSWORD = SSL_KEYSTORE_PASSWORD;
public static final String HTTP_TRUSTSTORE_PATH = SSL_TRUSTSTORE_PATH;
public static final String HTTP_TRUSTSTORE_PASSWORD = SSL_TRUSTSTORE_PASSWORD;
public static final String HTTP_AUTHENTICATION_MECHANISMS = "drill.exec.http.auth.mechanisms";
public static final String HTTP_SPNEGO_PRINCIPAL = "drill.exec.http.auth.spnego.principal";
public static final String HTTP_SPNEGO_KEYTAB = "drill.exec.http.auth.spnego.keytab";
public static final String SYS_STORE_PROVIDER_CLASS = "drill.exec.sys.store.provider.class";
public static final String SYS_STORE_PROVIDER_LOCAL_PATH = "drill.exec.sys.store.provider.local.path";
public static final String SYS_STORE_PROVIDER_LOCAL_ENABLE_WRITE = "drill.exec.sys.store.provider.local.write";
public static final String PROFILES_STORE_INMEMORY = "drill.exec.profiles.store.inmemory";
public static final String PROFILES_STORE_CAPACITY = "drill.exec.profiles.store.capacity";
public static final String IMPERSONATION_ENABLED = "drill.exec.impersonation.enabled";
public static final String IMPERSONATION_MAX_CHAINED_USER_HOPS = "drill.exec.impersonation.max_chained_user_hops";
public static final String AUTHENTICATION_MECHANISMS = "drill.exec.security.auth.mechanisms";
public static final String USER_AUTHENTICATION_ENABLED = "drill.exec.security.user.auth.enabled";
public static final String USER_AUTHENTICATOR_IMPL = "drill.exec.security.user.auth.impl";
public static final String PAM_AUTHENTICATOR_PROFILES = "drill.exec.security.user.auth.pam_profiles";
public static final String BIT_AUTHENTICATION_ENABLED = "drill.exec.security.bit.auth.enabled";
public static final String BIT_AUTHENTICATION_MECHANISM = "drill.exec.security.bit.auth.mechanism";
public static final String USE_LOGIN_PRINCIPAL = "drill.exec.security.bit.auth.use_login_principal";
public static final String USER_ENCRYPTION_SASL_ENABLED = "drill.exec.security.user.encryption.sasl.enabled";
public static final String USER_ENCRYPTION_SASL_MAX_WRAPPED_SIZE = "drill.exec.security.user.encryption.sasl.max_wrapped_size";
public static final String WEB_SERVER_THREAD_POOL_MAX = "drill.exec.web_server.thread_pool_max";
public static final String USER_SSL_ENABLED = "drill.exec.security.user.encryption.ssl.enabled";
public static final String BIT_ENCRYPTION_SASL_ENABLED = "drill.exec.security.bit.encryption.sasl.enabled";
public static final String BIT_ENCRYPTION_SASL_MAX_WRAPPED_SIZE = "drill.exec.security.bit.encryption.sasl.max_wrapped_size";
/** Size of JDBC batch queue (in batches) above which throttling begins. */
public static final String JDBC_BATCH_QUEUE_THROTTLING_THRESHOLD =
"drill.jdbc.batch_queue_throttling_threshold";
// Thread pool size for scan threads. Used by the Parquet scan.
public static final String SCAN_THREADPOOL_SIZE = "drill.exec.scan.threadpool_size";
// The size of the thread pool used by a scan to decode the data. Used by Parquet
public static final String SCAN_DECODE_THREADPOOL_SIZE = "drill.exec.scan.decode_threadpool_size";
/**
* Currently if a query is cancelled, but one of the fragments reports the status as FAILED instead of CANCELLED or
* FINISHED we report the query result as CANCELLED by swallowing the failures occurred in fragments. This BOOT
* setting allows the user to see the query status as failure. Useful for developers/testers.
*/
public static final String RETURN_ERROR_FOR_FAILURE_IN_CANCELLED_FRAGMENTS = "drill.exec.debug.return_error_for_failure_in_cancelled_fragments";
public static final String CLIENT_SUPPORT_COMPLEX_TYPES = "drill.client.supports-complex-types";
/**
* Configuration properties connected with dynamic UDFs support
*/
public static final String UDF_RETRY_ATTEMPTS = "drill.exec.udf.retry-attempts";
public static final String UDF_DIRECTORY_LOCAL = "drill.exec.udf.directory.local";
public static final String UDF_DIRECTORY_FS = "drill.exec.udf.directory.fs";
public static final String UDF_DIRECTORY_ROOT = "drill.exec.udf.directory.root";
public static final String UDF_DIRECTORY_STAGING = "drill.exec.udf.directory.staging";
public static final String UDF_DIRECTORY_REGISTRY = "drill.exec.udf.directory.registry";
public static final String UDF_DIRECTORY_TMP = "drill.exec.udf.directory.tmp";
public static final String UDF_DISABLE_DYNAMIC = "drill.exec.udf.disable_dynamic";
/**
* Local temporary directory is used as base for temporary storage of Dynamic UDF jars.
*/
public static final String DRILL_TMP_DIR = "drill.tmp-dir";
/**
* Temporary tables can be created ONLY in default temporary workspace.
*/
public static final String DEFAULT_TEMPORARY_WORKSPACE = "drill.exec.default_temporary_workspace";
public static final String OUTPUT_FORMAT_OPTION = "store.format";
public static final OptionValidator OUTPUT_FORMAT_VALIDATOR = new StringValidator(OUTPUT_FORMAT_OPTION);
public static final String PARQUET_BLOCK_SIZE = "store.parquet.block-size";
public static final String PARQUET_WRITER_USE_SINGLE_FS_BLOCK = "store.parquet.writer.use_single_fs_block";
public static final OptionValidator PARQUET_WRITER_USE_SINGLE_FS_BLOCK_VALIDATOR = new BooleanValidator(
PARQUET_WRITER_USE_SINGLE_FS_BLOCK);
public static final OptionValidator PARQUET_BLOCK_SIZE_VALIDATOR = new PositiveLongValidator(PARQUET_BLOCK_SIZE, Integer.MAX_VALUE);
public static final String PARQUET_PAGE_SIZE = "store.parquet.page-size";
public static final OptionValidator PARQUET_PAGE_SIZE_VALIDATOR = new PositiveLongValidator(PARQUET_PAGE_SIZE, Integer.MAX_VALUE);
public static final String PARQUET_DICT_PAGE_SIZE = "store.parquet.dictionary.page-size";
public static final OptionValidator PARQUET_DICT_PAGE_SIZE_VALIDATOR = new PositiveLongValidator(PARQUET_DICT_PAGE_SIZE, Integer.MAX_VALUE);
public static final String PARQUET_WRITER_COMPRESSION_TYPE = "store.parquet.compression";
public static final OptionValidator PARQUET_WRITER_COMPRESSION_TYPE_VALIDATOR = new EnumeratedStringValidator(
PARQUET_WRITER_COMPRESSION_TYPE, "snappy", "gzip", "none");
public static final String PARQUET_WRITER_ENABLE_DICTIONARY_ENCODING = "store.parquet.enable_dictionary_encoding";
public static final OptionValidator PARQUET_WRITER_ENABLE_DICTIONARY_ENCODING_VALIDATOR = new BooleanValidator(
PARQUET_WRITER_ENABLE_DICTIONARY_ENCODING);
public static final String PARQUET_VECTOR_FILL_THRESHOLD = "store.parquet.vector_fill_threshold";
public static final OptionValidator PARQUET_VECTOR_FILL_THRESHOLD_VALIDATOR = new PositiveLongValidator(PARQUET_VECTOR_FILL_THRESHOLD, 99l);
public static final String PARQUET_VECTOR_FILL_CHECK_THRESHOLD = "store.parquet.vector_fill_check_threshold";
public static final OptionValidator PARQUET_VECTOR_FILL_CHECK_THRESHOLD_VALIDATOR = new PositiveLongValidator(PARQUET_VECTOR_FILL_CHECK_THRESHOLD, 100l);
public static final String PARQUET_NEW_RECORD_READER = "store.parquet.use_new_reader";
public static final OptionValidator PARQUET_RECORD_READER_IMPLEMENTATION_VALIDATOR = new BooleanValidator(PARQUET_NEW_RECORD_READER);
public static final String PARQUET_READER_INT96_AS_TIMESTAMP = "store.parquet.reader.int96_as_timestamp";
public static final OptionValidator PARQUET_READER_INT96_AS_TIMESTAMP_VALIDATOR = new BooleanValidator(PARQUET_READER_INT96_AS_TIMESTAMP);
public static final String PARQUET_PAGEREADER_ASYNC = "store.parquet.reader.pagereader.async";
public static final OptionValidator PARQUET_PAGEREADER_ASYNC_VALIDATOR = new BooleanValidator(PARQUET_PAGEREADER_ASYNC);
// Number of pages the Async Parquet page reader will read before blocking
public static final String PARQUET_PAGEREADER_QUEUE_SIZE = "store.parquet.reader.pagereader.queuesize";
public static final OptionValidator PARQUET_PAGEREADER_QUEUE_SIZE_VALIDATOR = new PositiveLongValidator(PARQUET_PAGEREADER_QUEUE_SIZE, Integer.MAX_VALUE);
public static final String PARQUET_PAGEREADER_ENFORCETOTALSIZE = "store.parquet.reader.pagereader.enforceTotalSize";
public static final OptionValidator PARQUET_PAGEREADER_ENFORCETOTALSIZE_VALIDATOR = new BooleanValidator(PARQUET_PAGEREADER_ENFORCETOTALSIZE);
public static final String PARQUET_COLUMNREADER_ASYNC = "store.parquet.reader.columnreader.async";
public static final OptionValidator PARQUET_COLUMNREADER_ASYNC_VALIDATOR = new BooleanValidator(PARQUET_COLUMNREADER_ASYNC);
// Use a buffering reader for Parquet page reader
public static final String PARQUET_PAGEREADER_USE_BUFFERED_READ = "store.parquet.reader.pagereader.bufferedread";
public static final OptionValidator PARQUET_PAGEREADER_USE_BUFFERED_READ_VALIDATOR = new BooleanValidator(PARQUET_PAGEREADER_USE_BUFFERED_READ);
// Size in MiB of the buffer the Parquet page reader will use to read from disk. Default is 1 MiB
public static final String PARQUET_PAGEREADER_BUFFER_SIZE = "store.parquet.reader.pagereader.buffersize";
public static final OptionValidator PARQUET_PAGEREADER_BUFFER_SIZE_VALIDATOR = new LongValidator(PARQUET_PAGEREADER_BUFFER_SIZE);
// try to use fadvise if available
public static final String PARQUET_PAGEREADER_USE_FADVISE = "store.parquet.reader.pagereader.usefadvise";
public static final OptionValidator PARQUET_PAGEREADER_USE_FADVISE_VALIDATOR = new BooleanValidator(PARQUET_PAGEREADER_USE_FADVISE);
public static final OptionValidator COMPILE_SCALAR_REPLACEMENT = new BooleanValidator("exec.compile.scalar_replacement");
public static final String JSON_ALL_TEXT_MODE = "store.json.all_text_mode";
public static final BooleanValidator JSON_READER_ALL_TEXT_MODE_VALIDATOR = new BooleanValidator(JSON_ALL_TEXT_MODE);
public static final BooleanValidator JSON_EXTENDED_TYPES = new BooleanValidator("store.json.extended_types");
public static final BooleanValidator JSON_WRITER_UGLIFY = new BooleanValidator("store.json.writer.uglify");
public static final BooleanValidator JSON_WRITER_SKIPNULLFIELDS = new BooleanValidator("store.json.writer.skip_null_fields");
public static final String JSON_READER_SKIP_INVALID_RECORDS_FLAG = "store.json.reader.skip_invalid_records";
public static final BooleanValidator JSON_SKIP_MALFORMED_RECORDS_VALIDATOR = new BooleanValidator(JSON_READER_SKIP_INVALID_RECORDS_FLAG);
public static final String JSON_READER_PRINT_INVALID_RECORDS_LINE_NOS_FLAG = "store.json.reader.print_skipped_invalid_record_number";
public static final BooleanValidator JSON_READER_PRINT_INVALID_RECORDS_LINE_NOS_FLAG_VALIDATOR = new BooleanValidator(JSON_READER_PRINT_INVALID_RECORDS_LINE_NOS_FLAG);
public static final DoubleValidator TEXT_ESTIMATED_ROW_SIZE = new RangeDoubleValidator("store.text.estimated_row_size_bytes", 1, Long.MAX_VALUE);
/**
* Json writer option for writing `NaN` and `Infinity` tokens as numbers (not enclosed with double quotes)
*/
public static final String JSON_WRITER_NAN_INF_NUMBERS = "store.json.writer.allow_nan_inf";
public static final BooleanValidator JSON_WRITER_NAN_INF_NUMBERS_VALIDATOR = new BooleanValidator(JSON_WRITER_NAN_INF_NUMBERS);
/**
* Json reader option that enables parser to read `NaN` and `Infinity` tokens as numbers
*/
public static final String JSON_READER_NAN_INF_NUMBERS = "store.json.reader.allow_nan_inf";
public static final BooleanValidator JSON_READER_NAN_INF_NUMBERS_VALIDATOR = new BooleanValidator(JSON_READER_NAN_INF_NUMBERS);
/**
* The column label (for directory levels) in results when querying files in a directory
* E.g. labels: dir0 dir1<pre>
* structure: foo
* |- bar - a.parquet
* |- baz - b.parquet</pre>
*/
public static final String FILESYSTEM_PARTITION_COLUMN_LABEL = "drill.exec.storage.file.partition.column.label";
public static final StringValidator FILESYSTEM_PARTITION_COLUMN_LABEL_VALIDATOR = new StringValidator(FILESYSTEM_PARTITION_COLUMN_LABEL);
/**
* Implicit file columns
*/
public static final String IMPLICIT_FILENAME_COLUMN_LABEL = "drill.exec.storage.implicit.filename.column.label";
public static final OptionValidator IMPLICIT_FILENAME_COLUMN_LABEL_VALIDATOR = new StringValidator(IMPLICIT_FILENAME_COLUMN_LABEL);
public static final String IMPLICIT_SUFFIX_COLUMN_LABEL = "drill.exec.storage.implicit.suffix.column.label";
public static final OptionValidator IMPLICIT_SUFFIX_COLUMN_LABEL_VALIDATOR = new StringValidator(IMPLICIT_SUFFIX_COLUMN_LABEL);
public static final String IMPLICIT_FQN_COLUMN_LABEL = "drill.exec.storage.implicit.fqn.column.label";
public static final OptionValidator IMPLICIT_FQN_COLUMN_LABEL_VALIDATOR = new StringValidator(IMPLICIT_FQN_COLUMN_LABEL);
public static final String IMPLICIT_FILEPATH_COLUMN_LABEL = "drill.exec.storage.implicit.filepath.column.label";
public static final OptionValidator IMPLICIT_FILEPATH_COLUMN_LABEL_VALIDATOR = new StringValidator(IMPLICIT_FILEPATH_COLUMN_LABEL);
public static final String JSON_READ_NUMBERS_AS_DOUBLE = "store.json.read_numbers_as_double";
public static final BooleanValidator JSON_READ_NUMBERS_AS_DOUBLE_VALIDATOR = new BooleanValidator(JSON_READ_NUMBERS_AS_DOUBLE);
public static final String MONGO_ALL_TEXT_MODE = "store.mongo.all_text_mode";
public static final OptionValidator MONGO_READER_ALL_TEXT_MODE_VALIDATOR = new BooleanValidator(MONGO_ALL_TEXT_MODE);
public static final String MONGO_READER_READ_NUMBERS_AS_DOUBLE = "store.mongo.read_numbers_as_double";
public static final OptionValidator MONGO_READER_READ_NUMBERS_AS_DOUBLE_VALIDATOR = new BooleanValidator(MONGO_READER_READ_NUMBERS_AS_DOUBLE);
public static final String MONGO_BSON_RECORD_READER = "store.mongo.bson.record.reader";
public static final OptionValidator MONGO_BSON_RECORD_READER_VALIDATOR = new BooleanValidator(MONGO_BSON_RECORD_READER);
public static final String ENABLE_UNION_TYPE_KEY = "exec.enable_union_type";
public static final BooleanValidator ENABLE_UNION_TYPE = new BooleanValidator(ENABLE_UNION_TYPE_KEY);
// Kafka plugin related options.
public static final String KAFKA_ALL_TEXT_MODE = "store.kafka.all_text_mode";
public static final OptionValidator KAFKA_READER_ALL_TEXT_MODE_VALIDATOR = new BooleanValidator(KAFKA_ALL_TEXT_MODE);
public static final String KAFKA_READER_READ_NUMBERS_AS_DOUBLE = "store.kafka.read_numbers_as_double";
public static final OptionValidator KAFKA_READER_READ_NUMBERS_AS_DOUBLE_VALIDATOR = new BooleanValidator(
KAFKA_READER_READ_NUMBERS_AS_DOUBLE);
public static final String KAFKA_RECORD_READER = "store.kafka.record.reader";
public static final OptionValidator KAFKA_RECORD_READER_VALIDATOR = new StringValidator(KAFKA_RECORD_READER);
public static final String KAFKA_POLL_TIMEOUT = "store.kafka.poll.timeout";
public static final PositiveLongValidator KAFKA_POLL_TIMEOUT_VALIDATOR = new PositiveLongValidator(KAFKA_POLL_TIMEOUT,
Long.MAX_VALUE);
// TODO: We need to add a feature that enables storage plugins to add their own options. Currently we have to declare
// in core which is not right. Move this option and above two mongo plugin related options once we have the feature.
public static final String HIVE_OPTIMIZE_SCAN_WITH_NATIVE_READERS = "store.hive.optimize_scan_with_native_readers";
public static final OptionValidator HIVE_OPTIMIZE_SCAN_WITH_NATIVE_READERS_VALIDATOR =
new BooleanValidator(HIVE_OPTIMIZE_SCAN_WITH_NATIVE_READERS);
public static final String SLICE_TARGET = "planner.slice_target";
public static final long SLICE_TARGET_DEFAULT = 100000l;
public static final PositiveLongValidator SLICE_TARGET_OPTION = new PositiveLongValidator(SLICE_TARGET, Long.MAX_VALUE);
public static final String CAST_TO_NULLABLE_NUMERIC = "drill.exec.functions.cast_empty_string_to_null";
public static final BooleanValidator CAST_TO_NULLABLE_NUMERIC_OPTION = new BooleanValidator(CAST_TO_NULLABLE_NUMERIC);
/**
* HashTable runtime settings
*/
public static final String MIN_HASH_TABLE_SIZE_KEY = "exec.min_hash_table_size";
public static final PositiveLongValidator MIN_HASH_TABLE_SIZE = new PositiveLongValidator(MIN_HASH_TABLE_SIZE_KEY, HashTable.MAXIMUM_CAPACITY);
public static final String MAX_HASH_TABLE_SIZE_KEY = "exec.max_hash_table_size";
public static final PositiveLongValidator MAX_HASH_TABLE_SIZE = new PositiveLongValidator(MAX_HASH_TABLE_SIZE_KEY, HashTable.MAXIMUM_CAPACITY);
/**
   * Limits the maximum level of parallelization to this factor times the number of Drillbits
*/
public static final String CPU_LOAD_AVERAGE_KEY = "planner.cpu_load_average";
public static final DoubleValidator CPU_LOAD_AVERAGE = new DoubleValidator(CPU_LOAD_AVERAGE_KEY);
public static final String MAX_WIDTH_PER_NODE_KEY = "planner.width.max_per_node";
public static final MaxWidthValidator MAX_WIDTH_PER_NODE = new MaxWidthValidator(MAX_WIDTH_PER_NODE_KEY);
/**
   * The maximum level of parallelization any stage of the query can do. Note that while this
   * might be the number of active Drillbits, realistically, this could be well beyond that
   * number if we want to do things like speed up the return of results.
*/
public static final String MAX_WIDTH_GLOBAL_KEY = "planner.width.max_per_query";
public static final OptionValidator MAX_WIDTH_GLOBAL = new PositiveLongValidator(MAX_WIDTH_GLOBAL_KEY, Integer.MAX_VALUE);
/**
* Factor by which a node with endpoint affinity will be favored while creating assignment
*/
public static final String AFFINITY_FACTOR_KEY = "planner.affinity_factor";
public static final OptionValidator AFFINITY_FACTOR = new DoubleValidator(AFFINITY_FACTOR_KEY);
public static final String EARLY_LIMIT0_OPT_KEY = "planner.enable_limit0_optimization";
public static final BooleanValidator EARLY_LIMIT0_OPT = new BooleanValidator(EARLY_LIMIT0_OPT_KEY);
public static final String ENABLE_MEMORY_ESTIMATION_KEY = "planner.memory.enable_memory_estimation";
public static final OptionValidator ENABLE_MEMORY_ESTIMATION = new BooleanValidator(ENABLE_MEMORY_ESTIMATION_KEY);
/**
* Maximum query memory per node (in MB). Re-plan with cheaper operators if
* memory estimation exceeds this limit.
* <p/>
* DEFAULT: 2048 MB
*/
public static final String MAX_QUERY_MEMORY_PER_NODE_KEY = "planner.memory.max_query_memory_per_node";
public static final LongValidator MAX_QUERY_MEMORY_PER_NODE = new RangeLongValidator(MAX_QUERY_MEMORY_PER_NODE_KEY, 1024 * 1024, Long.MAX_VALUE);
/**
* Alternative way to compute per-query-per-node memory as a percent
* of the total available system memory.
* <p>
* Suggestion for computation.
* <ul>
* <li>Assume an allowance for non-managed operators. Default assumption:
* 50%</li>
* <li>Assume a desired number of concurrent queries. Default assumption:
* 10.</li>
* <li>The value of this parameter is<br>
* (1 - non-managed allowance) / concurrency</li>
* </ul>
* Doing the math produces the default 5% number. The actual number
* given is no less than the <tt>max_query_memory_per_node</tt>
* amount.
* <p>
* This number is used only when throttling is disabled. Setting the
* number to 0 effectively disables this technique as it will always
* produce values lower than <tt>max_query_memory_per_node</tt>.
* <p>
* DEFAULT: 5%
*/
public static String PERCENT_MEMORY_PER_QUERY_KEY = "planner.memory.percent_per_query";
public static DoubleValidator PERCENT_MEMORY_PER_QUERY = new RangeDoubleValidator(
PERCENT_MEMORY_PER_QUERY_KEY, 0, 1.0);
/**
* Minimum memory allocated to each buffered operator instance.
* <p/>
* DEFAULT: 40 MB
*/
public static final String MIN_MEMORY_PER_BUFFERED_OP_KEY = "planner.memory.min_memory_per_buffered_op";
public static final LongValidator MIN_MEMORY_PER_BUFFERED_OP = new RangeLongValidator(MIN_MEMORY_PER_BUFFERED_OP_KEY, 1024 * 1024, Long.MAX_VALUE);
/**
* Extra query memory per node for non-blocking operators.
* NOTE: This option is currently used only for memory estimation.
* <p/>
* DEFAULT: 64 MB
* MAXIMUM: 2048 MB
*/
public static final String NON_BLOCKING_OPERATORS_MEMORY_KEY = "planner.memory.non_blocking_operators_memory";
public static final OptionValidator NON_BLOCKING_OPERATORS_MEMORY = new PowerOfTwoLongValidator(
NON_BLOCKING_OPERATORS_MEMORY_KEY, 1 << 11);
public static final String HASH_JOIN_TABLE_FACTOR_KEY = "planner.memory.hash_join_table_factor";
public static final OptionValidator HASH_JOIN_TABLE_FACTOR = new DoubleValidator(HASH_JOIN_TABLE_FACTOR_KEY);
public static final String HASH_AGG_TABLE_FACTOR_KEY = "planner.memory.hash_agg_table_factor";
public static final OptionValidator HASH_AGG_TABLE_FACTOR = new DoubleValidator(HASH_AGG_TABLE_FACTOR_KEY);
public static final String AVERAGE_FIELD_WIDTH_KEY = "planner.memory.average_field_width";
public static final OptionValidator AVERAGE_FIELD_WIDTH = new PositiveLongValidator(AVERAGE_FIELD_WIDTH_KEY, Long.MAX_VALUE);
// Mux Exchange options.
public static final String ORDERED_MUX_EXCHANGE = "planner.enable_ordered_mux_exchange";
// Resource management boot-time options.
public static final String MAX_MEMORY_PER_NODE = "drill.exec.rm.memory_per_node";
public static final String MAX_CPUS_PER_NODE = "drill.exec.rm.cpus_per_node";
// Resource management system run-time options.
// Enables queues. When running embedded, enables an in-process queue. When
// running distributed, enables the Zookeeper-based distributed queue.
public static final BooleanValidator ENABLE_QUEUE = new BooleanValidator("exec.queue.enable");
public static final LongValidator LARGE_QUEUE_SIZE = new PositiveLongValidator("exec.queue.large", 10_000);
public static final LongValidator SMALL_QUEUE_SIZE = new PositiveLongValidator("exec.queue.small", 100_000);
public static final LongValidator QUEUE_THRESHOLD_SIZE = new PositiveLongValidator("exec.queue.threshold", Long.MAX_VALUE);
public static final LongValidator QUEUE_TIMEOUT = new PositiveLongValidator("exec.queue.timeout_millis", Long.MAX_VALUE);
// Ratio of memory for small queries vs. large queries.
// Each small query gets 1 unit, each large query gets QUEUE_MEMORY_RATIO units.
// A lower limit of 1 enforces the intuition that a large query should never get
// *less* memory than a small one.
public static final DoubleValidator QUEUE_MEMORY_RATIO = new RangeDoubleValidator("exec.queue.memory_ratio", 1.0, 1000);
public static final DoubleValidator QUEUE_MEMORY_RESERVE = new RangeDoubleValidator("exec.queue.memory_reserve_ratio", 0, 1.0);
public static final String ENABLE_VERBOSE_ERRORS_KEY = "exec.errors.verbose";
public static final OptionValidator ENABLE_VERBOSE_ERRORS = new BooleanValidator(ENABLE_VERBOSE_ERRORS_KEY);
public static final String ENABLE_NEW_TEXT_READER_KEY = "exec.storage.enable_new_text_reader";
public static final OptionValidator ENABLE_NEW_TEXT_READER = new BooleanValidator(ENABLE_NEW_TEXT_READER_KEY);
public static final String BOOTSTRAP_STORAGE_PLUGINS_FILE = "bootstrap-storage-plugins.json";
public static final String DRILL_SYS_FILE_SUFFIX = ".sys.drill";
public static final String ENABLE_WINDOW_FUNCTIONS = "window.enable";
public static final OptionValidator ENABLE_WINDOW_FUNCTIONS_VALIDATOR = new BooleanValidator(ENABLE_WINDOW_FUNCTIONS);
public static final String DRILLBIT_CONTROL_INJECTIONS = "drill.exec.testing.controls";
public static final OptionValidator DRILLBIT_CONTROLS_VALIDATOR = new ExecutionControls.ControlsOptionValidator(DRILLBIT_CONTROL_INJECTIONS, 1);
public static final String NEW_VIEW_DEFAULT_PERMS_KEY = "new_view_default_permissions";
public static final OptionValidator NEW_VIEW_DEFAULT_PERMS_VALIDATOR = new StringValidator(NEW_VIEW_DEFAULT_PERMS_KEY);
public static final String CTAS_PARTITIONING_HASH_DISTRIBUTE = "store.partition.hash_distribute";
public static final BooleanValidator CTAS_PARTITIONING_HASH_DISTRIBUTE_VALIDATOR = new BooleanValidator(CTAS_PARTITIONING_HASH_DISTRIBUTE);
public static final String ENABLE_BULK_LOAD_TABLE_LIST_KEY = "exec.enable_bulk_load_table_list";
public static final BooleanValidator ENABLE_BULK_LOAD_TABLE_LIST = new BooleanValidator(ENABLE_BULK_LOAD_TABLE_LIST_KEY);
/**
* When getting Hive Table information with exec.enable_bulk_load_table_list set to true,
* use the exec.bulk_load_table_list.bulk_size to determine how many tables to fetch from HiveMetaStore
* at a time. (The number of tables can get to be quite large.)
*/
public static final String BULK_LOAD_TABLE_LIST_BULK_SIZE_KEY = "exec.bulk_load_table_list.bulk_size";
public static final PositiveLongValidator BULK_LOAD_TABLE_LIST_BULK_SIZE = new PositiveLongValidator(BULK_LOAD_TABLE_LIST_BULK_SIZE_KEY, Integer.MAX_VALUE);
/**
* Option whose value is a comma separated list of admin usernames. Admin users are users who have special privileges
* such as changing system options.
*/
public static final String ADMIN_USERS_KEY = "security.admin.users";
public static final AdminUsersValidator ADMIN_USERS_VALIDATOR = new AdminUsersValidator(ADMIN_USERS_KEY);
/**
* Option whose value is a comma separated list of admin usergroups.
*/
public static final String ADMIN_USER_GROUPS_KEY = "security.admin.user_groups";
public static final AdminUserGroupsValidator ADMIN_USER_GROUPS_VALIDATOR =
new AdminUserGroupsValidator(ADMIN_USER_GROUPS_KEY);
/**
* Option whose value is a string representing list of inbound impersonation policies.
*
* Impersonation policy format:
* [
* {
* proxy_principals : { users : [“...”], groups : [“...”] },
* target_principals : { users : [“...”], groups : [“...”] }
* },
* ...
* ]
*/
public static final String IMPERSONATION_POLICIES_KEY = "exec.impersonation.inbound_policies";
public static final StringValidator IMPERSONATION_POLICY_VALIDATOR =
new InboundImpersonationManager.InboundImpersonationPolicyValidator(IMPERSONATION_POLICIES_KEY);
/**
* Web settings
*/
public static final String WEB_LOGS_MAX_LINES = "web.logs.max_lines";
public static final OptionValidator WEB_LOGS_MAX_LINES_VALIDATOR = new PositiveLongValidator(WEB_LOGS_MAX_LINES, Integer.MAX_VALUE);
public static final String CODE_GEN_EXP_IN_METHOD_SIZE = "exec.java.compiler.exp_in_method_size";
public static final LongValidator CODE_GEN_EXP_IN_METHOD_SIZE_VALIDATOR = new LongValidator(CODE_GEN_EXP_IN_METHOD_SIZE);
/**
* Timeout for create prepare statement request. If the request exceeds this timeout, then request is timed out.
* Default value is 10mins.
*/
public static final String CREATE_PREPARE_STATEMENT_TIMEOUT_MILLIS = "prepare.statement.create_timeout_ms";
public static final OptionValidator CREATE_PREPARE_STATEMENT_TIMEOUT_MILLIS_VALIDATOR =
new PositiveLongValidator(CREATE_PREPARE_STATEMENT_TIMEOUT_MILLIS, Integer.MAX_VALUE);
public static final String DYNAMIC_UDF_SUPPORT_ENABLED = "exec.udf.enable_dynamic_support";
public static final BooleanValidator DYNAMIC_UDF_SUPPORT_ENABLED_VALIDATOR = new BooleanValidator(DYNAMIC_UDF_SUPPORT_ENABLED);
/**
* Option to save query profiles. If false, no query profile will be saved
* for any query.
*/
public static final String ENABLE_QUERY_PROFILE_OPTION = "exec.query_profile.save";
public static final BooleanValidator ENABLE_QUERY_PROFILE_VALIDATOR = new BooleanValidator(ENABLE_QUERY_PROFILE_OPTION);
/**
* Profiles are normally written after the last client message to reduce latency.
* When running tests, however, we want the profile written <i>before</i> the
* return so that the client can immediately read the profile for test
* verification.
*/
public static final String QUERY_PROFILE_DEBUG_OPTION = "exec.query_profile.debug_mode";
public static final BooleanValidator QUERY_PROFILE_DEBUG_VALIDATOR = new BooleanValidator(QUERY_PROFILE_DEBUG_OPTION);
public static final String USE_DYNAMIC_UDFS_KEY = "exec.udf.use_dynamic";
public static final BooleanValidator USE_DYNAMIC_UDFS = new BooleanValidator(USE_DYNAMIC_UDFS_KEY);
public static final String QUERY_TRANSIENT_STATE_UPDATE_KEY = "exec.query.progress.update";
public static final BooleanValidator QUERY_TRANSIENT_STATE_UPDATE = new BooleanValidator(QUERY_TRANSIENT_STATE_UPDATE_KEY);
public static final String PERSISTENT_TABLE_UMASK = "exec.persistent_table.umask";
public static final StringValidator PERSISTENT_TABLE_UMASK_VALIDATOR = new StringValidator(PERSISTENT_TABLE_UMASK);
/**
* Enables batch iterator (operator) validation. Validation is normally enabled
* only when assertions are enabled. This option enables iterator validation even
* if assertions are not enabled. That is, it allows iterator validation even on
* a "production" Drill instance.
*/
public static final String ENABLE_ITERATOR_VALIDATION_OPTION = "debug.validate_iterators";
public static final BooleanValidator ENABLE_ITERATOR_VALIDATOR = new BooleanValidator(ENABLE_ITERATOR_VALIDATION_OPTION);
/**
* Boot-time config option to enable validation. Primarily used for tests.
   * If true, overrides the above. (That is, validation is done if assertions are on,
   * if the above session option is set to true, or if this config option is set to true.)
*/
public static final String ENABLE_ITERATOR_VALIDATION = "drill.exec.debug.validate_iterators";
/**
* When iterator validation is enabled, additionally validates the vectors in
* each batch passed to each iterator.
*/
public static final String ENABLE_VECTOR_VALIDATION_OPTION = "debug.validate_vectors";
public static final BooleanValidator ENABLE_VECTOR_VALIDATOR = new BooleanValidator(ENABLE_VECTOR_VALIDATION_OPTION);
/**
* Boot-time config option to enable vector validation. Primarily used for
* tests. Add the following to the command line to enable:<br>
* <tt>-ea -Ddrill.exec.debug.validate_vectors=true</tt>
*/
public static final String ENABLE_VECTOR_VALIDATION = "drill.exec.debug.validate_vectors";
  // Prefix under which boot-time defaults for run-time options live in drill-override.conf.
  public static final String OPTION_DEFAULTS_ROOT = "drill.exec.options.";
  /**
   * Returns the boot-time configuration path for the default value of the
   * given run-time option, i.e. {@code "drill.exec.options." + name}.
   *
   * @param name run-time option name (e.g. {@code "planner.slice_target"})
   * @return the corresponding boot-config key
   */
  public static String bootDefaultFor(String name) {
    return OPTION_DEFAULTS_ROOT + name;
  }
/**
* Boot-time config option provided to modify duration of the grace period.
* Grace period is the amount of time where the drillbit accepts work after
* the shutdown request is triggered. The primary use of grace period is to
* avoid the race conditions caused by zookeeper delay in updating the state
* information of the drillbit that is shutting down. So, it is advisable
   * to have a grace period that is at least twice the amount of zookeeper
* refresh time.
*/
public static final String GRACE_PERIOD = "drill.exec.grace_period_ms";
public static final String DRILL_PORT_HUNT = "drill.exec.port_hunt";
}
| KulykRoman/drill | exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java | Java | apache-2.0 | 42,918 |
using System;
using Akka.Util;
using Microsoft.VisualStudio.TestTools.UnitTesting;
namespace Akka.Tests.Util
{
[TestClass]
public class SwitchTests : AkkaSpec
{
[TestMethod]
public void OnAndOff()
{
var s = new Switch(false);
Assert.IsTrue(s.IsOff, "Initially should be off");
Assert.IsFalse(s.IsOn, "Initially should not be on");
Assert.IsTrue(s.SwitchOn(), "Switch on from off should succeed");
Assert.IsTrue(s.IsOn, "Switched on should be on");
Assert.IsFalse(s.IsOff, "Switched on should not be off");
Assert.IsFalse(s.SwitchOn(), "Switch on when already on should not succeed");
Assert.IsTrue(s.IsOn, "Already switched on should be on");
Assert.IsFalse(s.IsOff, "Already switched on should not be off");
Assert.IsTrue(s.SwitchOff(), "Switch off from on should succeed");
Assert.IsTrue(s.IsOff, "Switched off should be off");
Assert.IsFalse(s.IsOn, "Switched off should not be on");
Assert.IsFalse(s.SwitchOff(), "Switch off when already off should not succeed");
Assert.IsTrue(s.IsOff, "Already switched off should be off");
Assert.IsFalse(s.IsOn, "Already switched off should not be on");
}
[TestMethod]
public void InitiallyOnShouldBeOn()
{
var s = new Switch(true);
Assert.IsTrue(s.IsOn, "Switched on should be on");
Assert.IsFalse(s.IsOff, "Switched on should not be off");
}
[TestMethod]
public void Given_OffSwitch_When_SwitchOn_throws_exception_Then_Should_revert()
{
var s = new Switch(false);
intercept<InvalidOperationException>(() => s.SwitchOn(() => { throw new InvalidOperationException(); }));
Assert.IsTrue(s.IsOff);
Assert.IsFalse(s.IsOn);
}
[TestMethod]
public void Given_OnSwitch_When_SwitchOff_throws_exception_Then_Should_revert()
{
var s = new Switch(true);
intercept<InvalidOperationException>(() => s.SwitchOff(() => { throw new InvalidOperationException(); }));
Assert.IsTrue(s.IsOn);
Assert.IsFalse(s.IsOff);
}
[TestMethod]
public void RunActionWithoutLocking()
{
var s = new Switch(false);
var actionRun = false;
Assert.IsTrue(s.IfOff(() => { actionRun = true; }));
Assert.IsTrue(actionRun);
actionRun = false;
Assert.IsFalse(s.IfOn(() => { actionRun = true; }));
Assert.IsFalse(actionRun);
s.SwitchOn();
actionRun = false;
Assert.IsTrue(s.IfOn(() => { actionRun = true; }));
Assert.IsTrue(actionRun);
actionRun = false;
Assert.IsFalse(s.IfOff(() => { actionRun = true; }));
Assert.IsFalse(actionRun);
}
[TestMethod]
public void RunActionWithLocking()
{
var s = new Switch(false);
var actionRun = false;
Assert.IsTrue(s.WhileOff(() => { actionRun = true; }));
Assert.IsTrue(actionRun);
actionRun = false;
Assert.IsFalse(s.WhileOn(() => { actionRun = true; }));
Assert.IsFalse(actionRun);
s.SwitchOn();
actionRun = false;
Assert.IsTrue(s.WhileOn(() => { actionRun = true; }));
Assert.IsTrue(actionRun);
actionRun = false;
Assert.IsFalse(s.WhileOff(() => { actionRun = true; }));
Assert.IsFalse(actionRun);
}
}
} | Horusiath/akka.net | test/Akka.Tests/Util/SwitchTests.cs | C# | apache-2.0 | 3,717 |
# Minimal Flask "hello, world" sample application.
from flask import Flask
# Module-level app instance; Flask uses __name__ to locate resources.
app = Flask(__name__)
# Register the root ("/") GET endpoint.
@app.get("/")
def index():
    # Returns a plain-text greeting with HTTP 200.
    return "hello, world"
if __name__ == "__main__":
    # Dev only: run "python main.py" and open http://localhost:8080
    app.run(host="localhost", port=8080, debug=True)
| GoogleCloudPlatform/buildpack-samples | sample-python/main.py | Python | apache-2.0 | 252 |
/*
* Copyright 2012 Mike Adamson
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.assemblade.opendj.acis;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
public class AciFactory implements AciPatterns {
private static Pattern headerPattern = Pattern.compile(header);
private static Pattern targetPattern = Pattern.compile(target);
private static Pattern bodyPattern = Pattern.compile(body);
public static AccessControlItem parse(String aci) {
String name = null;
String targets = null;
String rules = null;
Matcher headerMatcher = headerPattern.matcher(aci);
if (headerMatcher.find()) {
targets = aci.substring(0, headerMatcher.start());
name = headerMatcher.group(1);
rules = aci.substring(headerMatcher.end());
}
List<Target> targetList = new ArrayList<Target>();
Matcher targetMatcher = targetPattern.matcher(targets);
while (targetMatcher.find()) {
String keyword = targetMatcher.group(1);
String operator = targetMatcher.group(2);
String expression = targetMatcher.group(3);
targetList.add(new Target(keyword, operator, expression));
}
List<Permission> ruleList = new ArrayList<Permission>();
Matcher bodyMatcher = bodyPattern.matcher(rules);
while (bodyMatcher.find()) {
String permission = bodyMatcher.group(1);
String rights = bodyMatcher.group(2);
String rule = bodyMatcher.group(3);
ruleList.add(new Permission(permission, rights, Subject.parse(rule)));
}
return new AccessControlItem(name, targetList, ruleList);
}
} | assemblade/CAT | cat-directory/src/main/java/com/assemblade/opendj/acis/AciFactory.java | Java | apache-2.0 | 2,298 |
//*******************************************************************************************//
// //
// Download Free Evaluation Version From: https://bytescout.com/download/web-installer //
// //
// Also available as Web API! Get Your Free API Key: https://app.pdf.co/signup //
// //
// Copyright © 2017-2020 ByteScout, Inc. All rights reserved. //
// https://www.bytescout.com //
// https://pdf.co //
// //
//*******************************************************************************************//
// Request headers: JSON payload plus the PDF.co API key (fill in your key).
const requestHeaders = new Headers();
requestHeaders.append("Content-Type", "application/json");
requestHeaders.append("x-api-key", "");

// You can also upload your own file into PDF.co and use it as url. Check "Upload File" samples for code snippets: https://github.com/bytescout/pdf-co-api-samples/tree/master/File%20Upload/
const payload = JSON.stringify({
  "url": "https://bytescout-com.s3-us-west-2.amazonaws.com/files/demo-files/cloud-api/document-parser/sample-invoice.pdf",
  "rulescsv": "Amazon,Amazon Web Services Invoice|Amazon CloudFront\nDigital Ocean,DigitalOcean|DOInvoice\nAcme,ACME Inc.|1540 Long Street, Jacksonville, 32099",
  "caseSensitive": "true",
  "async": false,
  "encrypt": "false",
  "inline": "true",
  "password": "",
  "profiles": ""
});

// Same request as the original promise chain, expressed with async/await.
(async () => {
  try {
    const response = await fetch("https://api.pdf.co/v1/pdf/classifier", {
      method: 'POST',
      headers: requestHeaders,
      body: payload,
      redirect: 'follow'
    });
    const result = await response.text();
    console.log(result);
  } catch (error) {
    console.log('error', error);
  }
})();
| bytescout/ByteScout-SDK-SourceCode | PDF.co Web API/PDF Classifier/JavaScript/Classify PDF From URL (jQuery)/program.js | JavaScript | apache-2.0 | 2,113 |
/*
Copyright 2015 Verloka Vadim, http://ogy.pp.ua
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
using System;
using System.Collections.Generic;
namespace JesusPassword.assets.core
{
    /// <summary>
    /// Serializable record of one website's stored credentials.
    /// NOTE(review): member layout of a [Serializable] struct affects its
    /// serialized form — do not reorder or rename fields casually.
    /// </summary>
    [Serializable]
    public struct Site
    {
        // Display name of the site.
        public string Name { get; set; }
        // Site URL/address.
        public string Address { get; set; }
        // Login (username) used on the site.
        public string Login { get; set; }
        // Stored password for the site.
        public string Password { get; set; }
        // E-mail address associated with the account.
        public string Mail { get; set; }
        // Additional user-defined name/value fields.
        public Dictionary<string, string> CustomFields { get; set; }
        // When this entry was added.
        public DateTime DateAdd { get; set; }
    }
}
| ogycode/JesusPassword | src/Windows/Unsupported projects/Jesus password/assets/core/Site.cs | C# | apache-2.0 | 1,134 |
/*
* Copyright 2017 Benedikt Ritter
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.britter.bootifytestpyramid.domain;
import org.junit.jupiter.api.Nested;
import org.junit.jupiter.api.Test;
import java.math.BigDecimal;
import static com.github.britter.bootifytestpyramid.domain.WeightTemplates.ONE;
import static com.github.britter.bootifytestpyramid.domain.WeightTemplates.TWO;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertAll;
import static org.junit.jupiter.api.Assertions.assertThrows;
// Unit tests for the Weight value object, grouped by concern via @Nested classes.
class WeightTest {
    // Constructor preconditions: Weight rejects null and negative magnitudes.
    @Nested
    class Invariants {
        @Test
        void should_throw_exception_when_passing_null_value() {
            assertThrows(NullPointerException.class, () -> new Weight(null));
        }
        @Test
        void should_throw_exception_when_passing_negative_value() {
            // Both the BigDecimal and the long overloads must reject negatives.
            assertAll(
                    () -> assertThrows(IllegalArgumentException.class, () -> new Weight(BigDecimal.valueOf(-1))),
                    () -> assertThrows(IllegalArgumentException.class, () -> new Weight(-1))
            );
        }
    }
    // Arithmetic operations on weights.
    @Nested
    class Calculations {
        @Nested
        class Add {
            @Test
            void should_add_weights() {
                assertThat(ONE.add(ONE)).isEqualTo(TWO);
            }
        }
        @Nested
        class Multiply {
            @Test
            void should_multiply_weights() {
                assertThat(ONE.multiply(2)).isEqualTo(TWO);
            }
            // NOTE(review): "negtaive" in the method name is a typo for "negative".
            @Test
            void should_throw_exception_when_multiply_with_negtaive_factor() {
                assertThrows(IllegalArgumentException.class, () -> ONE.multiply(-2));
            }
        }
    }
    // Comparable contract: reflexive zero, and symmetric less-than/greater-than.
    @Nested
    class Comparing {
        @Test
        void should_compare_to_other_weights() {
            assertAll(
                    () -> assertThat(ONE.compareTo(ONE)).isEqualTo(0),
                    () -> assertThat(ONE.compareTo(TWO)).isLessThan(0),
                    () -> assertThat(TWO.compareTo(ONE)).isGreaterThan(0)
            );
        }
    }
}
| britter/bootify-testpyramid | src/test/java/com/github/britter/bootifytestpyramid/domain/WeightTest.java | Java | apache-2.0 | 2,642 |
package com.occar.test.rest;
import javax.ws.rs.FormParam;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
/**
 * JAX-RS client/resource interface for the "/db" query endpoint.
 */
@Path("/db")
public interface DBRestClient {
    /**
     * Submits a query as form parameters and returns the JSON response.
     *
     * @param q the query text (form parameter "q")
     * @param uid caller/user identifier (form parameter "uid")
     * @return the HTTP response; body is JSON
     */
    @POST
    @Produces(MediaType.APPLICATION_JSON)
    public Response query(@FormParam("q") String query, @FormParam("uid") String uid);
}
/*******************************************************************************
*
* This file is part of iBioSim. Please visit <http://www.async.ece.utah.edu/ibiosim>
* for the latest version of iBioSim.
*
* Copyright (C) 2017 University of Utah
*
* This library is free software; you can redistribute it and/or modify it
* under the terms of the Apache License. A copy of the license agreement is provided
* in the file named "LICENSE.txt" included with this software distribution
* and also available online at <http://www.async.ece.utah.edu/ibiosim/License>.
*
*******************************************************************************/
// $ANTLR 3.4 /Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g 2013-06-26 17:00:36
package edu.utah.ece.async.lema.verification.platu.platuLpn.io;
import org.antlr.runtime.*;
import java.util.Stack;
import java.util.List;
import java.util.ArrayList;
@SuppressWarnings({"all", "warnings", "unchecked"})
public class PlatuGrammarLexer extends Lexer {
// Token-type constants emitted by the ANTLR 3.4 code generator for
// PlatuGrammar.g.  The numeric values are assigned by the generator and must
// stay in sync with the companion parser; regenerate from the grammar rather
// than renumbering by hand.
public static final int EOF=-1;
// Implicit literal tokens ('[', ']', 'assert', 'const', 'inf', 'inst', 'main').
public static final int T__57=57;
public static final int T__58=58;
public static final int T__59=59;
public static final int T__60=60;
public static final int T__61=61;
public static final int T__62=62;
public static final int T__63=63;
// Named grammar tokens, in the generator's (alphabetical) order.
public static final int AND=4;
public static final int BITWISE_AND=5;
public static final int BITWISE_LSHIFT=6;
public static final int BITWISE_NEGATION=7;
public static final int BITWISE_OR=8;
public static final int BITWISE_RSHIFT=9;
public static final int BITWISE_XOR=10;
public static final int COLON=11;
public static final int COMMA=12;
public static final int COMMENT=13;
public static final int DIGIT=14;
public static final int DIV=15;
public static final int EQUALS=16;
public static final int EQUIV=17;
public static final int FALSE=18;
public static final int GREATER=19;
public static final int GREATER_EQUAL=20;
public static final int ID=21;
public static final int IGNORE=22;
public static final int IMPLICATION=23;
public static final int INPUT=24;
public static final int INT=25;
public static final int INTERNAL=26;
public static final int LABEL=27;
public static final int LESS=28;
public static final int LESS_EQUAL=29;
public static final int LETTER=30;
public static final int LPAREN=31;
public static final int MARKING=32;
public static final int MINUS=33;
public static final int MOD=34;
public static final int MODULE=35;
public static final int MULTILINECOMMENT=36;
public static final int NAME=37;
public static final int NEGATION=38;
public static final int NOT_EQUIV=39;
public static final int OR=40;
public static final int OUTPUT=41;
public static final int PERIOD=42;
public static final int PLUS=43;
public static final int POSTSET=44;
public static final int PRESET=45;
public static final int QMARK=46;
public static final int QUOTE=47;
public static final int RPAREN=48;
public static final int SEMICOLON=49;
public static final int STATE_VECTOR=50;
public static final int TIMES=51;
public static final int TRANSITION=52;
public static final int TRUE=53;
public static final int UNDERSCORE=54;
public static final int WS=55;
public static final int XMLCOMMENT=56;
// delegates / delegators: this lexer has none.
/** @return an empty array; this generated lexer delegates to no sub-lexers. */
public Lexer[] getDelegates() {
    return new Lexer[] {};
}

/** No-arg constructor required by the ANTLR tooling. */
public PlatuGrammarLexer() {}

/** Creates a lexer over {@code input} with a fresh shared recognizer state. */
public PlatuGrammarLexer(CharStream input) {
    this(input, new RecognizerSharedState());
}

/** Creates a lexer over {@code input} that shares {@code state}. */
public PlatuGrammarLexer(CharStream input, RecognizerSharedState state) {
    super(input, state);
}

/** @return the path of the grammar file this lexer was generated from. */
public String getGrammarFileName() { return "/Users/zhangz/myBioSim/BioSim/gui/src/verification/platu/lpn/io/PlatuGrammar.g"; }
// $ANTLR start "T__57" .. "T__63"
// Hand-condensed from the ANTLR-generated rule bodies: each rule matches a
// single literal, then records its token type and the default channel on the
// shared recognizer state.  If match() fails it throws RecognitionException
// before `state` is touched — identical to the generated original, whose
// try/finally blocks were empty.

/** Matches the literal '['. */
public final void mT__57() throws RecognitionException {
    match('[');
    state.type = T__57;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}

/** Matches the literal ']'. */
public final void mT__58() throws RecognitionException {
    match(']');
    state.type = T__58;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}

/** Matches the keyword 'assert'. */
public final void mT__59() throws RecognitionException {
    match("assert");
    state.type = T__59;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}

/** Matches the keyword 'const'. */
public final void mT__60() throws RecognitionException {
    match("const");
    state.type = T__60;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}

/** Matches the keyword 'inf'. */
public final void mT__61() throws RecognitionException {
    match("inf");
    state.type = T__61;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}

/** Matches the keyword 'inst'. */
public final void mT__62() throws RecognitionException {
    match("inst");
    state.type = T__62;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}

/** Matches the keyword 'main'. */
public final void mT__63() throws RecognitionException {
    match("main");
    state.type = T__63;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}
// $ANTLR end "T__57" .. "T__63"
// $ANTLR start "LPAREN" .. "QUOTE"
// Punctuation token rules, condensed from the generated originals.  Each
// matches one character and records type/channel; match() throws on
// mismatch, leaving `state` untouched (the generated finally was empty).

/** Matches '('. */
public final void mLPAREN() throws RecognitionException {
    match('(');
    state.type = LPAREN;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}

/** Matches ')'. */
public final void mRPAREN() throws RecognitionException {
    match(')');
    state.type = RPAREN;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}

/** Matches '?'. */
public final void mQMARK() throws RecognitionException {
    match('?');
    state.type = QMARK;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}

/** Matches ':'. */
public final void mCOLON() throws RecognitionException {
    match(':');
    state.type = COLON;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}

/** Matches ';'. */
public final void mSEMICOLON() throws RecognitionException {
    match(';');
    state.type = SEMICOLON;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}

/** Matches '.'. */
public final void mPERIOD() throws RecognitionException {
    match('.');
    state.type = PERIOD;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}

/** Matches '_'. */
public final void mUNDERSCORE() throws RecognitionException {
    match('_');
    state.type = UNDERSCORE;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}

/** Matches ','. */
public final void mCOMMA() throws RecognitionException {
    match(',');
    state.type = COMMA;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}

/** Matches '"'. */
public final void mQUOTE() throws RecognitionException {
    match('\"');
    state.type = QUOTE;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}
// $ANTLR end "LPAREN" .. "QUOTE"
// $ANTLR start "MODULE" .. "FALSE"
// Keyword token rules, condensed from the generated originals.  Each matches
// one fixed keyword string and records type/channel; match(String) throws on
// mismatch before `state` is touched (the generated finally was empty).

/** Matches the keyword 'mod'. */
public final void mMODULE() throws RecognitionException {
    match("mod");
    state.type = MODULE;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}

/** Matches the keyword 'name'. */
public final void mNAME() throws RecognitionException {
    match("name");
    state.type = NAME;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}

/** Matches the keyword 'input'. */
public final void mINPUT() throws RecognitionException {
    match("input");
    state.type = INPUT;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}

/** Matches the keyword 'output'. */
public final void mOUTPUT() throws RecognitionException {
    match("output");
    state.type = OUTPUT;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}

/** Matches the keyword 'var'. */
public final void mINTERNAL() throws RecognitionException {
    match("var");
    state.type = INTERNAL;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}

/** Matches the keyword 'marking'. */
public final void mMARKING() throws RecognitionException {
    match("marking");
    state.type = MARKING;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}

/** Matches the keyword 'statevector'. */
public final void mSTATE_VECTOR() throws RecognitionException {
    match("statevector");
    state.type = STATE_VECTOR;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}

/** Matches the keyword 'transition'. */
public final void mTRANSITION() throws RecognitionException {
    match("transition");
    state.type = TRANSITION;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}

/** Matches the keyword 'label'. */
public final void mLABEL() throws RecognitionException {
    match("label");
    state.type = LABEL;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}

/** Matches the keyword 'preset'. */
public final void mPRESET() throws RecognitionException {
    match("preset");
    state.type = PRESET;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}

/** Matches the keyword 'postset'. */
public final void mPOSTSET() throws RecognitionException {
    match("postset");
    state.type = POSTSET;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}

/** Matches the keyword 'true'. */
public final void mTRUE() throws RecognitionException {
    match("true");
    state.type = TRUE;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}

/** Matches the keyword 'false'. */
public final void mFALSE() throws RecognitionException {
    match("false");
    state.type = FALSE;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}
// $ANTLR end "MODULE" .. "FALSE"
// $ANTLR start "PLUS" .. "MOD"
// Arithmetic operator token rules, condensed from the generated originals.

/** Matches '+'. */
public final void mPLUS() throws RecognitionException {
    match('+');
    state.type = PLUS;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}

/** Matches '-'. */
public final void mMINUS() throws RecognitionException {
    match('-');
    state.type = MINUS;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}

/** Matches '*'. */
public final void mTIMES() throws RecognitionException {
    match('*');
    state.type = TIMES;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}

/** Matches '/'. */
public final void mDIV() throws RecognitionException {
    match('/');
    state.type = DIV;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}

/** Matches '%'. */
public final void mMOD() throws RecognitionException {
    match('%');
    state.type = MOD;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}
// $ANTLR end "PLUS" .. "MOD"
// $ANTLR start "EQUALS" .. "NOT_EQUIV"
// Comparison operator token rules, condensed from the generated originals.

/** Matches '='. */
public final void mEQUALS() throws RecognitionException {
    match('=');
    state.type = EQUALS;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}

/** Matches '>'. */
public final void mGREATER() throws RecognitionException {
    match('>');
    state.type = GREATER;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}

/** Matches '<'. */
public final void mLESS() throws RecognitionException {
    match('<');
    state.type = LESS;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}

/** Matches '>='. */
public final void mGREATER_EQUAL() throws RecognitionException {
    match(">=");
    state.type = GREATER_EQUAL;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}

/** Matches '<='. */
public final void mLESS_EQUAL() throws RecognitionException {
    match("<=");
    state.type = LESS_EQUAL;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}

/** Matches '=='. */
public final void mEQUIV() throws RecognitionException {
    match("==");
    state.type = EQUIV;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}

/** Matches '!='. */
public final void mNOT_EQUIV() throws RecognitionException {
    match("!=");
    state.type = NOT_EQUIV;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}
// $ANTLR end "EQUALS" .. "NOT_EQUIV"
// $ANTLR start "NEGATION" .. "IMPLICATION"
// Logical operator token rules, condensed from the generated originals.

/** Matches '!'. */
public final void mNEGATION() throws RecognitionException {
    match('!');
    state.type = NEGATION;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}

/** Matches '&&'. */
public final void mAND() throws RecognitionException {
    match("&&");
    state.type = AND;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}

/** Matches '||'. */
public final void mOR() throws RecognitionException {
    match("||");
    state.type = OR;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}

/** Matches '->'. */
public final void mIMPLICATION() throws RecognitionException {
    match("->");
    state.type = IMPLICATION;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}
// $ANTLR end "NEGATION" .. "IMPLICATION"
// $ANTLR start "BITWISE_NEGATION" .. "BITWISE_RSHIFT"
// Bitwise operator token rules, condensed from the generated originals.

/** Matches '~'. */
public final void mBITWISE_NEGATION() throws RecognitionException {
    match('~');
    state.type = BITWISE_NEGATION;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}

/** Matches '&'. */
public final void mBITWISE_AND() throws RecognitionException {
    match('&');
    state.type = BITWISE_AND;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}

/** Matches '|'. */
public final void mBITWISE_OR() throws RecognitionException {
    match('|');
    state.type = BITWISE_OR;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}

/** Matches '^'. */
public final void mBITWISE_XOR() throws RecognitionException {
    match('^');
    state.type = BITWISE_XOR;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}

/** Matches '<<'. */
public final void mBITWISE_LSHIFT() throws RecognitionException {
    match("<<");
    state.type = BITWISE_LSHIFT;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}

/** Matches '>>'. */
public final void mBITWISE_RSHIFT() throws RecognitionException {
    match(">>");
    state.type = BITWISE_RSHIFT;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}
// $ANTLR end "BITWISE_NEGATION" .. "BITWISE_RSHIFT"
// $ANTLR start "LETTER" / "DIGIT"
// Fragment rules: they consume one character but do NOT set a token type or
// channel (they are only invoked from other rules, e.g. mID).  The error
// path — build a MismatchedSetException, recover, then throw — is preserved
// exactly from the generated original.

/** Fragment: consumes one ASCII letter [A-Za-z]. */
public final void mLETTER() throws RecognitionException {
    int la = input.LA(1);
    if ((la >= 'A' && la <= 'Z') || (la >= 'a' && la <= 'z')) {
        input.consume();
    } else {
        MismatchedSetException mse = new MismatchedSetException(null, input);
        recover(mse);
        throw mse;
    }
}

/** Fragment: consumes one decimal digit [0-9]. */
public final void mDIGIT() throws RecognitionException {
    int la = input.LA(1);
    if (la >= '0' && la <= '9') {
        input.consume();
    } else {
        MismatchedSetException mse = new MismatchedSetException(null, input);
        recover(mse);
        throw mse;
    }
}
// $ANTLR end "LETTER" / "DIGIT"
// $ANTLR start "INT"
/**
 * INT token: an optional leading '-' followed by one or more digits
 * ( '-'? DIGIT+ ).  Rewritten from the generated DFA loop into an explicit
 * while-loop; the zero-digit error (EarlyExitException for decision 2) and
 * the point at which state.type/state.channel are assigned are unchanged.
 */
public final void mINT() throws RecognitionException {
    // Optional sign.
    if (input.LA(1) == '-') {
        match('-');
    }
    // One or more digits; fewer than one is an early-exit error, exactly as
    // in the generated (...)+ loop numbered 2.
    int digits = 0;
    while (input.LA(1) >= '0' && input.LA(1) <= '9') {
        input.consume();
        digits++;
    }
    if (digits < 1) {
        throw new EarlyExitException(2, input);
    }
    state.type = INT;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}
// $ANTLR end "INT"
// $ANTLR start "ID"
/**
 * ID token: LETTER ( (UNDERSCORE | PERIOD)? (LETTER | DIGIT) )*.
 * A '_' or '.' may appear between alphanumerics but never at the end —
 * after consuming one, the next character MUST be alphanumeric, otherwise
 * the generated MismatchedSetException path (build, recover, throw) fires,
 * exactly as in the original.
 */
public final void mID() throws RecognitionException {
    mLETTER();
    while (true) {
        int la = input.LA(1);
        // Loop continues only on characters that can extend an identifier.
        boolean extendable = la == '.'
                || (la >= '0' && la <= '9')
                || (la >= 'A' && la <= 'Z')
                || la == '_'
                || (la >= 'a' && la <= 'z');
        if (!extendable) {
            break;
        }
        // Optional single separator.
        if (input.LA(1) == '.' || input.LA(1) == '_') {
            input.consume();
        }
        // Mandatory letter or digit after an (optional) separator.
        int c = input.LA(1);
        if ((c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z')) {
            input.consume();
        } else {
            MismatchedSetException mse = new MismatchedSetException(null, input);
            recover(mse);
            throw mse;
        }
    }
    state.type = ID;
    state.channel = DEFAULT_TOKEN_CHANNEL;
}
// $ANTLR end "ID"
// $ANTLR start "WS"
    /**
     * Lexer rule WS: matches one or more whitespace characters
     * ({@code ' ' | '\t' | '\n' | '\r' | '\f'}) and routes the token to the
     * hidden channel so the parser never sees it.
     * Generated by ANTLR from PlatuGrammar.g:1402.
     *
     * @throws RecognitionException if no whitespace character is present
     */
    public final void mWS() throws RecognitionException {
        try {
            int _type = WS;
            int _channel = DEFAULT_TOKEN_CHANNEL;
            // PlatuGrammar.g:1402: ( ' ' | '\t' | '\n' | '\r' | '\f' )+
            {
                // (...)+ loop: at least one whitespace char is required
                int cnt5=0;
                loop5:
                do {
                    int alt5=2;
                    int LA5_0 = input.LA(1);
                    if ( ((LA5_0 >= '\t' && LA5_0 <= '\n')||(LA5_0 >= '\f' && LA5_0 <= '\r')||LA5_0==' ') ) {
                        alt5=1;
                    }
                    switch (alt5) {
                        case 1 :
                            {
                                if ( (input.LA(1) >= '\t' && input.LA(1) <= '\n')||(input.LA(1) >= '\f' && input.LA(1) <= '\r')||input.LA(1)==' ' ) {
                                    input.consume();
                                }
                                else {
                                    MismatchedSetException mse = new MismatchedSetException(null,input);
                                    recover(mse);
                                    throw mse;
                                }
                            }
                            break;
                        default :
                            // stop once at least one char was consumed; otherwise fail
                            if ( cnt5 >= 1 ) break loop5;
                            EarlyExitException eee =
                                new EarlyExitException(5, input);
                            throw eee;
                    }
                    cnt5++;
                } while (true);
                // whitespace is hidden from the parser
                _channel = HIDDEN;
            }
            state.type = _type;
            state.channel = _channel;
        }
        finally {
            // do for sure before leaving
        }
    }
// $ANTLR end "WS"
// $ANTLR start "COMMENT"
    /**
     * Lexer rule COMMENT: matches a single-line comment {@code '//' ( . )* ( '\n' | '\r' )}
     * (slash-slash up to and including the line terminator) and hides it
     * from the parser. Generated by ANTLR from PlatuGrammar.g:1403.
     *
     * @throws RecognitionException if the comment is not terminated by a newline
     */
    public final void mCOMMENT() throws RecognitionException {
        try {
            int _type = COMMENT;
            int _channel = DEFAULT_TOKEN_CHANNEL;
            // PlatuGrammar.g:1403: '//' ( . )* ( '\n' | '\r' )
            {
                match("//");
                // consume everything up to (but not including) the line terminator
                loop6:
                do {
                    int alt6=2;
                    int LA6_0 = input.LA(1);
                    if ( (LA6_0=='\n'||LA6_0=='\r') ) {
                        alt6=2;
                    }
                    else if ( ((LA6_0 >= '\u0000' && LA6_0 <= '\t')||(LA6_0 >= '\u000B' && LA6_0 <= '\f')||(LA6_0 >= '\u000E' && LA6_0 <= '\uFFFF')) ) {
                        alt6=1;
                    }
                    switch (alt6) {
                        case 1 :
                            {
                                matchAny();
                            }
                            break;
                        default :
                            break loop6;
                    }
                } while (true);
                // the terminating '\n' or '\r' is part of the token
                if ( input.LA(1)=='\n'||input.LA(1)=='\r' ) {
                    input.consume();
                }
                else {
                    MismatchedSetException mse = new MismatchedSetException(null,input);
                    recover(mse);
                    throw mse;
                }
                // comments are hidden from the parser
                _channel = HIDDEN;
            }
            state.type = _type;
            state.channel = _channel;
        }
        finally {
            // do for sure before leaving
        }
    }
// $ANTLR end "COMMENT"
// $ANTLR start "MULTILINECOMMENT"
    /**
     * Lexer rule MULTILINECOMMENT: matches a block comment {@code '/*' ( . )* '*' '/'}
     * (non-greedy: the loop stops at the first "*&#47;") and hides it from the
     * parser. Generated by ANTLR from PlatuGrammar.g:1404.
     *
     * @throws RecognitionException if the closing "*&#47;" is missing
     */
    public final void mMULTILINECOMMENT() throws RecognitionException {
        try {
            int _type = MULTILINECOMMENT;
            int _channel = DEFAULT_TOKEN_CHANNEL;
            // PlatuGrammar.g:1404: '/*' ( . )* '*/'
            {
                match("/*");
                // consume anything until the lookahead sees "*/"
                loop7:
                do {
                    int alt7=2;
                    int LA7_0 = input.LA(1);
                    if ( (LA7_0=='*') ) {
                        int LA7_1 = input.LA(2);
                        if ( (LA7_1=='/') ) {
                            // "*/" ahead: end of comment body
                            alt7=2;
                        }
                        else if ( ((LA7_1 >= '\u0000' && LA7_1 <= '.')||(LA7_1 >= '0' && LA7_1 <= '\uFFFF')) ) {
                            alt7=1;
                        }
                    }
                    else if ( ((LA7_0 >= '\u0000' && LA7_0 <= ')')||(LA7_0 >= '+' && LA7_0 <= '\uFFFF')) ) {
                        alt7=1;
                    }
                    switch (alt7) {
                        case 1 :
                            {
                                matchAny();
                            }
                            break;
                        default :
                            break loop7;
                    }
                } while (true);
                match("*/");
                // comments are hidden from the parser
                _channel = HIDDEN;
            }
            state.type = _type;
            state.channel = _channel;
        }
        finally {
            // do for sure before leaving
        }
    }
// $ANTLR end "MULTILINECOMMENT"
// $ANTLR start "XMLCOMMENT"
    /**
     * Lexer rule XMLCOMMENT: matches an XML comment
     * {@code '<!--' ( . )* '-->'} (non-greedy: stops at the first "-->")
     * and hides it from the parser. Generated by ANTLR from PlatuGrammar.g:1405.
     *
     * @throws RecognitionException if the closing "-->" is missing
     */
    public final void mXMLCOMMENT() throws RecognitionException {
        try {
            int _type = XMLCOMMENT;
            int _channel = DEFAULT_TOKEN_CHANNEL;
            // PlatuGrammar.g:1405: ( '<' '!' '-' '-' ) ( . )* ( '-' '-' '>' )
            {
                // opening delimiter "<!--"
                {
                    match('<');
                    match('!');
                    match('-');
                    match('-');
                }
                // consume anything until the 3-char lookahead sees "-->"
                loop8:
                do {
                    int alt8=2;
                    int LA8_0 = input.LA(1);
                    if ( (LA8_0=='-') ) {
                        int LA8_1 = input.LA(2);
                        if ( (LA8_1=='-') ) {
                            int LA8_3 = input.LA(3);
                            if ( (LA8_3=='>') ) {
                                // "-->" ahead: end of comment body
                                alt8=2;
                            }
                            else if ( ((LA8_3 >= '\u0000' && LA8_3 <= '=')||(LA8_3 >= '?' && LA8_3 <= '\uFFFF')) ) {
                                alt8=1;
                            }
                        }
                        else if ( ((LA8_1 >= '\u0000' && LA8_1 <= ',')||(LA8_1 >= '.' && LA8_1 <= '\uFFFF')) ) {
                            alt8=1;
                        }
                    }
                    else if ( ((LA8_0 >= '\u0000' && LA8_0 <= ',')||(LA8_0 >= '.' && LA8_0 <= '\uFFFF')) ) {
                        alt8=1;
                    }
                    switch (alt8) {
                        case 1 :
                            {
                                matchAny();
                            }
                            break;
                        default :
                            break loop8;
                    }
                } while (true);
                // closing delimiter "-->"
                {
                    match('-');
                    match('-');
                    match('>');
                }
                // comments are hidden from the parser
                _channel = HIDDEN;
            }
            state.type = _type;
            state.channel = _channel;
        }
        finally {
            // do for sure before leaving
        }
    }
// $ANTLR end "XMLCOMMENT"
// $ANTLR start "IGNORE"
    /**
     * Lexer rule IGNORE: matches an XML processing instruction
     * {@code '<?' ( . )* '?>'} (e.g. an XML declaration) and hides it from
     * the parser. Generated by ANTLR from PlatuGrammar.g:1406.
     *
     * @throws RecognitionException if the closing "?>" is missing
     */
    public final void mIGNORE() throws RecognitionException {
        try {
            int _type = IGNORE;
            int _channel = DEFAULT_TOKEN_CHANNEL;
            // PlatuGrammar.g:1406: '<' '?' ( . )* '?' '>'
            {
                match('<');
                match('?');
                // consume anything until the lookahead sees "?>"
                loop9:
                do {
                    int alt9=2;
                    int LA9_0 = input.LA(1);
                    if ( (LA9_0=='?') ) {
                        int LA9_1 = input.LA(2);
                        if ( (LA9_1=='>') ) {
                            // "?>" ahead: end of instruction body
                            alt9=2;
                        }
                        else if ( ((LA9_1 >= '\u0000' && LA9_1 <= '=')||(LA9_1 >= '?' && LA9_1 <= '\uFFFF')) ) {
                            alt9=1;
                        }
                    }
                    else if ( ((LA9_0 >= '\u0000' && LA9_0 <= '>')||(LA9_0 >= '@' && LA9_0 <= '\uFFFF')) ) {
                        alt9=1;
                    }
                    switch (alt9) {
                        case 1 :
                            {
                                matchAny();
                            }
                            break;
                        default :
                            break loop9;
                    }
                } while (true);
                match('?');
                match('>');
                // ignored content is hidden from the parser
                _channel = HIDDEN;
            }
            state.type = _type;
            state.channel = _channel;
        }
        finally {
            // do for sure before leaving
        }
    }
// $ANTLR end "IGNORE"
    /**
     * Lexer entry point generated by ANTLR: uses DFA 10 to predict which of
     * the 58 token rules matches the upcoming input, then dispatches to the
     * corresponding rule method (mT__57 .. mIGNORE).
     *
     * @throws RecognitionException if no token alternative matches
     */
    public void mTokens() throws RecognitionException {
        // ( T__57 | T__58 | ... | IGNORE ) — 58 alternatives, chosen by DFA 10
        int alt10=58;
        alt10 = dfa10.predict(input);
        switch (alt10) {
            case 1 :
                // T__57
                { mT__57(); }
                break;
            case 2 :
                // T__58
                { mT__58(); }
                break;
            case 3 :
                // T__59
                { mT__59(); }
                break;
            case 4 :
                // T__60
                { mT__60(); }
                break;
            case 5 :
                // T__61
                { mT__61(); }
                break;
            case 6 :
                // T__62
                { mT__62(); }
                break;
            case 7 :
                // T__63
                { mT__63(); }
                break;
            case 8 :
                // LPAREN
                { mLPAREN(); }
                break;
            case 9 :
                // RPAREN
                { mRPAREN(); }
                break;
            case 10 :
                // QMARK
                { mQMARK(); }
                break;
            case 11 :
                // COLON
                { mCOLON(); }
                break;
            case 12 :
                // SEMICOLON
                { mSEMICOLON(); }
                break;
            case 13 :
                // PERIOD
                { mPERIOD(); }
                break;
            case 14 :
                // UNDERSCORE
                { mUNDERSCORE(); }
                break;
            case 15 :
                // COMMA
                { mCOMMA(); }
                break;
            case 16 :
                // QUOTE
                { mQUOTE(); }
                break;
            case 17 :
                // MODULE
                { mMODULE(); }
                break;
            case 18 :
                // NAME
                { mNAME(); }
                break;
            case 19 :
                // INPUT
                { mINPUT(); }
                break;
            case 20 :
                // OUTPUT
                { mOUTPUT(); }
                break;
            case 21 :
                // INTERNAL
                { mINTERNAL(); }
                break;
            case 22 :
                // MARKING
                { mMARKING(); }
                break;
            case 23 :
                // STATE_VECTOR
                { mSTATE_VECTOR(); }
                break;
            case 24 :
                // TRANSITION
                { mTRANSITION(); }
                break;
            case 25 :
                // LABEL
                { mLABEL(); }
                break;
            case 26 :
                // PRESET
                { mPRESET(); }
                break;
            case 27 :
                // POSTSET
                { mPOSTSET(); }
                break;
            case 28 :
                // TRUE
                { mTRUE(); }
                break;
            case 29 :
                // FALSE
                { mFALSE(); }
                break;
            case 30 :
                // PLUS
                { mPLUS(); }
                break;
            case 31 :
                // MINUS
                { mMINUS(); }
                break;
            case 32 :
                // TIMES
                { mTIMES(); }
                break;
            case 33 :
                // DIV
                { mDIV(); }
                break;
            case 34 :
                // MOD
                { mMOD(); }
                break;
            case 35 :
                // EQUALS
                { mEQUALS(); }
                break;
            case 36 :
                // GREATER
                { mGREATER(); }
                break;
            case 37 :
                // LESS
                { mLESS(); }
                break;
            case 38 :
                // GREATER_EQUAL
                { mGREATER_EQUAL(); }
                break;
            case 39 :
                // LESS_EQUAL
                { mLESS_EQUAL(); }
                break;
            case 40 :
                // EQUIV
                { mEQUIV(); }
                break;
            case 41 :
                // NOT_EQUIV
                { mNOT_EQUIV(); }
                break;
            case 42 :
                // NEGATION
                { mNEGATION(); }
                break;
            case 43 :
                // AND
                { mAND(); }
                break;
            case 44 :
                // OR
                { mOR(); }
                break;
            case 45 :
                // IMPLICATION
                { mIMPLICATION(); }
                break;
            case 46 :
                // BITWISE_NEGATION
                { mBITWISE_NEGATION(); }
                break;
            case 47 :
                // BITWISE_AND
                { mBITWISE_AND(); }
                break;
            case 48 :
                // BITWISE_OR
                { mBITWISE_OR(); }
                break;
            case 49 :
                // BITWISE_XOR
                { mBITWISE_XOR(); }
                break;
            case 50 :
                // BITWISE_LSHIFT
                { mBITWISE_LSHIFT(); }
                break;
            case 51 :
                // BITWISE_RSHIFT
                { mBITWISE_RSHIFT(); }
                break;
            case 52 :
                // INT
                { mINT(); }
                break;
            case 53 :
                // ID
                { mID(); }
                break;
            case 54 :
                // WS
                { mWS(); }
                break;
            case 55 :
                // COMMENT
                { mCOMMENT(); }
                break;
            case 56 :
                // MULTILINECOMMENT
                { mMULTILINECOMMENT(); }
                break;
            case 57 :
                // XMLCOMMENT
                { mXMLCOMMENT(); }
                break;
            case 58 :
                // IGNORE
                { mIGNORE(); }
                break;
        }
    }
    // DFA used by mTokens() to predict which token rule matches next.
    protected DFA10 dfa10 = new DFA10(this);

    // Packed transition tables for DFA 10, generated by ANTLR. Each string
    // encodes run-length-compressed shorts/chars (count, value pairs in
    // octal/unicode escapes); they are unpacked below by DFA.unpackEncodedString.
    // Do not edit by hand — regenerate from PlatuGrammar.g instead.
    static final String DFA10_eotS =
        "\3\uffff\4\46\11\uffff\10\46\1\uffff\1\67\1\uffff\1\72\1\uffff\1"+
        "\74\1\77\1\104\1\106\1\110\1\112\5\uffff\16\46\25\uffff\2\46\1\137"+
        "\4\46\1\144\2\46\1\147\11\46\1\uffff\1\161\1\46\1\163\1\46\1\uffff"+
        "\1\165\1\46\1\uffff\2\46\1\171\5\46\1\177\1\uffff\1\u0080\1\uffff"+
        "\1\46\1\uffff\3\46\1\uffff\1\u0085\2\46\1\u0088\1\u0089\2\uffff"+
        "\1\46\1\u008b\2\46\1\uffff\1\u008e\1\46\2\uffff\1\u0090\1\uffff"+
        "\2\46\1\uffff\1\u0093\1\uffff\2\46\1\uffff\3\46\1\u0099\1\u009a"+
        "\2\uffff";
    static final String DFA10_eofS =
        "\u009b\uffff";
    // minimum input character accepted from each DFA state
    static final String DFA10_minS =
        "\1\11\2\uffff\1\163\1\157\1\156\1\141\11\uffff\1\141\1\165\1\141"+
        "\1\164\1\162\1\141\1\157\1\141\1\uffff\1\60\1\uffff\1\52\1\uffff"+
        "\2\75\1\41\1\75\1\46\1\174\5\uffff\1\163\1\156\1\146\1\151\1\144"+
        "\1\155\1\164\1\162\2\141\1\142\1\145\1\163\1\154\25\uffff\1\145"+
        "\1\163\1\56\1\164\1\165\1\156\1\153\1\56\1\145\1\160\1\56\1\164"+
        "\1\156\2\145\1\163\1\164\1\163\1\162\1\164\1\uffff\1\56\1\164\1"+
        "\56\1\151\1\uffff\1\56\1\165\1\uffff\1\145\1\163\1\56\1\154\1\145"+
        "\1\163\1\145\1\164\1\56\1\uffff\1\56\1\uffff\1\156\1\uffff\1\164"+
        "\1\166\1\151\1\uffff\1\56\1\164\1\145\2\56\2\uffff\1\147\1\56\1"+
        "\145\1\164\1\uffff\1\56\1\164\2\uffff\1\56\1\uffff\1\143\1\151\1"+
        "\uffff\1\56\1\uffff\1\164\1\157\1\uffff\1\157\1\156\1\162\2\56\2"+
        "\uffff";
    // maximum input character accepted from each DFA state
    static final String DFA10_maxS =
        "\1\176\2\uffff\1\163\1\157\1\156\1\157\11\uffff\1\141\1\165\1\141"+
        "\1\164\1\162\1\141\1\162\1\141\1\uffff\1\76\1\uffff\1\57\1\uffff"+
        "\1\75\1\76\1\77\1\75\1\46\1\174\5\uffff\1\163\1\156\1\163\1\162"+
        "\1\144\1\155\1\164\1\162\1\141\1\165\1\142\1\145\1\163\1\154\25"+
        "\uffff\1\145\1\163\1\172\1\164\1\165\1\156\1\153\1\172\1\145\1\160"+
        "\1\172\1\164\1\156\2\145\1\163\1\164\1\163\1\162\1\164\1\uffff\1"+
        "\172\1\164\1\172\1\151\1\uffff\1\172\1\165\1\uffff\1\145\1\163\1"+
        "\172\1\154\1\145\1\163\1\145\1\164\1\172\1\uffff\1\172\1\uffff\1"+
        "\156\1\uffff\1\164\1\166\1\151\1\uffff\1\172\1\164\1\145\2\172\2"+
        "\uffff\1\147\1\172\1\145\1\164\1\uffff\1\172\1\164\2\uffff\1\172"+
        "\1\uffff\1\143\1\151\1\uffff\1\172\1\uffff\1\164\1\157\1\uffff\1"+
        "\157\1\156\1\162\2\172\2\uffff";
    // accepting-state -> alternative number (token rule) mapping
    static final String DFA10_acceptS =
        "\1\uffff\1\1\1\2\4\uffff\1\10\1\11\1\12\1\13\1\14\1\15\1\16\1\17"+
        "\1\20\10\uffff\1\36\1\uffff\1\40\1\uffff\1\42\6\uffff\1\56\1\61"+
        "\1\64\1\65\1\66\16\uffff\1\55\1\37\1\67\1\70\1\41\1\50\1\43\1\46"+
        "\1\63\1\44\1\47\1\62\1\71\1\72\1\45\1\51\1\52\1\53\1\57\1\54\1\60"+
        "\24\uffff\1\5\4\uffff\1\21\2\uffff\1\25\11\uffff\1\6\1\uffff\1\7"+
        "\1\uffff\1\22\3\uffff\1\34\5\uffff\1\4\1\23\4\uffff\1\31\2\uffff"+
        "\1\35\1\3\1\uffff\1\24\2\uffff\1\32\1\uffff\1\26\2\uffff\1\33\5"+
        "\uffff\1\30\1\27";
    static final String DFA10_specialS =
        "\u009b\uffff}>";
    // per-state transition rows (one string per DFA state; "" = no transitions)
    static final String[] DFA10_transitionS = {
            "\2\47\1\uffff\2\47\22\uffff\1\47\1\40\1\17\2\uffff\1\34\1\41"+
            "\1\uffff\1\7\1\10\1\32\1\30\1\16\1\31\1\14\1\33\12\45\1\12\1"+
            "\13\1\37\1\35\1\36\1\11\1\uffff\32\46\1\1\1\uffff\1\2\1\44\1"+
            "\15\1\uffff\1\3\1\46\1\4\2\46\1\27\2\46\1\5\2\46\1\25\1\6\1"+
            "\20\1\21\1\26\2\46\1\23\1\24\1\46\1\22\4\46\1\uffff\1\42\1\uffff"+
            "\1\43",
            "",
            "",
            "\1\50",
            "\1\51",
            "\1\52",
            "\1\53\15\uffff\1\54",
            "",
            "",
            "",
            "",
            "",
            "",
            "",
            "",
            "",
            "\1\55",
            "\1\56",
            "\1\57",
            "\1\60",
            "\1\61",
            "\1\62",
            "\1\64\2\uffff\1\63",
            "\1\65",
            "",
            "\12\45\4\uffff\1\66",
            "",
            "\1\71\4\uffff\1\70",
            "",
            "\1\73",
            "\1\75\1\76",
            "\1\102\32\uffff\1\101\1\100\1\uffff\1\103",
            "\1\105",
            "\1\107",
            "\1\111",
            "",
            "",
            "",
            "",
            "",
            "\1\113",
            "\1\114",
            "\1\115\11\uffff\1\117\2\uffff\1\116",
            "\1\120\10\uffff\1\121",
            "\1\122",
            "\1\123",
            "\1\124",
            "\1\125",
            "\1\126",
            "\1\127\23\uffff\1\130",
            "\1\131",
            "\1\132",
            "\1\133",
            "\1\134",
            "",
            "",
            "",
            "",
            "",
            "",
            "",
            "",
            "",
            "",
            "",
            "",
            "",
            "",
            "",
            "",
            "",
            "",
            "",
            "",
            "",
            "\1\135",
            "\1\136",
            "\1\46\1\uffff\12\46\7\uffff\32\46\4\uffff\1\46\1\uffff\32\46",
            "\1\140",
            "\1\141",
            "\1\142",
            "\1\143",
            "\1\46\1\uffff\12\46\7\uffff\32\46\4\uffff\1\46\1\uffff\32\46",
            "\1\145",
            "\1\146",
            "\1\46\1\uffff\12\46\7\uffff\32\46\4\uffff\1\46\1\uffff\32\46",
            "\1\150",
            "\1\151",
            "\1\152",
            "\1\153",
            "\1\154",
            "\1\155",
            "\1\156",
            "\1\157",
            "\1\160",
            "",
            "\1\46\1\uffff\12\46\7\uffff\32\46\4\uffff\1\46\1\uffff\32\46",
            "\1\162",
            "\1\46\1\uffff\12\46\7\uffff\32\46\4\uffff\1\46\1\uffff\32\46",
            "\1\164",
            "",
            "\1\46\1\uffff\12\46\7\uffff\32\46\4\uffff\1\46\1\uffff\32\46",
            "\1\166",
            "",
            "\1\167",
            "\1\170",
            "\1\46\1\uffff\12\46\7\uffff\32\46\4\uffff\1\46\1\uffff\32\46",
            "\1\172",
            "\1\173",
            "\1\174",
            "\1\175",
            "\1\176",
            "\1\46\1\uffff\12\46\7\uffff\32\46\4\uffff\1\46\1\uffff\32\46",
            "",
            "\1\46\1\uffff\12\46\7\uffff\32\46\4\uffff\1\46\1\uffff\32\46",
            "",
            "\1\u0081",
            "",
            "\1\u0082",
            "\1\u0083",
            "\1\u0084",
            "",
            "\1\46\1\uffff\12\46\7\uffff\32\46\4\uffff\1\46\1\uffff\32\46",
            "\1\u0086",
            "\1\u0087",
            "\1\46\1\uffff\12\46\7\uffff\32\46\4\uffff\1\46\1\uffff\32\46",
            "\1\46\1\uffff\12\46\7\uffff\32\46\4\uffff\1\46\1\uffff\32\46",
            "",
            "",
            "\1\u008a",
            "\1\46\1\uffff\12\46\7\uffff\32\46\4\uffff\1\46\1\uffff\32\46",
            "\1\u008c",
            "\1\u008d",
            "",
            "\1\46\1\uffff\12\46\7\uffff\32\46\4\uffff\1\46\1\uffff\32\46",
            "\1\u008f",
            "",
            "",
            "\1\46\1\uffff\12\46\7\uffff\32\46\4\uffff\1\46\1\uffff\32\46",
            "",
            "\1\u0091",
            "\1\u0092",
            "",
            "\1\46\1\uffff\12\46\7\uffff\32\46\4\uffff\1\46\1\uffff\32\46",
            "",
            "\1\u0094",
            "\1\u0095",
            "",
            "\1\u0096",
            "\1\u0097",
            "\1\u0098",
            "\1\46\1\uffff\12\46\7\uffff\32\46\4\uffff\1\46\1\uffff\32\46",
            "\1\46\1\uffff\12\46\7\uffff\32\46\4\uffff\1\46\1\uffff\32\46",
            "",
            ""
    };

    // Unpacked (usable) table forms, built once at class-load time.
    static final short[] DFA10_eot = DFA.unpackEncodedString(DFA10_eotS);
    static final short[] DFA10_eof = DFA.unpackEncodedString(DFA10_eofS);
    static final char[] DFA10_min = DFA.unpackEncodedStringToUnsignedChars(DFA10_minS);
    static final char[] DFA10_max = DFA.unpackEncodedStringToUnsignedChars(DFA10_maxS);
    static final short[] DFA10_accept = DFA.unpackEncodedString(DFA10_acceptS);
    static final short[] DFA10_special = DFA.unpackEncodedString(DFA10_specialS);
    static final short[][] DFA10_transition;

    static {
        // unpack one transition row per DFA state
        int numStates = DFA10_transitionS.length;
        DFA10_transition = new short[numStates][];
        for (int i=0; i<numStates; i++) {
            DFA10_transition[i] = DFA.unpackEncodedString(DFA10_transitionS[i]);
        }
    }
    /**
     * Token-prediction DFA (decision 10) generated by ANTLR. Wires the
     * packed/unpacked tables above into the runtime {@code DFA} base class,
     * which performs the actual prediction in {@code predict(input)}.
     */
    class DFA10 extends DFA {

        public DFA10(BaseRecognizer recognizer) {
            this.recognizer = recognizer;
            this.decisionNumber = 10;
            this.eot = DFA10_eot;
            this.eof = DFA10_eof;
            this.min = DFA10_min;
            this.max = DFA10_max;
            this.accept = DFA10_accept;
            this.special = DFA10_special;
            this.transition = DFA10_transition;
        }

        /** Human-readable description of the decision this DFA implements. */
        public String getDescription() {
            return "1:1: Tokens : ( T__57 | T__58 | T__59 | T__60 | T__61 | T__62 | T__63 | LPAREN | RPAREN | QMARK | COLON | SEMICOLON | PERIOD | UNDERSCORE | COMMA | QUOTE | MODULE | NAME | INPUT | OUTPUT | INTERNAL | MARKING | STATE_VECTOR | TRANSITION | LABEL | PRESET | POSTSET | TRUE | FALSE | PLUS | MINUS | TIMES | DIV | MOD | EQUALS | GREATER | LESS | GREATER_EQUAL | LESS_EQUAL | EQUIV | NOT_EQUIV | NEGATION | AND | OR | IMPLICATION | BITWISE_NEGATION | BITWISE_AND | BITWISE_OR | BITWISE_XOR | BITWISE_LSHIFT | BITWISE_RSHIFT | INT | ID | WS | COMMENT | MULTILINECOMMENT | XMLCOMMENT | IGNORE );";
        }
    }
} | MyersResearchGroup/iBioSim | verification/src/main/java/edu/utah/ece/async/lema/verification/platu/platuLpn/io/PlatuGrammarLexer.java | Java | apache-2.0 | 77,940 |
/*
* Copyright 2015 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package demo;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.context.annotation.ComponentScan;
import transform.LoggingTransformer;
/**
 * Spring Boot entry point for the transform sample application.
 * Component scanning is rooted at {@link LoggingTransformer}'s package so the
 * transformer bean is picked up even though it lives outside this package.
 */
@SpringBootApplication
@ComponentScan(basePackageClasses=LoggingTransformer.class)
public class TransformApplication {

	/** Bootstraps the Spring application context. */
	public static void main(String[] args) {
		SpringApplication.run(TransformApplication.class, args);
	}
}
| ericbottard/spring-cloud-stream | spring-cloud-stream-samples/transform/src/main/java/demo/TransformApplication.java | Java | apache-2.0 | 1,079 |
/*
* Copyright 2013-2015 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.zookeeper.discovery.dependency;
import org.springframework.boot.autoconfigure.condition.ConditionOutcome;
import org.springframework.context.annotation.ConditionContext;
import org.springframework.core.type.AnnotatedTypeMetadata;
/**
* Inverse of the {@link ConditionalOnDependenciesPassed} condition. Also checks if switch for zookeeper dependencies
* was turned on
*
* @author Marcin Grzejszczak
* @since 1.0.0
*/
/**
 * Inverse of the {@code ConditionalOnDependenciesPassed} condition: matches
 * only when Zookeeper dependencies are NOT configured. Also checks whether the
 * {@code spring.cloud.zookeeper.dependency.enabled} switch was turned on.
 *
 * @author Marcin Grzejszczak
 * @since 1.0.0
 */
public class DependenciesNotPassedCondition extends DependenciesPassedCondition {

	@Override
	public ConditionOutcome getMatchOutcome(ConditionContext context, AnnotatedTypeMetadata metadata) {
		// Delegate to the positive condition and invert a match.
		ConditionOutcome propertiesSet = super.getMatchOutcome(context, metadata);
		if (propertiesSet.isMatch()) {
			return ConditionOutcome.inverse(propertiesSet);
		}
		// Primitive boolean instead of boxed Boolean: getProperty with a default
		// never returns null here, and this avoids a pointless unboxing step.
		boolean dependenciesEnabled = context.getEnvironment()
				.getProperty("spring.cloud.zookeeper.dependency.enabled", Boolean.class, false);
		if (dependenciesEnabled) {
			return ConditionOutcome.noMatch("Dependencies are defined in configuration and switch is turned on");
		}
		return ConditionOutcome.match("Dependencies are not defined in configuration and switch is turned off");
	}
}
| 4finance/spring-cloud-zookeeper | spring-cloud-zookeeper-discovery/src/main/java/org/springframework/cloud/zookeeper/discovery/dependency/DependenciesNotPassedCondition.java | Java | apache-2.0 | 1,832 |
import json
import random
from datetime import datetime, timedelta
import hashlib
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render_to_response
from django.template import loader
from django.utils import encoding
from core.grafana.GrafanaES import Grafana
from core.grafana.QueryES import Query
from core.grafana.data_tranformation import stacked_hist, pledges_merging
from core.libs.cache import setCacheEntry, getCacheEntry
from core.oauth.utils import login_customrequired
from core.views import initRequest, DateTimeEncoder, DateEncoder
# Fixed colour palette for plot series, keyed by series label (activity name,
# job status, cloud/country code, data format, etc.). Labels without an entry
# here get a random colour assigned at render time (see grafana_api).
colours_codes = {
    "0": "#AE3C51",
    "1": "#6298FF",
    "2": "#D97529",
    "3": "#009246",
    "AOD": "#006019",
    "Analysis": "#FF00FF",
    "CA": "#FF1F1F",
    "CAF processing": "#CAD141",
    "CERN": "#AE3C51",
    "Custodial": "#FF0000",
    "DE": "#000000",
    "DESD": "#4189FF",
    "DPD": "#FEF100",
    "Data Processing": "#FFFF00",
    "Data Processing (XP)": "#008800",
    "Default": "#808080",
    "ES": "#EDBF00",
    "ESD": "#001640",
    "Extra Production": "#FF0000",
    "FR": "#0055A5",
    "Group Analysis": "#808080",
    "Group Production": "#008800",
    "HITS": "#FF6666",
    "IT": "#009246",
    "MC Event Generation": "#356C20",
    "MC Production": "#0000FF",
    "MC Reconstruction": "#00006B",
    "MC Reconstruction (XP)": "#D97529",
    "MC Simulation": "#0000FF",
    "MC Simulation (XP)": "#AE3C51",
    "MC Simulation Fast": "#0099CC",
    "MC Simulation Fast (XP)": "#0099CC",
    "MC Simulation Full": "#00CCCC",
    "MC Simulation Full (XP)": "#00CCCC",
    "ND": "#6298FF",
    "NL": "#D97529",
    "Other": "#66008D",
    "Others": "#00FFFF",
    "Others (XP)": "#009246",
    "Primary": "#FFA500",
    "RAW": "#FF0000",
    "RU": "#66008D",
    "Rest": "#625D5D",
    "Secondary": "#00FFFF",
    "T0 processing": "#DB9900",
    "TW": "#89000F",
    "Testing": "#00FF00",
    "ToBeDeleted": "#FFFF00",
    "UK": "#356C20",
    "UNKNOWN": "#FFA500",
    "US": "#00006B",
    "User Analysis": "#FF00FF",
    "Validation": "#000000",
    "analysis": "#FF0000",
    "bstream": "#0055A5",
    "cancelled": "#FF9933",
    "closed": "#808080",
    "evgen": "#D97529",
    "evgentx": "#AE3C51",
    "failed": "#bf1b00",
    "filter": "#DB9900",
    "finished": "#248F24",
    "ganga": "#1433CC",
    "gangarobot": "#006666",
    "gangarobot-64": "#009999",
    "gangarobot-filestager": "#00CCCC",
    "gangarobot-new": "#00FFFF",
    "gangarobot-nightly": "#99FF00",
    "gangarobot-pft": "#99CC33",
    "gangarobot-pft-trial": "#999966",
    "gangarobot-rctest": "#996699",
    "gangarobot-root": "#CC0000",
    "gangarobot-squid": "#CC0066",
    "gangarobotnew": "#CC3399",
    "hammercloud": "#A5D3CA",
    "merge": "#FFA600",
    "merging": "#47D147",
    "non-panda_analysis": "#CCCCCC",
    "pandamover": "#FFE920",
    "pile": "#FF00FF",
    "prod_test": "#B4D1B6",
    "production": "#CAD141",
    "ptest": "#89C7FF",
    "rc_test": "#A5FF8A",
    "reco": "#00006B",
    "reprocessing": "#008800",
    "running": "#47D147",
    "simul": "#0000FF",
    # NOTE(review): "#FFCFA4s" has a stray trailing 's' — looks like a typo for
    # "#FFCFA4"; kept as-is since downstream handling of bad colours is unverified.
    "software": "#FFCFA4s",
    "t0_caf": "#CAD141",
    "t0_processing": "#FFA600",
    "test": "#00FF00",
    "transfering": "#47D147",
    "txtgen": "#29AFD6",
    "validation": "#000000"
}
@login_customrequired
def index(request):
    """Render the main Grafana plots page (d3js flavour).

    The page only carries the drop-down menu options (group-by, split-series
    and plot choices); the plot data itself is fetched asynchronously from the
    grafana_api view.
    """
    initRequest(request)
    context = {
        'group_by': {'dst_federation': 'Federation'},
        'split_series': {'adcactivity': 'ADC Activity', 'jobstatus': 'Job status'},
        'plots': {'cpuconsumption': 'CPU Consumption', 'wallclockhepspec06': 'WallClock HEPSPEC06'},
    }
    return render_to_response('grafana-api-plots.html', context, content_type='text/html')
def chartjs(request):
    """Render the main Grafana plots page (Chart.js flavour).

    The page only carries the drop-down menu options (group-by, split-series
    and plot choices); the plot data itself is fetched asynchronously from the
    grafana_api view.
    """
    initRequest(request)
    context = {
        'group_by': {'dst_federation': 'Federation'},
        'split_series': {'adcactivity': 'ADC Activity', 'jobstatus': 'Job status'},
        'plots': {'cpuconsumption': 'CPU Consumption', 'wallclockhepspec06': 'WallClock HEPSPEC06'},
    }
    return render_to_response('grafana-chartjs-plots.html', context, content_type='text/html')
def grafana_api(request):
    """Query Grafana/InfluxDB and return plot data in the requested format.

    Request parameters (in request.session['requestParams']):
      * groupby -- comma-separated list; first item is the group-by field,
        second (optional) the split-series field; 'time' disables grouping.
      * type -- 'd3js' (plain stacked-histogram JSON) or 'chartjs'
        (Chart.js labels/datasets structure with pledges overlaid).
      * export -- 'csv' to download the grouped data as a CSV attachment.

    Returns a JsonResponse (or an HttpResponse for CSV export). On error the
    exception text is returned inside the JSON payload.

    Fixes over the original version:
      * JsonResponse(..., safe=False) so list payloads do not raise TypeError;
      * exceptions are stringified before being serialized;
      * groupby_params is initialised so 'export' without 'groupby' cannot
        raise NameError.
    """
    valid, response = initRequest(request)

    group_by = None
    split_series = None
    groupby_params = []  # default so the csv branch is safe without 'groupby'
    if 'groupby' in request.session['requestParams']:
        groupby_params = request.session['requestParams']['groupby'].split(',')
        if 'time' in groupby_params:
            pass  # time-based grouping: no categorical group-by
        else:
            group_by = groupby_params[0]
            if len(groupby_params) > 1:
                split_series = groupby_params[1]

    result = []
    q = Query()
    q = q.request_to_query(request)
    # Separate query for the last reported pledge value per federation
    # (served from the monit_production_rebus database).
    last_pledges = Query(agg_func='last', table='pledges_last', field='value', grouping='real_federation')
    try:
        # Pledge tables live in a different InfluxDB database.
        if q.table in ('pledges_last', 'pledges_sum', 'pledges_hs06sec'):
            result = Grafana(database='monit_production_rebus').get_data(q)
        else:
            result = Grafana().get_data(q)

        req_params = request.session['requestParams']

        if req_params.get('type') == 'd3js':
            data = stacked_hist(result['results'][0]['series'], group_by, split_series)
            return JsonResponse(data)

        if req_params.get('type') == 'chartjs':
            last_pledges = Grafana(database='monit_production_rebus').get_data(last_pledges)
            data = stacked_hist(result['results'][0]['series'], group_by, split_series)
            last_pledges = stacked_hist(last_pledges['results'][0]['series'], 'real_federation')
            labels = list(data.keys())
            pledges_keys = list(last_pledges.keys())
            datasets = []
            # Re-pivot {group: {series: value}} into per-series value lists,
            # adding a synthetic 'pledges' series (value scaled to seconds/week).
            elements = {}
            for fed in data:
                for series_name in data[fed]:
                    elements.setdefault(series_name, []).append(data[fed][series_name])
                if fed in pledges_keys:
                    elements.setdefault('pledges', []).append(last_pledges[fed]['all'] * 7 * 24 * 60 * 60)
                else:
                    elements.setdefault('pledges', []).append(0)
            for series_name, values in elements.items():
                if series_name in colours_codes:
                    background = colours_codes[series_name]
                else:
                    # no fixed palette entry: pick a random colour
                    background = '#%02X%02X%02X' % (random.randint(0, 255),
                                                    random.randint(0, 255),
                                                    random.randint(0, 255))
                if series_name != 'pledges':
                    datasets.append(
                        {'label': series_name, 'stack': 'Stack 0', 'data': values, 'backgroundColor': background})
                else:
                    # pledges go on their own stack, always in red
                    datasets.append(
                        {'label': series_name, 'stack': 'Stack 1', 'data': values, 'backgroundColor': '#FF0000'})
            data = {'labels': labels, 'datasets': datasets}
            return HttpResponse(json.dumps(data, cls=DateTimeEncoder), content_type='application/json')

        if req_params.get('export') == 'csv':
            data = stacked_hist(result['results'][0]['series'], group_by, split_series)
            import csv
            import copy
            response = HttpResponse(content_type='text/csv')
            column_titles = copy.deepcopy(groupby_params)
            column_titles.append('value')
            response['Content-Disposition'] = 'attachment; filename={0}.csv'.format('_'.join(groupby_params))
            writer = csv.writer(response, delimiter=";")
            writer.writerow(column_titles)
            csv_rows = []
            if len(groupby_params) > 1:
                csv_rows = grab_children(data)
            else:
                for key, value in data.items():
                    csv_rows.append([key, value['all']])
            writer.writerows(csv_rows)
            return response
    except Exception as ex:
        # Serialize the error text (an Exception object is not JSON-serializable).
        result = [str(ex)]
    # safe=False: result may be a list (errors) as well as a dict (raw Grafana data).
    return JsonResponse(result, safe=False)
def grab_children(data, parent=None, child=None):
    """Flatten a nested dict into rows of [parent_key, key, leaf_value].

    Non-dict values become a row tagged with the key of the dict that
    contains them; nested dicts are descended into recursively.  Used to
    build CSV rows from grouped histogram data.
    """
    rows = [] if child is None else child
    for name, entry in data.items():
        if isinstance(entry, dict):
            # Recurse, carrying the current key down as the parent tag.
            grab_children(entry, name, rows)
        else:
            rows.append([parent, name, entry])
    return rows
#@login_customrequired
def pledges(request):
    """Serve the federations/countries pledges comparison page.

    Behaviour depends on ``type`` in the request parameters:
      * ``federation`` -- JSON list of per-federation consumed HS06-days vs pledged;
      * ``country``    -- JSON list aggregated per country;
      * otherwise      -- renders the ``grafana-pledges.html`` template shell.

    Results are cached via setCacheEntry/getCacheEntry; cache keys are derived
    from the selected time range.
    """
    valid, response = initRequest(request)
    # Resolve the reporting interval: either supplied in the request or the
    # previous 7 full days (UTC).
    if 'date_from' in request.session['requestParams'] and 'date_to' in request.session['requestParams']:
        starttime = request.session['requestParams']['date_from']
        endtime = request.session['requestParams']['date_to']
        date_to = datetime.strptime(endtime, "%d.%m.%Y %H:%M:%S")
        date_from = datetime.strptime(starttime, "%d.%m.%Y %H:%M:%S")
        total_seconds = (date_to - date_from).total_seconds()
        total_days = (date_to - date_from).days
        date_list = []
        if (date_to - date_from).days > 30:
            # Split long ranges into 20-day windows (each ending 1 minute short
            # of the next window's start) to keep individual queries small.
            n = 20
            while True:
                start_date = date_from
                end_date = (start_date + timedelta(days=n))
                end_date = end_date - timedelta(minutes=1)
                if end_date >= date_to:
                    end_date = date_to - timedelta(minutes=1)
                    date_list.append([start_date.strftime("%d.%m.%Y %H:%M:%S"), end_date.strftime("%d.%m.%Y %H:%M:%S")])
                    break
                else:
                    date_list.append([start_date.strftime("%d.%m.%Y %H:%M:%S"), end_date.strftime("%d.%m.%Y %H:%M:%S")])
                    date_from = end_date + timedelta(minutes=1)
        else:
            newendtime = (date_to - timedelta(minutes=1)).strftime("%d.%m.%Y %H:%M:%S")
            date_list.append([starttime, newendtime])
    else:
        # Default window: the last 7 days, aligned to UTC midnight.
        timebefore = timedelta(days=7)
        endtime = (datetime.utcnow()).replace(minute=00, hour=00, second=00, microsecond=000)
        starttime = (endtime - timebefore).replace(minute=00, hour=00, second=00, microsecond=000)
        # Fix: the interval length must be positive (was starttime - endtime,
        # which produced a negative number of seconds).
        total_seconds = (endtime - starttime).total_seconds()
        total_days = (endtime - starttime).days
        endtime = endtime - timedelta(minutes=1)
        endtime = endtime.strftime("%d.%m.%Y %H:%M:%S")
        starttime = starttime.strftime("%d.%m.%Y %H:%M:%S")
        # Fix: date_list was never initialised on this branch, which made the
        # 'federation'/'country' handlers below fail with a NameError.
        date_list = [[starttime, endtime]]
    if 'type' in request.session['requestParams'] and request.session['requestParams'] \
            ['type'] == 'federation':
        key = hashlib.md5(encoding.force_bytes("{0}_{1}_federation".format(starttime, endtime)))
        key = key.hexdigest()
        federations = getCacheEntry(request, key, isData=True)
        if federations is not None:
            federations = json.loads(federations)
            return HttpResponse(json.dumps(federations), content_type='text/json')
        pledges_dict = {}
        pledges_list = []
        federations_info = {}
        if len(date_list) > 1:
            # Accumulate the merged pledges over each 20-day window.
            for date in date_list:
                hs06sec = Query(agg_func='sum', table='completed', field=['sum_hs06sec','sum_count',
                                                                          'sum_cpuconsumptiontime','sum_walltime'],
                                grouping='time,dst_federation,dst_tier,dst_experiment_site,computingsite',
                                starttime=date[0], endtime=date[1])
                hs06sec = Grafana().get_data(hs06sec)
                pledges_sum = Query(agg_func='mean', table='pledges_hs06sec', field='value',
                                    grouping='time,real_federation,tier', starttime=date[0], endtime=date[1])
                pledges_sum = Grafana(database='monit_production_rebus').get_data(pledges_sum)
                pledges_dict, federations_info = pledges_merging(hs06sec, pledges_sum, total_seconds,
                                                                 pledges_dict, federations_info)
        else:
            hs06sec = Query(agg_func='sum', table='completed', field=['sum_hs06sec','sum_count',
                                                                      'sum_cpuconsumptiontime','sum_walltime'],
                            grouping='time,dst_federation,dst_tier,dst_experiment_site,computingsite',
                            starttime=date_list[0][0], endtime=date_list[0][1])
            hs06sec = Grafana().get_data(hs06sec)
            pledges_sum = Query(agg_func='mean', table='pledges_hs06sec', field='value',
                                grouping='time,real_federation,tier', starttime=date_list[0][0],
                                endtime=date_list[0][1])
            pledges_sum = Grafana(database='monit_production_rebus').get_data(pledges_sum)
            pledges_dict, federations_info = pledges_merging(hs06sec, pledges_sum, total_seconds,
                                                             pledges_dict, federations_info)
        for pledges in pledges_dict:
            if pledges == 'NULL':
                continue
            else:
                # 86400 converts HS06-seconds to HS06-days.
                pledges_list.append({"dst_federation": pledges,
                                     "hs06sec": int(round(float(pledges_dict[pledges]['hs06sec']) / 86400, 2)),
                                     'pledges': int(round(float(pledges_dict[pledges]['pledges']) / 86400, 2)),
                                     'tier': pledges_dict[pledges]['tier'],
                                     'federation_info': federations_info[pledges] if pledges in federations_info else None}
                                    )
        setCacheEntry(request, key, json.dumps(pledges_list), 60 * 60 * 24 * 30, isData=True)
        return HttpResponse(json.dumps(pledges_list), content_type='text/json')
    elif 'type' in request.session['requestParams'] and request.session['requestParams'] \
            ['type'] == 'country':
        key = hashlib.md5(encoding.force_bytes("{0}_{1}_country".format(starttime, endtime)))
        key = key.hexdigest()
        countries = getCacheEntry(request, key, isData=True)
        if countries is not None:
            countries = json.loads(countries)
            return HttpResponse(json.dumps(countries), content_type='text/json')
        federations_info = {}
        pledges_dict = {}
        pledges_list = []
        if len(date_list) > 1:
            for date in date_list:
                hs06sec = Query(agg_func='sum', table='completed', field='sum_hs06sec',
                                grouping='time,dst_federation,dst_country',
                                starttime=date[0], endtime=date[1])
                hs06sec = Grafana().get_data(hs06sec)
                pledges_sum = Query(agg_func='mean', table='pledges_hs06sec', field='value',
                                    grouping='time,real_federation,country', starttime=date[0], endtime=date[1])
                pledges_sum = Grafana(database='monit_production_rebus').get_data(pledges_sum)
                pledges_dict = pledges_merging(hs06sec, pledges_sum, total_seconds, pledges_dict, federations_info,
                                               type='dst_country')
        else:
            hs06sec = Query(agg_func='sum', table='completed', field='sum_hs06sec',
                            grouping='time,dst_federation,dst_country', starttime=date_list[0][0],
                            endtime=date_list[0][1])
            hs06sec = Grafana().get_data(hs06sec)
            pledges_sum = Query(agg_func='mean', table='pledges_hs06sec', field='value',
                                grouping='time,real_federation,country', starttime=date_list[0][0],
                                endtime=date_list[0][1])
            pledges_sum = Grafana(database='monit_production_rebus').get_data(pledges_sum)
            # Fix: argument order now matches the multi-window branch above
            # (pledges_dict before federations_info); they were swapped here.
            pledges_dict = pledges_merging(hs06sec, pledges_sum, total_seconds, pledges_dict,
                                           federations_info, type='dst_country')
        for pledges in pledges_dict:
            if pledges == 'NULL':
                continue
            else:
                pledges_list.append(
                    {"dst_country": pledges, "hs06sec": int(round(float(pledges_dict[pledges]['hs06sec']) / 86400, 2)),
                     'pledges': int(round(float(pledges_dict[pledges]['pledges']) / 86400, 2))})
        setCacheEntry(request, key, json.dumps(pledges_list),
                      60 * 60 * 24 * 30, isData=True)
        return HttpResponse(json.dumps(pledges_list), content_type='text/json')
    else:
        # No explicit type: render the page shell (cached for 30 days).
        data = getCacheEntry(request, "pledges")
        if data is not None:
            data = json.loads(data)
            t = loader.get_template('grafana-pledges.html')
            return HttpResponse(t.render(data, request), content_type='text/html')
        else:
            # Prime short-lived (60 s) placeholders for the JSON endpoints.
            key_fed = hashlib.md5(encoding.force_bytes("{0}_{1}_federation".format(starttime, endtime)))
            key_country = hashlib.md5(encoding.force_bytes("{0}_{1}_country".format(starttime, endtime)))
            key_fed = key_fed.hexdigest()
            key_country = key_country.hexdigest()
            setCacheEntry(request, key_fed, None, 60, isData=True)
            setCacheEntry(request, key_country, None, 60, isData=True)
            t = loader.get_template('grafana-pledges.html')
            data = {
                'request': request,
                'date_from': starttime,
                'date_to': endtime,
                'days': total_days,
                'info': "This page was cached: {0}".format(str(datetime.utcnow()))
            }
            setCacheEntry(request, "pledges", json.dumps(data, cls=DateEncoder), 60 * 60 * 24 * 30)
            return HttpResponse(t.render({"date_from": starttime, "date_to": endtime, "days": total_days}, request),
                                content_type='text/html')
def grafana_api_es(request):
    """Translate the HTTP request into a Grafana query and return the raw JSON result.

    NOTE(review): ``group_by``/``split_series`` are parsed from the ``groupby``
    request parameter but are not used afterwards in this view; kept for parity
    with the original behaviour.
    """
    valid, response = initRequest(request)
    group_by = None
    split_series = None
    params = request.session['requestParams']
    if 'groupby' in params:
        groupby_params = params['groupby'].split(',')
        if 'time' not in groupby_params:
            group_by = groupby_params[0]
            # Second grouping field becomes the series splitter; otherwise reuse the first.
            split_series = groupby_params[1] if len(groupby_params) > 1 else group_by
    query = Query().request_to_query(request)
    result = Grafana().get_data(query)
    return JsonResponse(result)
package org.stellasql.stella.session;
/**
 * Callback interface for manipulating the result tabs of a query session.
 */
public interface ResultTabHandler
{
  /** Closes the currently selected result tab. */
  public void closeSelectedTab();
  /** Selects the next result tab. */
  public void selectNextTab();
  /** Selects the previous result tab. */
  public void selectPreviousTab();
}
| shocksm/stella | src/main/java/org/stellasql/stella/session/ResultTabHandler.java | Java | apache-2.0 | 187 |
package com.sequenceiq.cloudbreak.service.user;
import javax.inject.Inject;
import org.springframework.stereotype.Service;
import com.sequenceiq.cloudbreak.api.endpoint.v4.userprofile.responses.UserProfileV4Response;
import com.sequenceiq.cloudbreak.auth.crn.Crn;
import com.sequenceiq.cloudbreak.auth.altus.EntitlementService;
@Service
public class UserProfileDecorator {

    @Inject
    private EntitlementService entitlementService;

    /**
     * Attaches the entitlements of the account extracted from {@code userCrn}
     * to the given profile response and returns the same (mutated) response.
     */
    public UserProfileV4Response decorate(UserProfileV4Response userProfileV4Response, String userCrn) {
        String accountId = Crn.safeFromString(userCrn).getAccountId();
        userProfileV4Response.setEntitlements(entitlementService.getEntitlements(accountId));
        return userProfileV4Response;
    }
}
| hortonworks/cloudbreak | core/src/main/java/com/sequenceiq/cloudbreak/service/user/UserProfileDecorator.java | Java | apache-2.0 | 720 |
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for trajectory."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.drivers import dynamic_episode_driver
from tf_agents.drivers import test_utils as drivers_test_utils
from tf_agents.environments import tf_py_environment
from tf_agents.trajectories import time_step as ts
from tf_agents.trajectories import trajectory
from tf_agents.utils import test_utils
class TrajectoryTest(test_utils.TestCase):
  """Tests trajectory factory helpers (first/mid/last/single_step/from_episode)
  and the trajectory -> transition conversions, for both tensor and array
  inputs."""
  def testFirstTensors(self):
    observation = ()
    action = ()
    policy_info = ()
    reward = tf.constant([1.0, 1.0, 2.0])
    discount = tf.constant([1.0, 1.0, 1.0])
    traj = trajectory.first(observation, action, policy_info, reward, discount)
    self.assertTrue(tf.is_tensor(traj.step_type))
    traj_val = self.evaluate(traj)
    self.assertAllEqual(traj_val.step_type, [ts.StepType.FIRST] * 3)
    self.assertAllEqual(traj_val.next_step_type, [ts.StepType.MID] * 3)
  def testFirstArrays(self):
    observation = ()
    action = ()
    policy_info = ()
    reward = np.array([1.0, 1.0, 2.0])
    discount = np.array([1.0, 1.0, 1.0])
    traj = trajectory.first(observation, action, policy_info, reward, discount)
    # Array inputs must produce array (non-tensor) trajectories.
    self.assertFalse(tf.is_tensor(traj.step_type))
    self.assertAllEqual(traj.step_type, [ts.StepType.FIRST] * 3)
    self.assertAllEqual(traj.next_step_type, [ts.StepType.MID] * 3)
  def testMidTensors(self):
    observation = ()
    action = ()
    policy_info = ()
    reward = tf.constant([1.0, 1.0, 2.0])
    discount = tf.constant([1.0, 1.0, 1.0])
    traj = trajectory.mid(observation, action, policy_info, reward, discount)
    self.assertTrue(tf.is_tensor(traj.step_type))
    traj_val = self.evaluate(traj)
    self.assertAllEqual(traj_val.step_type, [ts.StepType.MID] * 3)
    self.assertAllEqual(traj_val.next_step_type, [ts.StepType.MID] * 3)
  def testMidArrays(self):
    observation = ()
    action = ()
    policy_info = ()
    reward = np.array([1.0, 1.0, 2.0])
    discount = np.array([1.0, 1.0, 1.0])
    traj = trajectory.mid(observation, action, policy_info, reward, discount)
    self.assertFalse(tf.is_tensor(traj.step_type))
    self.assertAllEqual(traj.step_type, [ts.StepType.MID] * 3)
    self.assertAllEqual(traj.next_step_type, [ts.StepType.MID] * 3)
  def testLastTensors(self):
    observation = ()
    action = ()
    policy_info = ()
    reward = tf.constant([1.0, 1.0, 2.0])
    discount = tf.constant([1.0, 1.0, 1.0])
    traj = trajectory.last(observation, action, policy_info, reward, discount)
    self.assertTrue(tf.is_tensor(traj.step_type))
    traj_val = self.evaluate(traj)
    self.assertAllEqual(traj_val.step_type, [ts.StepType.MID] * 3)
    self.assertAllEqual(traj_val.next_step_type, [ts.StepType.LAST] * 3)
  def testLastArrays(self):
    observation = ()
    action = ()
    policy_info = ()
    reward = np.array([1.0, 1.0, 2.0])
    discount = np.array([1.0, 1.0, 1.0])
    traj = trajectory.last(observation, action, policy_info, reward, discount)
    self.assertFalse(tf.is_tensor(traj.step_type))
    self.assertAllEqual(traj.step_type, [ts.StepType.MID] * 3)
    self.assertAllEqual(traj.next_step_type, [ts.StepType.LAST] * 3)
  def testSingleStepTensors(self):
    observation = ()
    action = ()
    policy_info = ()
    reward = tf.constant([1.0, 1.0, 2.0])
    discount = tf.constant([1.0, 1.0, 1.0])
    traj = trajectory.single_step(observation, action, policy_info, reward,
                                  discount)
    self.assertTrue(tf.is_tensor(traj.step_type))
    traj_val = self.evaluate(traj)
    self.assertAllEqual(traj_val.step_type, [ts.StepType.FIRST] * 3)
    self.assertAllEqual(traj_val.next_step_type, [ts.StepType.LAST] * 3)
  def testSingleStepArrays(self):
    observation = ()
    action = ()
    policy_info = ()
    reward = np.array([1.0, 1.0, 2.0])
    discount = np.array([1.0, 1.0, 1.0])
    traj = trajectory.single_step(observation, action, policy_info, reward,
                                  discount)
    self.assertFalse(tf.is_tensor(traj.step_type))
    self.assertAllEqual(traj.step_type, [ts.StepType.FIRST] * 3)
    self.assertAllEqual(traj.next_step_type, [ts.StepType.LAST] * 3)
  def testFromEpisodeTensor(self):
    observation = tf.random.uniform((4, 5))
    action = ()
    policy_info = ()
    reward = tf.random.uniform((4,))
    traj = trajectory.from_episode(
        observation, action, policy_info, reward, discount=None)
    self.assertTrue(tf.is_tensor(traj.step_type))
    traj_val, obs_val, reward_val = self.evaluate((traj, observation, reward))
    first = ts.StepType.FIRST
    mid = ts.StepType.MID
    last = ts.StepType.LAST
    self.assertAllEqual(
        traj_val.step_type, [first, mid, mid, mid])
    self.assertAllEqual(
        traj_val.next_step_type, [mid, mid, mid, last])
    self.assertAllClose(traj_val.observation, obs_val)
    self.assertAllEqual(traj_val.reward, reward_val)
    # discount=None defaults to all-ones.
    self.assertAllEqual(traj_val.discount, [1.0, 1.0, 1.0, 1.0])
  def testFromEpisodeWithCompositeTensorOfTensors(self):
    observation = tf.SparseTensor(
        indices=tf.random.uniform((7, 2), maxval=9, dtype=tf.int64),
        values=tf.random.uniform((7,)),
        dense_shape=[4, 10])  # The 4 is important, it must match reward length.
    action = ()
    policy_info = ()
    reward = tf.random.uniform((4,))
    traj = trajectory.from_episode(
        observation, action, policy_info, reward, discount=None)
    self.assertTrue(tf.is_tensor(traj.step_type))
    traj_val, obs_val, reward_val = self.evaluate((traj, observation, reward))
    first = ts.StepType.FIRST
    mid = ts.StepType.MID
    last = ts.StepType.LAST
    self.assertAllEqual(
        traj_val.step_type, [first, mid, mid, mid])
    self.assertAllEqual(
        traj_val.next_step_type, [mid, mid, mid, last])
    self.assertAllClose(traj_val.observation, obs_val)
    self.assertAllEqual(traj_val.reward, reward_val)
    self.assertAllEqual(traj_val.discount, [1.0, 1.0, 1.0, 1.0])
  def testFromEpisodeArray(self):
    observation = np.random.rand(4, 5)
    action = ()
    policy_info = ()
    reward = np.random.rand(4)
    traj = trajectory.from_episode(
        observation, action, policy_info, reward, discount=None)
    self.assertFalse(tf.is_tensor(traj.step_type))
    first = ts.StepType.FIRST
    mid = ts.StepType.MID
    last = ts.StepType.LAST
    self.assertAllEqual(
        traj.step_type, [first, mid, mid, mid])
    self.assertAllEqual(
        traj.next_step_type, [mid, mid, mid, last])
    self.assertAllEqual(traj.observation, observation)
    self.assertAllEqual(traj.reward, reward)
    self.assertAllEqual(traj.discount, [1.0, 1.0, 1.0, 1.0])
  def testToTransition(self):
    first = ts.StepType.FIRST
    mid = ts.StepType.MID
    last = ts.StepType.LAST
    # Define a batch size 1, 3-step trajectory.
    traj = trajectory.Trajectory(
        step_type=np.array([[first, mid, last]]),
        next_step_type=np.array([[mid, last, first]]),
        observation=np.array([[10.0, 20.0, 30.0]]),
        action=np.array([[11.0, 22.0, 33.0]]),
        # reward at step 2 is an invalid dummy reward.
        reward=np.array([[0.0, 1.0, 2.0]]),
        discount=np.array([[1.0, 1.0, 0.0]]),
        policy_info=np.array([[1.0, 2.0, 3.0]]))
    transition = trajectory.to_transition(traj)
    self.assertIsInstance(transition, trajectory.Transition)
    time_steps, policy_steps, next_time_steps = transition
    self.assertAllEqual(time_steps.step_type, np.array([[first, mid]]))
    self.assertAllEqual(time_steps.observation, np.array([[10.0, 20.0]]))
    # reward and discount are filled with zero (dummy) values
    self.assertAllEqual(time_steps.reward, np.array([[0.0, 0.0]]))
    self.assertAllEqual(time_steps.discount, np.array([[0.0, 0.0]]))
    self.assertAllEqual(next_time_steps.step_type, np.array([[mid, last]]))
    self.assertAllEqual(next_time_steps.observation, np.array([[20.0, 30.0]]))
    self.assertAllEqual(next_time_steps.reward, np.array([[0.0, 1.0]]))
    self.assertAllEqual(next_time_steps.discount, np.array([[1.0, 1.0]]))
    self.assertAllEqual(policy_steps.action, np.array([[11.0, 22.0]]))
    self.assertAllEqual(policy_steps.info, np.array([[1.0, 2.0]]))
  def testToNStepTransitionForNEquals1(self):
    first = ts.StepType.FIRST
    last = ts.StepType.LAST
    # Define a batch size 1, 2-step trajectory.
    traj = trajectory.Trajectory(
        step_type=np.array([[first, last]]),
        next_step_type=np.array([[last, first]]),
        observation=np.array([[10.0, 20.0]]),
        action=np.array([[11.0, 22.0]]),
        # reward & discount values at step 1 is an invalid dummy reward.
        reward=np.array([[-1.0, 0.0]]),
        discount=np.array([[0.9, 0.0]]),
        policy_info=np.array([[10.0, 20.0]]))
    transition = trajectory.to_n_step_transition(traj, gamma=0.5)
    self.assertIsInstance(transition, trajectory.Transition)
    time_steps, policy_steps, next_time_steps = transition
    self.assertAllEqual(time_steps.step_type, np.array([first]))
    self.assertAllEqual(time_steps.observation, np.array([10.0]))
    self.assertAllEqual(time_steps.reward, np.array([np.nan]))
    self.assertAllEqual(time_steps.discount, np.array([np.nan]))
    self.assertAllEqual(next_time_steps.step_type, np.array([last]))
    self.assertAllEqual(next_time_steps.observation, np.array([20.0]))
    # r0
    self.assertAllEqual(next_time_steps.reward, np.array([-1.0]))
    # d0
    self.assertAllEqual(next_time_steps.discount, np.array([0.9]))
    self.assertAllEqual(policy_steps.action, np.array([11.0]))
    self.assertAllEqual(policy_steps.info, np.array([10.0]))
  def testToNStepTransition(self):
    first = ts.StepType.FIRST
    mid = ts.StepType.MID
    last = ts.StepType.LAST
    gamma = 0.5
    # Define a batch size 1, 4-step trajectory.
    traj = trajectory.Trajectory(
        step_type=np.array([[first, mid, mid, last]]),
        next_step_type=np.array([[mid, mid, last, first]]),
        observation=np.array([[10.0, 20.0, 30.0, 40.0]]),
        action=np.array([[11.0, 22.0, 33.0, 44.0]]),
        # reward & discount values at step 3 is an invalid dummy reward.
        reward=np.array([[-1.0, 1.0, 2.0, 0.0]]),
        discount=np.array([[0.9, 0.95, 1.0, 0.0]]),
        policy_info=np.array([[10.0, 20.0, 30.0, 40.0]]))
    transition = trajectory.to_n_step_transition(traj, gamma=gamma)
    self.assertIsInstance(transition, trajectory.Transition)
    time_steps, policy_steps, next_time_steps = transition
    self.assertAllEqual(time_steps.step_type, np.array([first]))
    self.assertAllEqual(time_steps.observation, np.array([10.0]))
    self.assertAllEqual(time_steps.reward, np.array([np.nan]))
    self.assertAllEqual(time_steps.discount, np.array([np.nan]))
    self.assertAllEqual(next_time_steps.step_type, np.array([last]))
    self.assertAllEqual(next_time_steps.observation, np.array([40.0]))
    # r0 + r1 * g * d0 + r2 * g * d0 * d1
    # == -1.0 + 1.0*0.5*(0.9) + 2.0*(0.5**2)*(0.9*0.95)
    self.assertAllEqual(
        next_time_steps.reward,
        np.array([-1.0 + 1.0 * gamma * 0.9 + 2.0 * gamma**2 * 0.9 * 0.95]))
    # gamma**2 * (d0 * d1 * d2)
    self.assertAllEqual(
        next_time_steps.discount, np.array([gamma**2 * (0.9 * 0.95 * 1.0)]))
    self.assertAllEqual(policy_steps.action, np.array([11.0]))
    self.assertAllEqual(policy_steps.info, np.array([10.0]))
  def testToTransitionHandlesTrajectoryFromDriverCorrectly(self):
    env = tf_py_environment.TFPyEnvironment(
        drivers_test_utils.PyEnvironmentMock())
    policy = drivers_test_utils.TFPolicyMock(
        env.time_step_spec(), env.action_spec())
    replay_buffer = drivers_test_utils.make_replay_buffer(policy)
    driver = dynamic_episode_driver.DynamicEpisodeDriver(
        env, policy, num_episodes=3, observers=[replay_buffer.add_batch])
    run_driver = driver.run()
    rb_gather_all = replay_buffer.gather_all()
    self.evaluate(tf.compat.v1.global_variables_initializer())
    self.evaluate(run_driver)
    trajectories = self.evaluate(rb_gather_all)
    transitions = trajectory.to_transition(trajectories)
    self.assertIsInstance(transitions, trajectory.Transition)
    time_steps, policy_step, next_time_steps = transitions
    self.assertAllEqual(time_steps.observation,
                        trajectories.observation[:, :-1])
    self.assertAllEqual(time_steps.step_type, trajectories.step_type[:, :-1])
    self.assertAllEqual(next_time_steps.observation,
                        trajectories.observation[:, 1:])
    self.assertAllEqual(next_time_steps.step_type,
                        trajectories.step_type[:, 1:])
    self.assertAllEqual(next_time_steps.reward, trajectories.reward[:, :-1])
    self.assertAllEqual(next_time_steps.discount, trajectories.discount[:, :-1])
    self.assertAllEqual(policy_step.action, trajectories.action[:, :-1])
    self.assertAllEqual(policy_step.info, trajectories.policy_info[:, :-1])
  def testToTransitionSpec(self):
    env = tf_py_environment.TFPyEnvironment(
        drivers_test_utils.PyEnvironmentMock())
    policy = drivers_test_utils.TFPolicyMock(
        env.time_step_spec(), env.action_spec())
    trajectory_spec = policy.trajectory_spec
    transition_spec = trajectory.to_transition_spec(trajectory_spec)
    self.assertIsInstance(transition_spec, trajectory.Transition)
    ts_spec, ps_spec, nts_spec = transition_spec
    self.assertAllEqual(ts_spec, env.time_step_spec())
    self.assertAllEqual(ps_spec.action, env.action_spec())
    self.assertAllEqual(nts_spec, env.time_step_spec())
# Run the tests with TensorFlow's test runner when executed directly.
if __name__ == '__main__':
  tf.test.main()
| tensorflow/agents | tf_agents/trajectories/trajectory_test.py | Python | apache-2.0 | 14,383 |
package com.hunt.system.exception;
/**
* @Author ouyangan
* @Date 2016/10/29/17:32
* @Description
*/
/**
 * Thrown when a request originates from an IP address that is not allowed
 * to access the system.
 */
public class ForbiddenIpException extends Exception {

    private static final long serialVersionUID = 1L;

    /**
     * Constructs a new exception with the specified detail message. The
     * cause is not initialized, and may subsequently be initialized by
     * a call to {@link #initCause}.
     *
     * @param message the detail message. The detail message is saved for
     *                later retrieval by the {@link #getMessage()} method.
     */
    public ForbiddenIpException(String message) {
        super(message);
    }

    /**
     * Constructs a new exception with the specified detail message and cause,
     * so the triggering exception is preserved instead of being dropped.
     *
     * @param message the detail message
     * @param cause   the underlying cause, retrievable via {@link #getCause()}
     */
    public ForbiddenIpException(String message, Throwable cause) {
        super(message, cause);
    }
}
| Ouyangan/hunt-admin | hunt-web/src/main/java/com/hunt/system/exception/ForbiddenIpException.java | Java | apache-2.0 | 597 |
/*
Copyright 2015 Chris Hannon
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
namespace FluentBoilerplate.Traits
{
    /// <summary>
    /// Represents a trait that allows contract requirements for roles
    /// </summary>
    /// <typeparam name="TContext">The context type</typeparam>
    public interface IRolesBasedTrait<TContext>
    {
        /// <summary>
        /// Indicates that the current identity must have a set of roles prior to performing a context action
        /// </summary>
        /// <param name="roles">The required roles</param>
        /// <returns>An instance of <typeparamref name="TContext"/> that contains the new requirements</returns>
        TContext RequireRoles(params IRole[] roles);
        /// <summary>
        /// Indicates that the current identity must not have a set of roles prior to performing a context action
        /// </summary>
        /// <param name="roles">The restricted roles</param>
        /// <returns>An instance of <typeparamref name="TContext"/> that contains the new requirements</returns>
        TContext MustNotHaveRoles(params IRole[] roles);
    }
}
| Norhaven/FluentBoilerplate | DotNet/FluentBoilerplate/PublicContract/Traits/IRolesBasedTrait.cs | C# | apache-2.0 | 1,638 |
package www.huangchengdu.com.a11_filepersistencetest;
import org.junit.Test;
import static org.junit.Assert.*;
/**
* Example local unit test, which will execute on the development machine (host).
*
* @see <a href="http://d.android.com/tools/testing">Testing documentation</a>
*/
public class ExampleUnitTest {
@Test
public void addition_isCorrect() {
assertEquals(4, 2 + 2);
}
} | huang303513/AndroidBasicCommonDemos | 11_FilePersistenceTest/app/src/test/java/www/huangchengdu/com/a11_filepersistencetest/ExampleUnitTest.java | Java | apache-2.0 | 405 |
class yamMessageHandlerBase(object):
    """
    Base class for message handlers for a :class:`ZMQProcess`.

    Subclasses must assign the protobuf Message class to ``self.cls`` and
    populate ``self.funcMap`` with a mapping of message types to handler
    functions.
    """

    def __init__(self, rep_stream, stop):
        self._rep_stream = rep_stream
        self._stop = stop
        self.cls = None              # protobuf Message class; set by subclass
        self.funcMap = {}            # message type -> handler function
        self.subMessageHandler = False

    def __call__(self, msg):
        """
        Invoked when a message arrives on the stream this handler is
        registered at.  *msg* is a list as returned by
        :meth:`zmq.core.socket.Socket.recv_multipart`.
        """
        if self.subMessageHandler:
            # Message was already decoded by an outer handler.
            yam_message = msg
        else:
            yam_message = self.cls()
            yam_message.ParseFromString("".join(msg))
        handler = self.funcMap[yam_message.type]
        return handler(yam_message)
return responseMessage
| dpquigl/YAM | src/pyyam/yam/handlers/yamMessageHandlerBase.py | Python | apache-2.0 | 1,148 |
'use strict';
/**
* Grunt - clean
*
* Url: https://github.com/gruntjs/grunt-contrib-clean
*/
module.exports = ( grunt, config ) => {
return {
// clean destination of intermediares
all : {
options : {
force : true, // caution, this is to allow deletion outside of cwd
},
files : {
src : [ `${ config.path.www.base }/**/*` ]
}
}
};
};
| katallaxie/generator-angular2-ts | templates/app/grunt/clean.js | JavaScript | apache-2.0 | 394 |
/*
* Copyright 2016-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package com.facebook.buck.versions;
import com.facebook.buck.log.Logger;
import com.facebook.buck.model.BuildTarget;
import com.facebook.buck.model.Flavor;
import com.facebook.buck.model.InternalFlavor;
import com.facebook.buck.rules.TargetGraph;
import com.facebook.buck.rules.TargetGraphAndBuildTargets;
import com.facebook.buck.rules.TargetNode;
import com.facebook.buck.util.MoreCollectors;
import com.google.common.base.Charsets;
import com.google.common.base.Preconditions;
import com.google.common.base.Predicate;
import com.google.common.base.Predicates;
import com.google.common.base.Throwables;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.collect.FluentIterable;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
import com.google.common.hash.Hasher;
import com.google.common.hash.Hashing;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.RecursiveAction;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.StreamSupport;
/**
* Takes a regular {@link TargetGraph}, resolves any versioned nodes, and returns a new graph with
* the versioned nodes removed.
*/
public class VersionedTargetGraphBuilder {
private static final Logger LOG = Logger.get(VersionedTargetGraphBuilder.class);
private final ForkJoinPool pool;
private final VersionSelector versionSelector;
private final TargetGraphAndBuildTargets unversionedTargetGraphAndBuildTargets;
/**
* The resolved version graph being built.
*/
private final VersionedTargetGraph.Builder targetGraphBuilder = VersionedTargetGraph.builder();
/**
* Map of the build targets to nodes in the resolved graph.
*/
private final ConcurrentHashMap<BuildTarget, TargetNode<?, ?>> index;
/**
* Fork-join actions for each root node.
*/
private final ConcurrentHashMap<BuildTarget, RootAction> rootActions;
/**
* Intermediate version info for each node.
*/
private final ConcurrentHashMap<BuildTarget, VersionInfo> versionInfo;
/**
* Count of root nodes.
*/
private final AtomicInteger roots = new AtomicInteger();
  /**
   * Creates a builder that resolves versioned nodes of the given unversioned graph.
   *
   * @param pool fork-join pool used for the parallel resolution work
   * @param versionSelector strategy used to pick concrete versions
   * @param unversionedTargetGraphAndBuildTargets the input graph (with versioned nodes) and roots
   */
  VersionedTargetGraphBuilder(
      ForkJoinPool pool,
      VersionSelector versionSelector,
      TargetGraphAndBuildTargets unversionedTargetGraphAndBuildTargets) {
    this.pool = pool;
    this.versionSelector = versionSelector;
    this.unversionedTargetGraphAndBuildTargets = unversionedTargetGraphAndBuildTargets;
    // The maps below are presized from the input graph's node count (different
    // heuristic multipliers per map) with concurrency matched to the pool.
    this.index =
        new ConcurrentHashMap<>(
            unversionedTargetGraphAndBuildTargets.getTargetGraph().getNodes().size() * 4,
            0.75f,
            pool.getParallelism());
    this.rootActions =
        new ConcurrentHashMap<>(
            unversionedTargetGraphAndBuildTargets.getTargetGraph().getNodes().size() / 2,
            0.75f,
            pool.getParallelism());
    this.versionInfo =
        new ConcurrentHashMap<>(
            2 * unversionedTargetGraphAndBuildTargets.getTargetGraph().getNodes().size(),
            0.75f,
            pool.getParallelism());
  }
  /** Looks up {@code target} in the unversioned input graph. */
  private TargetNode<?, ?> getNode(BuildTarget target) {
    return unversionedTargetGraphAndBuildTargets.getTargetGraph().get(target);
  }
  /** Like {@link #getNode}, but returns an empty {@link Optional} for unknown targets. */
  private Optional<TargetNode<?, ?>> getNodeOptional(BuildTarget target) {
    return unversionedTargetGraphAndBuildTargets.getTargetGraph().getOptional(target);
  }
  /** Registers {@code node} in the resolved-node index; returns any previously mapped node. */
  private TargetNode<?, ?> indexPutIfAbsent(TargetNode<?, ?> node) {
    return index.putIfAbsent(node.getBuildTarget(), node);
  }
  /**
   * Get/cache the transitive version info for this node.
   *
   * <p>The result maps each versioned target reachable from {@code node} to its set of
   * possible {@link Version}s; results are memoized in {@link #versionInfo}.
   */
  private VersionInfo getVersionInfo(TargetNode<?, ?> node) {
    VersionInfo info = this.versionInfo.get(node.getBuildTarget());
    if (info != null) {
      return info;
    }
    Map<BuildTarget, ImmutableSet<Version>> versionDomain = new HashMap<>();
    Optional<TargetNode<VersionedAliasDescription.Arg, ?>> versionedNode =
        TargetGraphVersionTransformations.getVersionedNode(node);
    if (versionedNode.isPresent()) {
      ImmutableMap<Version, BuildTarget> versions =
          versionedNode.get().getConstructorArg().versions;
      // Merge in the versioned deps and the version domain.
      versionDomain.put(node.getBuildTarget(), versions.keySet());
      // If this version has only one possible choice, there's no need to wrap the constraints from
      // it's transitive deps in an implication constraint.
      // NOTE(review): both branches below currently execute the same statements; the
      // implication wrapping described here is not (yet) implemented -- confirm intent.
      if (versions.size() == 1) {
        Map.Entry<Version, BuildTarget> ent = versions.entrySet().iterator().next();
        VersionInfo depInfo = getVersionInfo(getNode(ent.getValue()));
        versionDomain.putAll(depInfo.getVersionDomain());
      } else {
        // For each version choice, inherit the transitive constraints by wrapping them in an
        // implication dependent on the specific version that pulls them in.
        for (Map.Entry<Version, BuildTarget> ent : versions.entrySet()) {
          VersionInfo depInfo = getVersionInfo(getNode(ent.getValue()));
          versionDomain.putAll(depInfo.getVersionDomain());
        }
      }
    } else {
      // Merge in the constraints and version domain/deps from transitive deps.
      for (BuildTarget depTarget : TargetGraphVersionTransformations.getDeps(node)) {
        TargetNode<?, ?> dep = getNode(depTarget);
        if (TargetGraphVersionTransformations.isVersionPropagator(dep) ||
            TargetGraphVersionTransformations.getVersionedNode(dep).isPresent()) {
          VersionInfo depInfo = getVersionInfo(dep);
          versionDomain.putAll(depInfo.getVersionDomain());
        }
      }
    }
    info = VersionInfo.of(versionDomain);
    // Cache for subsequent lookups (plain put: recomputation is idempotent).
    this.versionInfo.put(node.getBuildTarget(), info);
    return info;
  }
/**
 * @return a flavor which summarizes the given version selections; the flavor is a "v"-prefixed
 *     MD5-derived digest, so equal selections always map to the same flavor.
 */
static Flavor getVersionedFlavor(SortedMap<BuildTarget, Version> versions) {
  Preconditions.checkArgument(!versions.isEmpty());
  // Iterating the SortedMap yields a deterministic order, so the resulting hash (and
  // therefore the flavor name) is stable for a given set of selections.
  Hasher hasher = Hashing.md5().newHasher();
  for (Map.Entry<BuildTarget, Version> ent : versions.entrySet()) {
    hasher.putString(ent.getKey().toString(), Charsets.UTF_8);
    hasher.putString(ent.getValue().getName(), Charsets.UTF_8);
  }
  // Only the first 7 hex digits are kept to keep the flavor short while remaining
  // unlikely to collide.
  return InternalFlavor.of("v" + hasher.hash().toString().substring(0, 7));
}
/**
 * If {@code node} is a versioned alias, replaces it with the node of the version chosen for it
 * in {@code selectedVersions}; otherwise returns it unchanged.
 */
private TargetNode<?, ?> resolveVersions(
    TargetNode<?, ?> node,
    ImmutableMap<BuildTarget, Version> selectedVersions) {
  Optional<TargetNode<VersionedAliasDescription.Arg, ?>> versionedNode =
      node.castArg(VersionedAliasDescription.Arg.class);
  if (!versionedNode.isPresent()) {
    return node;
  }
  Version chosen = selectedVersions.get(node.getBuildTarget());
  BuildTarget versionedTarget = versionedNode.get().getConstructorArg().versions.get(chosen);
  return getNode(Preconditions.checkNotNull(versionedTarget));
}
/**
 * @return the {@link BuildTarget} to use in the resolved target graph, formed by adding a
 *     flavor generated from the given version selections, or empty if the target is
 *     unchanged by versioning.
 */
private Optional<BuildTarget> getTranslateBuildTarget(
    TargetNode<?, ?> node,
    ImmutableMap<BuildTarget, Version> selectedVersions) {
  BuildTarget originalTarget = node.getBuildTarget();
  node = resolveVersions(node, selectedVersions);
  BuildTarget translatedTarget = node.getBuildTarget();
  if (TargetGraphVersionTransformations.isVersionPropagator(node)) {
    // Collect the version chosen for each versioned target reachable from this node,
    // sorted so the generated flavor is deterministic.
    TreeMap<BuildTarget, Version> pickedVersions = new TreeMap<>();
    for (BuildTarget depTarget : getVersionInfo(node).getVersionDomain().keySet()) {
      pickedVersions.put(depTarget, selectedVersions.get(depTarget));
    }
    if (!pickedVersions.isEmpty()) {
      translatedTarget =
          node.getBuildTarget().withAppendedFlavors(getVersionedFlavor(pickedVersions));
    }
  }
  if (translatedTarget.equals(originalTarget)) {
    return Optional.empty();
  }
  return Optional.of(translatedTarget);
}
/**
 * Builds and returns the versioned {@link TargetGraph}.
 *
 * <p>Each explicit build target becomes a {@link RootAction} submitted to the fork/join pool;
 * the resulting nodes and edges are accumulated into {@code targetGraphBuilder}.
 *
 * @throws VersionException if version resolution fails for any root
 * @throws InterruptedException if the calling thread is interrupted while waiting
 */
public TargetGraph build() throws VersionException, InterruptedException {
  LOG.debug(
      "Starting version target graph transformation (nodes %d)",
      unversionedTargetGraphAndBuildTargets.getTargetGraph().getNodes().size());
  long start = System.currentTimeMillis();
  // Walk through explicit build targets, separating them into root and non-root nodes.
  ImmutableList<RootAction> actions =
      unversionedTargetGraphAndBuildTargets.getBuildTargets().stream()
          .map(this::getNode)
          .map(RootAction::new)
          .collect(MoreCollectors.toImmutableList());
  // Add actions to the `rootActions` member for bookkeeping.
  actions.forEach(a -> rootActions.put(a.getRoot().getBuildTarget(), a));
  // Kick off the jobs to process the root nodes.
  actions.forEach(pool::submit);
  // Wait for actions to complete.
  for (RootAction action : actions) {
    action.getChecked();
  }
  long end = System.currentTimeMillis();
  LOG.debug(
      "Finished version target graph transformation in %.2f (nodes %d, roots: %d)",
      (end - start) / 1000.0,
      index.size(),
      roots.get());
  return targetGraphBuilder.build();
}
/**
 * Transforms the given unversioned target graph into its versioned equivalent.
 *
 * @param versionSelector strategy that picks concrete versions for each root
 * @param unversionedTargetGraphAndBuildTargets the input graph and its build targets
 * @param pool fork/join pool used to parallelize the transformation
 * @return the input wrapper with its target graph replaced by the versioned graph
 */
public static TargetGraphAndBuildTargets transform(
    VersionSelector versionSelector,
    TargetGraphAndBuildTargets unversionedTargetGraphAndBuildTargets,
    ForkJoinPool pool)
    throws VersionException, InterruptedException {
  VersionedTargetGraphBuilder builder =
      new VersionedTargetGraphBuilder(
          pool, versionSelector, unversionedTargetGraphAndBuildTargets);
  TargetGraph versionedGraph = builder.build();
  return unversionedTargetGraphAndBuildTargets.withTargetGraph(versionedGraph);
}
/**
 * Transform a version sub-graph at the given root node.
 *
 * <p>Instances are submitted to the builder's fork/join pool; work on other version roots is
 * dispatched as further {@code RootAction}s, while non-root nodes are processed inline.
 */
private class RootAction extends RecursiveAction {

  // The version root node this action is responsible for transforming.
  private final TargetNode<?, ?> node;

  RootAction(TargetNode<?, ?> node) {
    this.node = node;
  }

  // True for targets whose nodes propagate version constraints to their dependents.
  private final Predicate<BuildTarget> isVersionPropagator =
      target -> TargetGraphVersionTransformations.isVersionPropagator(getNode(target));

  // True for targets whose nodes are versioned aliases.
  private final Predicate<BuildTarget> isVersioned =
      target -> TargetGraphVersionTransformations.getVersionedNode(getNode(target)).isPresent();

  /**
   * Process a non-root node in the graph.
   */
  private TargetNode<?, ?> processNode(TargetNode<?, ?> node) throws VersionException {
    // If we've already processed this node, exit now.
    TargetNode<?, ?> processed = index.get(node.getBuildTarget());
    if (processed != null) {
      return processed;
    }
    // Add the node to the graph and recurse on its deps.
    TargetNode<?, ?> oldNode = indexPutIfAbsent(node);
    if (oldNode != null) {
      // Another thread registered this node first; use its copy.
      node = oldNode;
    } else {
      targetGraphBuilder.addNode(node.getBuildTarget().withFlavors(), node);
      for (TargetNode<?, ?> dep : process(node.getParseDeps())) {
        targetGraphBuilder.addEdge(node, dep);
      }
    }
    return node;
  }

  /**
   * Dispatch new jobs to transform the given nodes in parallel and wait for their results.
   */
  private Iterable<TargetNode<?, ?>> process(Iterable<BuildTarget> targets)
      throws VersionException {
    int size = Iterables.size(targets);
    List<RootAction> newActions = new ArrayList<>(size);
    List<RootAction> oldActions = new ArrayList<>(size);
    List<TargetNode<?, ?>> nonRootNodes = new ArrayList<>(size);
    for (BuildTarget target : targets) {
      TargetNode<?, ?> node = getNode(target);
      // If we see a root node, create an action to process it using the pool, since it's
      // potentially heavy-weight.
      if (TargetGraphVersionTransformations.isVersionRoot(node)) {
        RootAction oldAction = rootActions.get(target);
        if (oldAction != null) {
          oldActions.add(oldAction);
        } else {
          RootAction newAction = new RootAction(getNode(target));
          oldAction = rootActions.putIfAbsent(target, newAction);
          if (oldAction == null) {
            newActions.add(newAction);
          } else {
            // Lost the race to register an action for this root; wait on the winner's.
            oldActions.add(oldAction);
          }
        }
      } else {
        nonRootNodes.add(node);
      }
    }
    // Kick off all new rootActions in parallel.
    invokeAll(newActions);
    // For non-root nodes, just process them in-place, as they are inexpensive.
    for (TargetNode<?, ?> node : nonRootNodes) {
      processNode(node);
    }
    // Wait for any existing rootActions to finish.
    for (RootAction action : oldActions) {
      action.join();
    }
    // Now that everything is ready, return all the results.
    return StreamSupport.stream(targets.spliterator(), false)
        .map(index::get)
        .collect(MoreCollectors.toImmutableList());
  }

  /**
   * Blocks until this action completes, unwrapping the {@link ExecutionException} into the
   * checked exception types callers expect.
   */
  public Void getChecked() throws VersionException, InterruptedException {
    try {
      return get();
    } catch (ExecutionException e) {
      Throwable rootCause = Throwables.getRootCause(e);
      Throwables.throwIfInstanceOf(rootCause, VersionException.class);
      Throwables.throwIfInstanceOf(rootCause, RuntimeException.class);
      throw new IllegalStateException(
          String.format("Unexpected exception: %s: %s", e.getClass(), e.getMessage()),
          e);
    }
  }

  // Process a node inside this root's version sub-graph, translating its build target and
  // deps according to the root's selected versions.
  @SuppressWarnings("unchecked")
  private TargetNode<?, ?> processVersionSubGraphNode(
      TargetNode<?, ?> node,
      ImmutableMap<BuildTarget, Version> selectedVersions,
      TargetNodeTranslator targetTranslator)
      throws VersionException {
    Optional<BuildTarget> newTarget =
        targetTranslator.translateBuildTarget(node.getBuildTarget());
    TargetNode<?, ?> processed = index.get(newTarget.orElse(node.getBuildTarget()));
    if (processed != null) {
      return processed;
    }
    // Create the new target node, with the new target and deps.
    TargetNode<?, ?> newNode =
        ((Optional<TargetNode<?, ?>>) (Optional<?>) targetTranslator.translateNode(node))
            .orElse(node);
    LOG.verbose(
        "%s: new node declared deps %s, extra deps %s, arg %s",
        newNode.getBuildTarget(),
        newNode.getDeclaredDeps(),
        newNode.getExtraDeps(),
        newNode.getConstructorArg());
    // Add the new node, and it's dep edges, to the new graph.
    TargetNode<?, ?> oldNode = indexPutIfAbsent(newNode);
    if (oldNode != null) {
      newNode = oldNode;
    } else {
      // Insert the node into the graph, indexing it by a base target containing only the version
      // flavor, if one exists.
      targetGraphBuilder.addNode(
          node.getBuildTarget().withFlavors(
              Sets.difference(
                  newNode.getBuildTarget().getFlavors(),
                  node.getBuildTarget().getFlavors())),
          newNode);
      // Deps which participate in versioning stay inside this sub-graph and are resolved
      // recursively against the same version selections.
      for (BuildTarget depTarget :
          FluentIterable.from(node.getParseDeps())
              .filter(Predicates.or(isVersionPropagator, isVersioned))) {
        targetGraphBuilder.addEdge(
            newNode,
            processVersionSubGraphNode(
                resolveVersions(getNode(depTarget), selectedVersions),
                selectedVersions,
                targetTranslator));
      }
      // All other deps are processed as ordinary (possibly parallel) nodes.
      for (TargetNode<?, ?> dep :
          process(
              FluentIterable.from(node.getParseDeps())
                  .filter(Predicates.not(Predicates.or(isVersionPropagator, isVersioned))))) {
        targetGraphBuilder.addEdge(newNode, dep);
      }
    }
    return newNode;
  }

  // Transform a root node and its version sub-graph.
  private TargetNode<?, ?> processRoot(TargetNode<?, ?> root) throws VersionException {
    // If we've already processed this root, exit now.
    final TargetNode<?, ?> processedRoot = index.get(root.getBuildTarget());
    if (processedRoot != null) {
      return processedRoot;
    }
    // For stats collection.
    roots.incrementAndGet();
    VersionInfo versionInfo = getVersionInfo(root);
    // Select the versions to use for this sub-graph.
    final ImmutableMap<BuildTarget, Version> selectedVersions =
        versionSelector.resolve(
            root.getBuildTarget(),
            versionInfo.getVersionDomain());
    // Build a target translator object to translate build targets.
    ImmutableList<TargetTranslator<?>> translators =
        ImmutableList.of(
            new QueryTargetTranslator());
    TargetNodeTranslator targetTranslator =
        new TargetNodeTranslator(translators) {
          // Memoizes per-target translations for the duration of this root's processing.
          private final LoadingCache<BuildTarget, Optional<BuildTarget>> cache =
              CacheBuilder.newBuilder()
                  .build(
                      CacheLoader.from(
                          target -> {
                            // If we're handling the root node, there's nothing to translate.
                            if (root.getBuildTarget().equals(target)) {
                              return Optional.empty();
                            }
                            // If this target isn't in the target graph, which can be the case
                            // of build targets in the `tests` parameter, don't do any
                            // translation.
                            Optional<TargetNode<?, ?>> node = getNodeOptional(target);
                            if (!node.isPresent()) {
                              return Optional.empty();
                            }
                            return getTranslateBuildTarget(getNode(target), selectedVersions);
                          }));
          @Override
          public Optional<BuildTarget> translateBuildTarget(BuildTarget target) {
            return cache.getUnchecked(target);
          }
          @Override
          public Optional<ImmutableMap<BuildTarget, Version>> getSelectedVersions(
              BuildTarget target) {
            ImmutableMap.Builder<BuildTarget, Version> builder = ImmutableMap.builder();
            for (BuildTarget dep : getVersionInfo(getNode(target)).getVersionDomain().keySet()) {
              builder.put(dep, selectedVersions.get(dep));
            }
            return Optional.of(builder.build());
          }
        };
    return processVersionSubGraphNode(root, selectedVersions, targetTranslator);
  }

  @Override
  protected void compute() {
    try {
      processRoot(node);
    } catch (VersionException e) {
      // RecursiveAction.compute() cannot throw checked exceptions; surface the failure
      // through the task so getChecked() can unwrap and rethrow it.
      completeExceptionally(e);
    }
  }

  public TargetNode<?, ?> getRoot() {
    return node;
  }
}
}
| vschs007/buck | src/com/facebook/buck/versions/VersionedTargetGraphBuilder.java | Java | apache-2.0 | 19,928 |
package org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.policies;
import org.eclipse.gef.commands.Command;
import org.eclipse.gmf.runtime.emf.type.core.requests.CreateElementRequest;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.APIResourceEndpointCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.AddressEndPointCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.AddressingEndpointCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.AggregateMediatorCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.BAMMediatorCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.BeanMediatorCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.BuilderMediatorCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.CacheMediatorCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.CallMediatorCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.CallTemplateMediatorCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.CalloutMediatorCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.ClassMediatorCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.CloneMediatorCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.CloudConnectorCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.CloudConnectorOperationCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.CommandMediatorCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.ConditionalRouterMediatorCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.DBLookupMediatorCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.DBReportMediatorCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.DataMapperMediatorCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.DefaultEndPointCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.DropMediatorCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.EJBMediatorCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.EnqueueMediatorCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.EnrichMediatorCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.EntitlementMediatorCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.EventMediatorCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.FailoverEndPointCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.FastXSLTMediatorCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.FaultMediatorCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.FilterMediatorCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.HTTPEndpointCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.HeaderMediatorCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.IterateMediatorCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.LoadBalanceEndPointCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.LogMediatorCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.LoopBackMediatorCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.NamedEndpointCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.OAuthMediatorCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.PayloadFactoryMediatorCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.PropertyMediatorCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.RMSequenceMediatorCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.RecipientListEndPointCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.RespondMediatorCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.RouterMediatorCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.RuleMediatorCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.ScriptMediatorCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.SendMediatorCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.SequenceCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.SmooksMediatorCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.SpringMediatorCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.StoreMediatorCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.SwitchMediatorCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.TemplateEndpointCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.ThrottleMediatorCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.TransactionMediatorCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.URLRewriteMediatorCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.ValidateMediatorCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.WSDLEndPointCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.XQueryMediatorCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands.XSLTMediatorCreateCommand;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.providers.EsbElementTypes;
/**
 * Item semantic edit policy for mediator flow compartment #14: maps each creatable ESB
 * element type to the GEF command that instantiates it inside this compartment.
 *
 * @generated
 */
public class MediatorFlowMediatorFlowCompartment14ItemSemanticEditPolicy extends
		EsbBaseItemSemanticEditPolicy {
	/**
	 * Registers this compartment's element type with the base edit policy.
	 *
	 * @generated
	 */
	public MediatorFlowMediatorFlowCompartment14ItemSemanticEditPolicy() {
		super(EsbElementTypes.MediatorFlow_3627);
	}

	/**
	 * Dispatches on the requested element type, returning the wrapped create command for
	 * it, or deferring to the superclass when the type is not handled here.
	 *
	 * @generated
	 */
	protected Command getCreateCommand(CreateElementRequest req) {
		// Mediator element types.
		if (EsbElementTypes.DropMediator_3491 == req.getElementType()) {
			return getGEFWrapper(new DropMediatorCreateCommand(req));
		}
		if (EsbElementTypes.PropertyMediator_3492 == req.getElementType()) {
			return getGEFWrapper(new PropertyMediatorCreateCommand(req));
		}
		if (EsbElementTypes.ThrottleMediator_3493 == req.getElementType()) {
			return getGEFWrapper(new ThrottleMediatorCreateCommand(req));
		}
		if (EsbElementTypes.FilterMediator_3494 == req.getElementType()) {
			return getGEFWrapper(new FilterMediatorCreateCommand(req));
		}
		if (EsbElementTypes.LogMediator_3495 == req.getElementType()) {
			return getGEFWrapper(new LogMediatorCreateCommand(req));
		}
		if (EsbElementTypes.EnrichMediator_3496 == req.getElementType()) {
			return getGEFWrapper(new EnrichMediatorCreateCommand(req));
		}
		if (EsbElementTypes.XSLTMediator_3497 == req.getElementType()) {
			return getGEFWrapper(new XSLTMediatorCreateCommand(req));
		}
		if (EsbElementTypes.SwitchMediator_3498 == req.getElementType()) {
			return getGEFWrapper(new SwitchMediatorCreateCommand(req));
		}
		if (EsbElementTypes.Sequence_3503 == req.getElementType()) {
			return getGEFWrapper(new SequenceCreateCommand(req));
		}
		if (EsbElementTypes.EventMediator_3504 == req.getElementType()) {
			return getGEFWrapper(new EventMediatorCreateCommand(req));
		}
		if (EsbElementTypes.EntitlementMediator_3505 == req.getElementType()) {
			return getGEFWrapper(new EntitlementMediatorCreateCommand(req));
		}
		if (EsbElementTypes.ClassMediator_3506 == req.getElementType()) {
			return getGEFWrapper(new ClassMediatorCreateCommand(req));
		}
		if (EsbElementTypes.SpringMediator_3507 == req.getElementType()) {
			return getGEFWrapper(new SpringMediatorCreateCommand(req));
		}
		if (EsbElementTypes.ScriptMediator_3508 == req.getElementType()) {
			return getGEFWrapper(new ScriptMediatorCreateCommand(req));
		}
		if (EsbElementTypes.FaultMediator_3509 == req.getElementType()) {
			return getGEFWrapper(new FaultMediatorCreateCommand(req));
		}
		if (EsbElementTypes.XQueryMediator_3510 == req.getElementType()) {
			return getGEFWrapper(new XQueryMediatorCreateCommand(req));
		}
		if (EsbElementTypes.CommandMediator_3511 == req.getElementType()) {
			return getGEFWrapper(new CommandMediatorCreateCommand(req));
		}
		if (EsbElementTypes.DBLookupMediator_3512 == req.getElementType()) {
			return getGEFWrapper(new DBLookupMediatorCreateCommand(req));
		}
		if (EsbElementTypes.DBReportMediator_3513 == req.getElementType()) {
			return getGEFWrapper(new DBReportMediatorCreateCommand(req));
		}
		if (EsbElementTypes.SmooksMediator_3514 == req.getElementType()) {
			return getGEFWrapper(new SmooksMediatorCreateCommand(req));
		}
		if (EsbElementTypes.SendMediator_3515 == req.getElementType()) {
			return getGEFWrapper(new SendMediatorCreateCommand(req));
		}
		if (EsbElementTypes.HeaderMediator_3516 == req.getElementType()) {
			return getGEFWrapper(new HeaderMediatorCreateCommand(req));
		}
		if (EsbElementTypes.CloneMediator_3517 == req.getElementType()) {
			return getGEFWrapper(new CloneMediatorCreateCommand(req));
		}
		if (EsbElementTypes.CacheMediator_3518 == req.getElementType()) {
			return getGEFWrapper(new CacheMediatorCreateCommand(req));
		}
		if (EsbElementTypes.IterateMediator_3519 == req.getElementType()) {
			return getGEFWrapper(new IterateMediatorCreateCommand(req));
		}
		if (EsbElementTypes.CalloutMediator_3520 == req.getElementType()) {
			return getGEFWrapper(new CalloutMediatorCreateCommand(req));
		}
		if (EsbElementTypes.TransactionMediator_3521 == req.getElementType()) {
			return getGEFWrapper(new TransactionMediatorCreateCommand(req));
		}
		if (EsbElementTypes.RMSequenceMediator_3522 == req.getElementType()) {
			return getGEFWrapper(new RMSequenceMediatorCreateCommand(req));
		}
		if (EsbElementTypes.RuleMediator_3523 == req.getElementType()) {
			return getGEFWrapper(new RuleMediatorCreateCommand(req));
		}
		if (EsbElementTypes.OAuthMediator_3524 == req.getElementType()) {
			return getGEFWrapper(new OAuthMediatorCreateCommand(req));
		}
		if (EsbElementTypes.AggregateMediator_3525 == req.getElementType()) {
			return getGEFWrapper(new AggregateMediatorCreateCommand(req));
		}
		if (EsbElementTypes.StoreMediator_3588 == req.getElementType()) {
			return getGEFWrapper(new StoreMediatorCreateCommand(req));
		}
		if (EsbElementTypes.BuilderMediator_3591 == req.getElementType()) {
			return getGEFWrapper(new BuilderMediatorCreateCommand(req));
		}
		if (EsbElementTypes.CallTemplateMediator_3594 == req.getElementType()) {
			return getGEFWrapper(new CallTemplateMediatorCreateCommand(req));
		}
		if (EsbElementTypes.PayloadFactoryMediator_3597 == req.getElementType()) {
			return getGEFWrapper(new PayloadFactoryMediatorCreateCommand(req));
		}
		if (EsbElementTypes.EnqueueMediator_3600 == req.getElementType()) {
			return getGEFWrapper(new EnqueueMediatorCreateCommand(req));
		}
		if (EsbElementTypes.URLRewriteMediator_3620 == req.getElementType()) {
			return getGEFWrapper(new URLRewriteMediatorCreateCommand(req));
		}
		if (EsbElementTypes.ValidateMediator_3623 == req.getElementType()) {
			return getGEFWrapper(new ValidateMediatorCreateCommand(req));
		}
		if (EsbElementTypes.RouterMediator_3628 == req.getElementType()) {
			return getGEFWrapper(new RouterMediatorCreateCommand(req));
		}
		if (EsbElementTypes.ConditionalRouterMediator_3635 == req
				.getElementType()) {
			return getGEFWrapper(new ConditionalRouterMediatorCreateCommand(req));
		}
		if (EsbElementTypes.BAMMediator_3680 == req.getElementType()) {
			return getGEFWrapper(new BAMMediatorCreateCommand(req));
		}
		if (EsbElementTypes.BeanMediator_3683 == req.getElementType()) {
			return getGEFWrapper(new BeanMediatorCreateCommand(req));
		}
		if (EsbElementTypes.EJBMediator_3686 == req.getElementType()) {
			return getGEFWrapper(new EJBMediatorCreateCommand(req));
		}
		// Endpoint element types.
		if (EsbElementTypes.DefaultEndPoint_3609 == req.getElementType()) {
			return getGEFWrapper(new DefaultEndPointCreateCommand(req));
		}
		if (EsbElementTypes.AddressEndPoint_3610 == req.getElementType()) {
			return getGEFWrapper(new AddressEndPointCreateCommand(req));
		}
		if (EsbElementTypes.FailoverEndPoint_3611 == req.getElementType()) {
			return getGEFWrapper(new FailoverEndPointCreateCommand(req));
		}
		if (EsbElementTypes.RecipientListEndPoint_3692 == req.getElementType()) {
			return getGEFWrapper(new RecipientListEndPointCreateCommand(req));
		}
		if (EsbElementTypes.WSDLEndPoint_3612 == req.getElementType()) {
			return getGEFWrapper(new WSDLEndPointCreateCommand(req));
		}
		if (EsbElementTypes.NamedEndpoint_3660 == req.getElementType()) {
			return getGEFWrapper(new NamedEndpointCreateCommand(req));
		}
		if (EsbElementTypes.LoadBalanceEndPoint_3613 == req.getElementType()) {
			return getGEFWrapper(new LoadBalanceEndPointCreateCommand(req));
		}
		if (EsbElementTypes.APIResourceEndpoint_3674 == req.getElementType()) {
			return getGEFWrapper(new APIResourceEndpointCreateCommand(req));
		}
		if (EsbElementTypes.AddressingEndpoint_3689 == req.getElementType()) {
			return getGEFWrapper(new AddressingEndpointCreateCommand(req));
		}
		if (EsbElementTypes.HTTPEndpoint_3709 == req.getElementType()) {
			return getGEFWrapper(new HTTPEndpointCreateCommand(req));
		}
		if (EsbElementTypes.TemplateEndpoint_3716 == req.getElementType()) {
			return getGEFWrapper(new TemplateEndpointCreateCommand(req));
		}
		// Cloud connector and remaining mediator element types.
		if (EsbElementTypes.CloudConnector_3719 == req.getElementType()) {
			return getGEFWrapper(new CloudConnectorCreateCommand(req));
		}
		if (EsbElementTypes.CloudConnectorOperation_3722 == req
				.getElementType()) {
			return getGEFWrapper(new CloudConnectorOperationCreateCommand(req));
		}
		if (EsbElementTypes.LoopBackMediator_3736 == req.getElementType()) {
			return getGEFWrapper(new LoopBackMediatorCreateCommand(req));
		}
		if (EsbElementTypes.RespondMediator_3739 == req.getElementType()) {
			return getGEFWrapper(new RespondMediatorCreateCommand(req));
		}
		if (EsbElementTypes.CallMediator_3742 == req.getElementType()) {
			return getGEFWrapper(new CallMediatorCreateCommand(req));
		}
		if (EsbElementTypes.DataMapperMediator_3761 == req.getElementType()) {
			return getGEFWrapper(new DataMapperMediatorCreateCommand(req));
		}
		if (EsbElementTypes.FastXSLTMediator_3764 == req.getElementType()) {
			return getGEFWrapper(new FastXSLTMediatorCreateCommand(req));
		}
		// Unhandled element type: fall back to the base policy.
		return super.getCreateCommand(req);
	}
}
| rajeevanv89/developer-studio | esb/org.wso2.developerstudio.eclipse.gmf.esb.diagram/src/org/wso2/developerstudio/eclipse/gmf/esb/diagram/edit/policies/MediatorFlowMediatorFlowCompartment14ItemSemanticEditPolicy.java | Java | apache-2.0 | 15,236 |
using Nancy;
/// <summary>Nancy module declaring the application's root HTTP routes.</summary>
public class HomeModule : NancyModule
{
    public HomeModule()
    {
        // Greeting endpoint; the version text is part of the expected response body.
        Get("/", args => "Aloha from .NET, using the NancyFX framework. This is version 2.0 of this program.");

        // Reports the operating system description the service is running on.
        Get("/os", args => System.Runtime.InteropServices.RuntimeInformation.OSDescription);
    }
}
| redhat-dotnet-msa/aloha | HomeModule.cs | C# | apache-2.0 | 375 |
package com.planet_ink.coffee_mud.Abilities.Spells;
import com.planet_ink.coffee_mud.core.interfaces.*;
import com.planet_ink.coffee_mud.core.*;
import com.planet_ink.coffee_mud.core.collections.*;
import com.planet_ink.coffee_mud.Abilities.interfaces.*;
import com.planet_ink.coffee_mud.Areas.interfaces.*;
import com.planet_ink.coffee_mud.Behaviors.interfaces.*;
import com.planet_ink.coffee_mud.CharClasses.interfaces.*;
import com.planet_ink.coffee_mud.Commands.interfaces.*;
import com.planet_ink.coffee_mud.Common.interfaces.*;
import com.planet_ink.coffee_mud.Exits.interfaces.*;
import com.planet_ink.coffee_mud.Items.interfaces.*;
import com.planet_ink.coffee_mud.Libraries.interfaces.*;
import com.planet_ink.coffee_mud.Locales.interfaces.*;
import com.planet_ink.coffee_mud.MOBS.interfaces.*;
import com.planet_ink.coffee_mud.Races.interfaces.*;
import java.util.*;
/*
Copyright 2002-2016 Bo Zimmerman
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
public class Spell_CombatPrecognition extends Spell
{
@Override public String ID() { return "Spell_CombatPrecognition"; }
private final static String localizedName = CMLib.lang().L("Combat Precognition");
@Override public String name() { return localizedName; }
private final static String localizedStaticDisplay = CMLib.lang().L("(Combat Precognition)");
@Override public String displayText() { return localizedStaticDisplay; }
@Override public int abstractQuality(){return Ability.QUALITY_BENEFICIAL_SELF;}
@Override protected int canAffectCode(){return CAN_MOBS;}
@Override protected int overrideMana(){return 100;}
boolean lastTime=false;
@Override public int classificationCode(){ return Ability.ACODE_SPELL|Ability.DOMAIN_DIVINATION;}
@Override
public boolean okMessage(final Environmental myHost, final CMMsg msg)
{
if(!(affected instanceof MOB))
return true;
final MOB mob=(MOB)affected;
if(msg.amITarget(mob)
&&(mob.location()!=null)
&&(CMLib.flags().isAliveAwakeMobile(mob,true)))
{
if(msg.targetMinor()==CMMsg.TYP_WEAPONATTACK)
{
final CMMsg msg2=CMClass.getMsg(mob,msg.source(),null,CMMsg.MSG_QUIETMOVEMENT,L("<S-NAME> avoid(s) the attack by <T-NAME>!"));
if((proficiencyCheck(null,mob.charStats().getStat(CharStats.STAT_DEXTERITY)-60,false))
&&(!lastTime)
&&(msg.source().getVictim()==mob)
&&(msg.source().rangeToTarget()==0)
&&(mob.location().okMessage(mob,msg2)))
{
lastTime=true;
mob.location().send(mob,msg2);
helpProficiency(mob, 0);
return false;
}
lastTime=false;
}
else
if((msg.value()<=0)
&&(CMath.bset(msg.targetMajor(),CMMsg.MASK_MALICIOUS))
&&((mob.fetchAbility(ID())==null)||proficiencyCheck(null,mob.charStats().getStat(CharStats.STAT_DEXTERITY)-50,false)))
{
String tool=null;
if((msg.tool() instanceof Ability))
tool=((Ability)msg.tool()).name();
CMMsg msg2=null;
switch(msg.targetMinor())
{
case CMMsg.TYP_JUSTICE:
if((CMath.bset(msg.targetMajor(),CMMsg.MASK_MOVE))
&&(tool!=null))
msg2=CMClass.getMsg(mob,msg.source(),CMMsg.MSG_NOISYMOVEMENT,L("<S-NAME> avoid(s) the @x1 from <T-NAME>.",tool));
break;
case CMMsg.TYP_GAS:
msg2=CMClass.getMsg(mob,msg.source(),CMMsg.MSG_NOISYMOVEMENT,L("<S-NAME> avoid(s) the @x1 from <T-NAME>.",((tool==null)?"noxious fumes":tool)));
break;
case CMMsg.TYP_COLD:
msg2=CMClass.getMsg(mob,msg.source(),CMMsg.MSG_NOISYMOVEMENT,L("<S-NAME> avoid(s) the @x1 from <T-NAME>.",((tool==null)?"cold blast":tool)));
break;
case CMMsg.TYP_ELECTRIC:
msg2=CMClass.getMsg(mob,msg.source(),CMMsg.MSG_NOISYMOVEMENT,L("<S-NAME> avoid(s) the @x1 from <T-NAME>.",((tool==null)?"electrical attack":tool)));
break;
case CMMsg.TYP_FIRE:
msg2=CMClass.getMsg(mob,msg.source(),CMMsg.MSG_NOISYMOVEMENT,L("<S-NAME> avoid(s) the @x1 from <T-NAME>.",((tool==null)?"blast of heat":tool)));
break;
case CMMsg.TYP_WATER:
msg2=CMClass.getMsg(mob,msg.source(),CMMsg.MSG_NOISYMOVEMENT,L("<S-NAME> avoid(s) the @x1 from <T-NAME>.",((tool==null)?"weat blast":tool)));
break;
case CMMsg.TYP_ACID:
msg2=CMClass.getMsg(mob,msg.source(),CMMsg.MSG_NOISYMOVEMENT,L("<S-NAME> avoid(s) the @x1 from <T-NAME>.",((tool==null)?"acid attack":tool)));
break;
case CMMsg.TYP_SONIC:
msg2=CMClass.getMsg(mob,msg.source(),CMMsg.MSG_NOISYMOVEMENT,L("<S-NAME> avoid(s) the @x1 from <T-NAME>.",((tool==null)?"sonic attack":tool)));
break;
case CMMsg.TYP_LASER:
msg2=CMClass.getMsg(mob,msg.source(),CMMsg.MSG_NOISYMOVEMENT,L("<S-NAME> avoid(s) the @x1 from <T-NAME>.",((tool==null)?"laser attack":tool)));
break;
}
if((msg2!=null)&&(mob.location()!=null)&&(mob.location().okMessage(mob,msg2)))
{
mob.location().send(mob,msg2);
return false;
}
}
}
return true;
}
@Override
public void unInvoke()
{
	// Undo the effects of this spell when it expires or is dispelled.
	// Only a MOB can host this effect; silently ignore anything else.
	// NOTE(review): when affected is not a MOB, super.unInvoke() is never
	// called — presumably intentional for this framework, but confirm.
	if(!(affected instanceof MOB))
		return;
	final MOB mob=(MOB)affected;
	super.unInvoke();
	// Notify the player that the precognition effect has worn off.
	mob.tell(L("Your combat precognition fades away."));
}
@Override
public boolean invoke(MOB mob, List<String> commands, Physical givenTarget, boolean auto, int asLevel)
{
	// Cast combat precognition. Target defaults to the caster unless the
	// spell was auto-invoked on an explicit MOB target.
	MOB target=mob;
	if((auto)&&(givenTarget!=null)&&(givenTarget instanceof MOB))
		target=(MOB)givenTarget;
	// Refuse to stack: one instance of this effect per target.
	if(target.fetchEffect(ID())!=null)
	{
		mob.tell(target,null,null,L("<S-NAME> already <S-HAS-HAVE> the sight."));
		return false;
	}
	// Standard pre-cast checks (mana, components, etc.) in the superclass.
	if(!super.invoke(mob,commands,givenTarget,auto,asLevel))
		return false;
	final boolean success=proficiencyCheck(mob,0,auto);
	if(success)
	{
		invoker=mob;
		// Broadcast the casting message; only apply the effect if the room
		// approves the message.
		final CMMsg msg=CMClass.getMsg(mob,target,this,verbalCastCode(mob,target,auto),L(auto?"<T-NAME> shout(s) combatively!":"^S<S-NAME> shout(s) a combative spell!^?"));
		if(mob.location().okMessage(mob,msg))
		{
			mob.location().send(mob,msg);
			beneficialAffect(mob,target,asLevel,0);
		}
	}
	else
		return beneficialWordsFizzle(mob,target,L("<S-NAME> shout(s) combatively, but nothing more happens."));
	// return whether it worked
	return success;
}
}
| oriontribunal/CoffeeMud | com/planet_ink/coffee_mud/Abilities/Spells/Spell_CombatPrecognition.java | Java | apache-2.0 | 6,760 |
package edu.neu.coe.info6205;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Scanner;
/*
* This is program to check whether the tickets have been used by students. It will take input all the tickets.
* After this user need to input the ticket number used by students.
* The program can be terminated by entering 7889- Exit code
*
* Input Format--
*
* [ticket_number_1,ticket_number_2,ticket_number_3,ticket_number_4,ticket_number_5....ticket_number_n ]
*
* Output--
*
* ========================================================================
* Final Tally of tickets
* Tickets Used Status
* ========================================================================
* 182051 1
* 167929 2
* 154421 Not Used
* 160561 Not Used
*
* */
class Checker {
    /**
     * Interactively validates ticket numbers read from standard input against
     * the supplied list of known tickets, then prints a final usage tally.
     *
     * <p>Each entered number is checked against the known set: unknown numbers
     * are reported invalid, first-time uses are reported valid, and repeats
     * report how many times the ticket has been presented. Entering 7889
     * terminates the input loop.
     *
     * @param nums the full list of valid ticket numbers
     */
    public void checkValid(int[] nums) {
        Scanner input = new Scanner(System.in);
        System.out.println("Total tickets: " + nums.length);
        // Usage count per known ticket; 0 means "not used yet".
        HashMap<Integer, Integer> ticketCounter = new HashMap<>();
        for (int num : nums) {
            ticketCounter.put(num, 0);
        }
        while (true) {
            System.out.println("Enter the ticket number: ");
            int ticket = input.nextInt();
            // 7889 is the documented sentinel exit code.
            if (ticket == 7889) break;
            if (!ticketCounter.containsKey(ticket)) {
                System.out.println("Invalid Ticket: " + ticket);
            } else {
                int value = ticketCounter.get(ticket);
                if (value == 0) {
                    // First presentation of this ticket.
                    ticketCounter.put(ticket, value + 1);
                    System.out.println("Valid Ticket: " + ticket);
                } else {
                    // Repeat presentation: still counted, but flagged.
                    ticketCounter.put(ticket, value + 1);
                    System.out.println("Ticket already used by another User");
                    System.out.println("Number of user: " + ticketCounter.get(ticket));
                }
            }
        }
        System.out.println("========================================================================");
        System.out.println("Final Tally of tickets");
        System.out.println("Tickets Used Status");
        System.out.println("========================================================================");
        for (int num : nums) {
            System.out.println(num + " " + (ticketCounter.get(num) == 0 ? "Not Used" : ticketCounter.get(num)));
        }
    }
}
public class TicketChecker {
    /**
     * Parses a bracketed, comma-separated list such as {@code "[1, 2, 3]"}
     * into an int array. An empty list {@code "[]"} yields a zero-length array.
     *
     * @param input bracketed list of integers; surrounding whitespace allowed
     * @return the parsed integers in order
     * @throws NumberFormatException if any element is not a valid integer
     */
    public static int[] stringToIntegerArray(String input) {
        input = input.trim();
        // Strip the surrounding '[' and ']'.
        input = input.substring(1, input.length() - 1);
        if (input.length() == 0) {
            return new int[0];
        }
        String[] parts = input.split(",");
        int[] output = new int[parts.length];
        for (int index = 0; index < parts.length; index++) {
            String part = parts[index].trim();
            output[index] = Integer.parseInt(part);
        }
        return output;
    }

    /**
     * Formats the first {@code length} elements of {@code nums} as
     * {@code "[a, b, c]"}; returns {@code "[]"} when {@code length} is 0.
     *
     * @param nums   the numbers to format
     * @param length how many leading elements to include
     * @return the bracketed, comma-separated representation
     */
    public static String integerArrayListToString(List<Integer> nums, int length) {
        if (length == 0) {
            return "[]";
        }
        StringBuilder result = new StringBuilder();
        for (int index = 0; index < length; index++) {
            Integer number = nums.get(index);
            result.append(number).append(", ");
        }
        // Drop the trailing ", " before closing the bracket.
        return "[" + result.substring(0, result.length() - 2) + "]";
    }

    /** Formats all elements of {@code nums} as {@code "[a, b, c]"}. */
    public static String integerArrayListToString(List<Integer> nums) {
        return integerArrayListToString(nums, nums.size());
    }

    /**
     * Formats a collection of integer lists as {@code "[[..],[..]]"}.
     *
     * @param nums the lists to format
     * @return the nested bracketed representation; {@code "[]"} when empty
     */
    public static String int2dListToString(Collection<List<Integer>> nums) {
        // Bug fix: the previous version returned "]" for an empty collection,
        // because setCharAt overwrote the opening '[' when nothing was appended.
        if (nums.isEmpty()) {
            return "[]";
        }
        StringBuilder sb = new StringBuilder("[");
        for (List<Integer> list : nums) {
            sb.append(integerArrayListToString(list));
            sb.append(",");
        }
        // Replace the final trailing ',' with the closing bracket.
        sb.setCharAt(sb.length() - 1, ']');
        return sb.toString();
    }

    /**
     * Entry point: reads one bracketed list of ticket numbers from stdin and
     * hands it to {@link Checker#checkValid(int[])}.
     */
    public static void main(String[] args) throws IOException {
        System.out.println("Enter the total tickets");
        BufferedReader in = new BufferedReader(new InputStreamReader(System.in));
        String line;
        // TODO figure out what was meant here: while does not loop!
        while ((line = in.readLine()) != null) {
            int[] nums = stringToIntegerArray(line);
            new Checker().checkValid(nums);
            break;
        }
    }
}
| rchillyard/INFO6205 | src/main/java/edu/neu/coe/info6205/TicketChecker.java | Java | apache-2.0 | 4,562 |
"""Base configuration implementation."""
# Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer.
# Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2018 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class InvalidConfigError(Exception):
    """This error is thrown when the config file is not valid."""
    # No body needed: the docstring alone is a valid class body, so the
    # previous redundant `pass` statement has been removed.
def test_config_condition(cond, msg):
if cond:
raise InvalidConfigError(msg)
class Conf(object):
    """Base class for FAUCET configuration.

    Subclasses declare their configurable fields in ``defaults`` (default
    values) and ``defaults_types`` (expected Python types). Parsed YAML is
    merged straight into ``__dict__``, then defaulted and sanity-checked.
    Attributes whose names start with ``dyn`` carry runtime state and are
    excluded from configuration hashing/equality by default.
    """
    # Per-subclass map of field name -> default value.
    defaults = {} # type: dict
    # Per-subclass map of field name -> expected type.
    defaults_types = {} # type: dict
    # True once finalize() is called; enables hash caching.
    dyn_finalized = False
    # Cached hash, populated lazily after finalization.
    dyn_hash = None

    def __init__(self, _id, dp_id, conf=None):
        self._id = _id
        self.dp_id = dp_id
        if conf is None:
            conf = {}
        # TODO: handle conf as a sequence. # pylint: disable=fixme
        # NOTE(review): a non-dict conf is currently ignored silently —
        # no update/defaults/check is performed in that case.
        if isinstance(conf, dict):
            self.update(conf)
            self.set_defaults()
            self.check_config()

    def set_defaults(self):
        """Set default values and run any basic sanity checks."""
        for key, value in list(self.defaults.items()):
            self._set_default(key, value)

    def _check_unknown_conf(self, conf):
        """Check that supplied conf dict doesn't specify keys not defined."""
        sub_conf_names = set(conf.keys())
        unknown_conf_names = sub_conf_names - set(self.defaults.keys())
        test_config_condition(unknown_conf_names, '%s fields unknown in %s' % (
            unknown_conf_names, self._id))

    def _check_conf_types(self, conf, conf_types):
        """Check that conf value is of the correct type."""
        for conf_key, conf_value in list(conf.items()):
            test_config_condition(conf_key not in conf_types, '%s field unknown in %s (known types %s)' % (
                conf_key, self._id, conf_types))
            # None is always accepted: it means "not configured".
            if conf_value is not None:
                conf_type = conf_types[conf_key]
                test_config_condition(not isinstance(conf_value, conf_type), '%s value %s must be %s not %s' % (
                    conf_key, conf_value, conf_type, type(conf_value))) # pytype: disable=invalid-typevar

    @staticmethod
    def _set_unknown_conf(conf, conf_types):
        # Fill in any declared-but-missing keys: lists become empty lists,
        # everything else becomes None. Mutates and returns conf.
        for conf_key, conf_type in list(conf_types.items()):
            if conf_key not in conf:
                if conf_type == list:
                    conf[conf_key] = []
                else:
                    conf[conf_key] = None
        return conf

    def update(self, conf):
        """Parse supplied YAML config and sanity check."""
        # Keys are merged directly into instance attributes, then validated.
        self.__dict__.update(conf)
        self._check_unknown_conf(conf)
        self._check_conf_types(conf, self.defaults_types)

    def check_config(self):
        """Check config at instantiation time for errors, typically via assert."""
        return

    @staticmethod
    def _conf_keys(conf, dyn=False, subconf=True, ignore_keys=None):
        """Return a list of key/values of attributes with dyn/Conf attributes/filtered.

        dyn=False drops runtime ("dyn*") attributes; subconf=False drops
        nested Conf instances; ignore_keys drops explicitly named keys.
        """
        conf_keys = []
        for key, value in list(conf.__dict__.items()):
            if not dyn and key.startswith('dyn'):
                continue
            if not subconf and isinstance(value, Conf):
                continue
            if ignore_keys and key in ignore_keys:
                continue
            conf_keys.append((key, value))
        return conf_keys

    def merge_dyn(self, other_conf):
        """Merge dynamic state from other conf object."""
        for key, value in self._conf_keys(other_conf, dyn=True):
            self.__dict__[key] = value

    def _set_default(self, key, value):
        # Only fill in a default when the attribute is missing or still None,
        # so explicitly configured values are never overwritten.
        if key not in self.__dict__ or self.__dict__[key] is None:
            self.__dict__[key] = value

    def to_conf(self):
        """Return configuration as a dict."""
        result = {}
        for key in self.defaults:
            # 'name' is excluded — presumably it is carried separately as the
            # object's id; confirm against callers before relying on this.
            if key != 'name':
                result[key] = self.__dict__[str(key)]
        return result

    def conf_hash(self, dyn=False, subconf=True, ignore_keys=None):
        """Return hash of keys configurably filtering attributes."""
        # Attributes are stringified so unhashable values (lists, dicts)
        # can still participate in the frozenset hash.
        return hash(frozenset(list(map(
            str, self._conf_keys(self, dyn=dyn, subconf=subconf, ignore_keys=ignore_keys)))))

    def __hash__(self):
        # The hash is only cached once parsing is finalized, since attributes
        # may still change before then.
        if self.dyn_hash is not None:
            return self.dyn_hash
        dyn_hash = self.conf_hash(dyn=False, subconf=True)
        if self.dyn_finalized:
            self.dyn_hash = dyn_hash
        return dyn_hash

    def finalize(self):
        """Configuration parsing marked complete."""
        self.dyn_finalized = True

    def ignore_subconf(self, other, ignore_keys=None):
        """Return True if this config same as other, ignoring sub config."""
        return (self.conf_hash(dyn=False, subconf=False, ignore_keys=ignore_keys)
                == other.conf_hash(dyn=False, subconf=False, ignore_keys=ignore_keys))

    def __eq__(self, other):
        # Equality is hash-based: two configs whose stringified attribute
        # sets hash equal compare equal.
        return self.__hash__() == other.__hash__()

    def __ne__(self, other):
        return not self.__eq__(other)
| wackerly/faucet | faucet/conf.py | Python | apache-2.0 | 5,573 |
/*
* Copyright (C) 2015 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package OptimizationTests.ShortLeafMethodsInlining.InvokeVirtual_add_int_lit16_001;
class Main {
    // Number of loop iterations the optimization test runs.
    final static int iterations = 10;

    /**
     * Driver for the inlining optimization test: repeatedly calls
     * Test.simple_method so the compiler's short-leaf-method inlining can be
     * exercised, printing the initial and final accumulator values.
     */
    public static void main(String[] args) {
        Test test = new Test();
        int nextJ = -10;
        System.out.println("Initial nextJ value is " + nextJ);
        for(int i = 0; i < iterations; i++) {
            // Only the final iteration's value survives — intentional for
            // this test harness; the loop exists to trigger inlining.
            nextJ = test.simple_method(i) + i;
        }
        System.out.println("Final nextJ value is " + nextJ);
    }
}
| android-art-intel/marshmallow | art-extension/opttests/src/OptimizationTests/ShortLeafMethodsInlining/InvokeVirtual_add_int_lit16_001/Main.java | Java | apache-2.0 | 1,083 |
/*
* Copyright 2012-2018 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.actuate.metrics.web.servlet;
import io.micrometer.core.instrument.Clock;
import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.MockClock;
import io.micrometer.core.instrument.simple.SimpleConfig;
import io.micrometer.core.instrument.simple.SimpleMeterRegistry;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.ApplicationContext;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Import;
import org.springframework.test.context.junit4.SpringRunner;
import org.springframework.test.context.web.WebAppConfiguration;
import org.springframework.test.web.servlet.MockMvc;
import org.springframework.test.web.servlet.setup.MockMvcBuilders;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;
import org.springframework.web.context.WebApplicationContext;
import org.springframework.web.servlet.config.annotation.EnableWebMvc;
import static org.assertj.core.api.Assertions.assertThat;
import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get;
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status;
/**
* @author Jon Schneider
*/
@RunWith(SpringRunner.class)
@WebAppConfiguration
public class WebMvcMetricsFilterAutoTimedTests {

    @Autowired
    private MeterRegistry registry;

    @Autowired
    private WebApplicationContext context;

    private MockMvc mvc;

    @Autowired
    private WebMvcMetricsFilter filter;

    /** Builds a MockMvc instance with the metrics filter installed. */
    @Before
    public void setupMockMvc() {
        this.mvc = MockMvcBuilders.webAppContextSetup(this.context)
                .addFilters(this.filter).build();
    }

    /**
     * Verifies that a request to an un-annotated handler is still timed when
     * auto-timing is enabled: exactly one "http.server.requests" timer sample
     * tagged with status 200 should be recorded.
     */
    @Test
    public void metricsCanBeAutoTimed() throws Exception {
        this.mvc.perform(get("/api/10")).andExpect(status().isOk());
        assertThat(
                this.registry.find("http.server.requests").tags("status", "200").timer())
                        .hasValueSatisfying((t) -> assertThat(t.count()).isEqualTo(1));
    }

    /** Test context: mock clock, simple registry, and auto-timed metrics filter. */
    @Configuration
    @EnableWebMvc
    @Import({ Controller.class })
    static class TestConfiguration {

        @Bean
        MockClock clock() {
            return new MockClock();
        }

        @Bean
        MeterRegistry meterRegistry(Clock clock) {
            return new SimpleMeterRegistry(SimpleConfig.DEFAULT, clock);
        }

        @Bean
        public WebMvcMetrics controllerMetrics(MeterRegistry registry) {
            // autoTimeRequests=true is the behavior under test here.
            return new WebMvcMetrics(registry, new DefaultWebMvcTagsProvider(),
                    "http.server.requests", true, false);
        }

        @Bean
        public WebMvcMetricsFilter webMetricsFilter(ApplicationContext context) {
            return new WebMvcMetricsFilter(context);
        }

    }

    /** Minimal endpoint with no @Timed annotation, so timing must be automatic. */
    @RestController
    @RequestMapping("/api")
    static class Controller {

        @GetMapping("/{id}")
        public String successful(@PathVariable Long id) {
            return id.toString();
        }

    }

}
| ihoneymon/spring-boot | spring-boot-project/spring-boot-actuator/src/test/java/org/springframework/boot/actuate/metrics/web/servlet/WebMvcMetricsFilterAutoTimedTests.java | Java | apache-2.0 | 3,712 |
// GUI Animator FREE
// Version: 1.1.0
// Compatilble: Unity 5.4.0 or higher, see more info in Readme.txt file.
//
// Developer: Gold Experience Team (https://www.ge-team.com)
//
// Unity Asset Store: https://www.assetstore.unity3d.com/en/#!/content/58843
// GE Store: https://www.ge-team.com/en/products/gui-animator-free/
// Full version on Unity Asset Store: https://www.assetstore.unity3d.com/en/#!/content/28709
// Full version on GE Store: https://www.ge-team.com/en/products/gui-animator-for-unity-ui/
//
// Please direct any bugs/comments/suggestions to geteamdev@gmail.com
#region Namespaces
using UnityEngine;
using System.Collections;
#endregion // Namespaces
// ######################################################################
// GA_FREE_OpenOtherScene class
// This class handles 8 buttons for changing scene.
// ######################################################################
public class GA_FREE_OpenOtherScene : MonoBehaviour
{
	// ########################################
	// MonoBehaviour Functions
	// ########################################

	#region MonoBehaviour

	// Start is called on the frame when a script is enabled just before any of the Update methods is called the first time.
	// http://docs.unity3d.com/ScriptReference/MonoBehaviour.Start.html
	void Start () {
	}

	// Update is called every frame, if the MonoBehaviour is enabled.
	// http://docs.unity3d.com/ScriptReference/MonoBehaviour.Update.html
	void Update () {
	}

	#endregion // MonoBehaviour

	// ########################################
	// UI Responder functions
	// ########################################

	#region UI Responder

	// Shared scene-switch routine used by all eight demo buttons:
	// disables every button, schedules the named scene to load after the
	// 1.5 second "Moving Out" animation, and hides all GUIs.
	// (Refactor: the eight handlers below previously duplicated this body.)
	private void OpenDemoScene (string sceneName)
	{
		// Disable all buttons
		GUIAnimSystemFREE.Instance.EnableAllButtons(false);

		// Waits 1.5 secs for Moving Out animation then load next level
		GUIAnimSystemFREE.Instance.LoadLevel(sceneName, 1.5f);

		gameObject.SendMessage("HideAllGUIs");
	}

	// Open Demo Scene 1
	public void ButtonOpenDemoScene1 ()
	{
		OpenDemoScene("GA FREE - Demo01 (960x600px)");
	}

	// Open Demo Scene 2
	public void ButtonOpenDemoScene2 ()
	{
		OpenDemoScene("GA FREE - Demo02 (960x600px)");
	}

	// Open Demo Scene 3
	public void ButtonOpenDemoScene3 ()
	{
		OpenDemoScene("GA FREE - Demo03 (960x600px)");
	}

	// Open Demo Scene 4
	public void ButtonOpenDemoScene4 ()
	{
		OpenDemoScene("GA FREE - Demo04 (960x600px)");
	}

	// Open Demo Scene 5
	public void ButtonOpenDemoScene5 ()
	{
		OpenDemoScene("GA FREE - Demo05 (960x600px)");
	}

	// Open Demo Scene 6
	public void ButtonOpenDemoScene6 ()
	{
		OpenDemoScene("GA FREE - Demo06 (960x600px)");
	}

	// Open Demo Scene 7
	public void ButtonOpenDemoScene7 ()
	{
		OpenDemoScene("GA FREE - Demo07 (960x600px)");
	}

	// Open Demo Scene 8
	public void ButtonOpenDemoScene8 ()
	{
		OpenDemoScene("GA FREE - Demo08 (960x600px)");
	}

	#endregion // UI Responder
}
| Kuraikari/Modern-Times | Modern Time (J)RPG/Assets/Plugins/GUI Animator/GUI Animator FREE/Demo (CSharp)/Scripts/GA_FREE_OpenOtherScene.cs | C# | apache-2.0 | 4,394 |
//+build linux
package notification
import "os/exec"
// Send displays a desktop notification with the given title and summary by
// shelling out to the notify-send utility. The returned error reflects the
// command's outcome (non-nil if notify-send is missing or exits non-zero).
func Send(title, summary string) error {
	cmd := exec.Command("notify-send", title, summary)
	return cmd.Run()
}
| jamesrr39/goutil | notification/notification_linux.go | GO | apache-2.0 | 157 |
define(function(require, exports, module) {
var EditorManager = brackets.getModule("editor/EditorManager");
var ExtensionUtils = brackets.getModule("utils/ExtensionUtils");
var HTMLUtils = brackets.getModule("language/HTMLUtils");
var PreferencesManager = brackets.getModule("preferences/PreferencesManager");
function wrapBrackets(str) {
if (typeof str !== "string") {
return null;
}
var result = str;
if (!result.startsWith("<")) {
result = "<" + result;
}
if (!result.endsWith(">")) {
result = result + ">";
}
return result;
}
    var TauDocumentParser;

    // Resolves the TAU documentation link for the symbol or markup element
    // under the editor cursor, using per-profile/version lookup tables loaded
    // from tau-document-config.json.
    module.exports = TauDocumentParser = (function() {
        function TauDocumentParser() {
            this.tauAPIs = {};       // JS API name tree -> {name, href}
            this.tauHTML = {};       // CSS-selector keyed HTML doc entries
            this.tauGuideData = {};  // raw config: version -> profile -> data
            this.tauGuidePaths = {}; // doc base paths for the active profile
            this.readJson();
        }

        // Asynchronously loads the bundled config file, then selects the data
        // for the current project profile/version.
        TauDocumentParser.prototype.readJson = function() {
            var self = this;
            ExtensionUtils.loadFile(module, "tau-document-config.json").done(
                function (data) {
                    self.tauGuideData = data;
                    self.setTauGuideData();
                }
            );
        };

        // Picks the doc paths, API tree and HTML table matching the project's
        // profile/version preferences. Returns the selected API tree.
        TauDocumentParser.prototype.setTauGuideData = function() {
            var profile, version;
            profile = PreferencesManager.getViewState("projectProfile");
            version = PreferencesManager.getViewState("projectVersion");
            this.tauGuidePaths = this.tauGuideData[version][profile].doc;
            this.tauAPIs = this.tauGuideData[version][profile].api;
            this.tauHTML = this.tauGuideData[version][profile].html;
            return this.tauAPIs;
        };

        // Inspects the focused editor's cursor position and returns
        // {href, name} for the matching TAU documentation page, with both
        // null when nothing matches.
        TauDocumentParser.prototype.parse = function() {
            var api = this.tauAPIs;
            var html = this.tauHTML;
            var href = null;
            var name = null;
            var editor = EditorManager.getFocusedEditor();
            var language = editor.getLanguageForSelection();
            var langId = language.getId();
            var pos = editor.getSelection();
            var line = editor.document.getLine(pos.start.line);
            if (langId === "html") {
                var tagInfo = HTMLUtils.getTagInfo(editor, editor.getCursorPos());
                if (tagInfo.position.tokenType === HTMLUtils.TAG_NAME || tagInfo.position.tokenType === HTMLUtils.ATTR_VALUE) {
                    var start = 0;
                    var end = 0;
                    // Find a start tag
                    // NOTE(review): `var cur` is declared in both loops (and
                    // start/end again in the javascript branch below); legal
                    // with var hoisting, but worth tidying if ever touched.
                    for (var cur = pos.start.ch; cur >= 0; cur--) {
                        if (line[cur] === "<") {
                            start = cur;
                            break;
                        }
                    }
                    // Find a end tag
                    for (var cur = pos.start.ch; cur < line.length; cur++) {
                        if (line[cur] === ">" || line[cur] === "/") {
                            end = cur;
                            break;
                        }
                    }
                    var result = line.slice(start, end);
                    result = wrapBrackets(result);
                    var element = $.parseHTML(result);
                    if (element && element.length > 0) {
                        // Match the parsed element against each configured
                        // CSS selector; last matching entry wins.
                        Object.keys(html).forEach((value) => {
                            if (element[0].matches(value)) {
                                if (html[value].href) {
                                    href = this.tauGuidePaths.local + html[value].href;
                                    name = html[value].name;
                                }
                            }
                        });
                    }
                }
            } else if (langId === "javascript") {
                // Extract the dotted "tau.*" expression around the cursor and
                // walk the API tree one segment at a time.
                var start = line.lastIndexOf("tau.");
                var end = 0;
                if (start === -1) {
                    return null;
                }
                for (var cur = pos.start.ch; cur < line.length; cur++) {
                    if (line[cur] === " " || line[cur] === "(" || line[cur] === ".") {
                        end = cur;
                        break;
                    }
                }
                var data = line.slice(start, end);
                if (data) {
                    data = data.split(".");
                    for (var i=0; i<data.length; i++) {
                        api = api[data[i]];
                        if (!api) {
                            break;
                        }
                    }
                    if (api && api.href) {
                        // TODO: Should change the href to use the network
                        // href = this.tauGuidePaths.network + api.href;
                        href = this.tauGuidePaths.local + api.href;
                        name = api.name;
                    }
                }
            }
            return {
                href: href,
                name: name
            };
        };

        return TauDocumentParser;
    }());
/*
* Copyright © 2013-2018 camunda services GmbH and various authors (info@camunda.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.camunda.bpm.container.impl.ejb;
import java.util.List;
import java.util.Set;
import javax.annotation.PostConstruct;
import javax.ejb.EJB;
import javax.ejb.Local;
import javax.ejb.Stateless;
import javax.ejb.TransactionAttribute;
import javax.ejb.TransactionAttributeType;
import org.camunda.bpm.ProcessEngineService;
import org.camunda.bpm.engine.ProcessEngine;
/**
* <p>Exposes the {@link ProcessEngineService} as EJB inside the container.</p>
*
* @author Daniel Meyer
*
*/
@Stateless(name="ProcessEngineService", mappedName="ProcessEngineService")
@Local(ProcessEngineService.class)
@TransactionAttribute(TransactionAttributeType.SUPPORTS)
public class EjbProcessEngineService implements ProcessEngineService {
@EJB
protected EjbBpmPlatformBootstrap ejbBpmPlatform;
/** the processEngineServiceDelegate */
protected ProcessEngineService processEngineServiceDelegate;
@PostConstruct
protected void initProcessEngineServiceDelegate() {
processEngineServiceDelegate = ejbBpmPlatform.getProcessEngineService();
}
public ProcessEngine getDefaultProcessEngine() {
return processEngineServiceDelegate.getDefaultProcessEngine();
}
public List<ProcessEngine> getProcessEngines() {
return processEngineServiceDelegate.getProcessEngines();
}
public Set<String> getProcessEngineNames() {
return processEngineServiceDelegate.getProcessEngineNames();
}
public ProcessEngine getProcessEngine(String name) {
return processEngineServiceDelegate.getProcessEngine(name);
}
}
| xasx/camunda-bpm-platform | javaee/ejb-service/src/main/java/org/camunda/bpm/container/impl/ejb/EjbProcessEngineService.java | Java | apache-2.0 | 2,196 |
package fr.sii.ogham.core.util;
import static java.util.stream.Collectors.toList;
import java.util.ArrayList;
import java.util.List;
/**
* Helper class that registers objects with associated priority. Each registered
* object is then returned as list ordered by priority. The higher priority
* value comes first in the list.
*
* @author Aurélien Baudet
*
* @param <P>
* the type of priorized objects
*/
public class PriorizedList<P> {
	private final List<WithPriority<P>> priorities;

	/**
	 * Initializes with an empty list
	 */
	public PriorizedList() {
		this(new ArrayList<>());
	}

	/**
	 * Initializes with some priorized objects
	 *
	 * @param priorities
	 *            the priorized objects
	 */
	public PriorizedList(List<WithPriority<P>> priorities) {
		super();
		// NOTE(review): the supplied list is stored (not copied), so external
		// mutation of it affects this instance — confirm callers rely on this.
		this.priorities = priorities;
	}

	/**
	 * Registers a new priorized object
	 *
	 * @param priorized
	 *            the wrapped object with its priority
	 * @return this instance for fluent chaining
	 */
	public PriorizedList<P> register(WithPriority<P> priorized) {
		priorities.add(priorized);
		return this;
	}

	/**
	 * Registers an object with its priority
	 *
	 * @param priorized
	 *            the object to register
	 * @param priority
	 *            the associated priority
	 * @return this instance for fluent chaining
	 */
	public PriorizedList<P> register(P priorized, int priority) {
		priorities.add(new WithPriority<>(priorized, priority));
		return this;
	}

	/**
	 * Merge all priorities of another {@link PriorizedList} into this one.
	 *
	 * @param other
	 *            the priority list
	 * @return this instance for fluent chaining
	 */
	public PriorizedList<P> register(PriorizedList<P> other) {
		priorities.addAll(other.getPriorities());
		return this;
	}

	/**
	 * Returns true if this list contains no elements.
	 *
	 * @return if this list contains no elements
	 */
	public boolean isEmpty() {
		return priorities.isEmpty();
	}

	/**
	 * Get the list of priorities ordered by priority.
	 *
	 * <p>Note: this sorts the internal list in place and returns it directly
	 * (no defensive copy), so callers share and may mutate this instance's
	 * state.
	 *
	 * @return ordered list of priorities
	 */
	public List<WithPriority<P>> getPriorities() {
		return sort();
	}

	/**
	 * Get the list of priorized objects ordered by highest priority.
	 *
	 * @return list of objects ordered by highest priority
	 */
	public List<P> getOrdered() {
		return sort().stream().map(WithPriority::getPriorized).collect(toList());
	}

	// Sorts the backing list in place using WithPriority's comparator and
	// returns the same (shared) list instance.
	private List<WithPriority<P>> sort() {
		priorities.sort(WithPriority.comparator());
		return priorities;
	}
}
| groupe-sii/ogham | ogham-core/src/main/java/fr/sii/ogham/core/util/PriorizedList.java | Java | apache-2.0 | 2,496 |
/*
* Copyright 2014 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.github.carlomicieli.java8.football;
import org.junit.Test;
import java.time.LocalDate;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.nullValue;
import static org.junit.Assert.assertThat;
/**
* @author Carlo Micieli
*/
public class PlayerTests {
@Test
public void shouldCreateNewPlayers() {
Player patrickWillis = createPlayerForPatrickWillis();
assertThatPlayer_Is_PatrickWillis(patrickWillis);
}
@Test
public void playersShouldHaveOptionalTeam() {
Player withTeam = createPlayerWithTeam("SF");
Player withoutTeam = createPlayerWithoutTeam();
assertThat(withTeam.getTeam().get(), is(equalTo("SF")));
assertThat(withoutTeam.getTeam().orElse(null), is(nullValue()));
}
@Test
public void playersShouldHaveOptionalJerseyNumber() {
Player withNumber = createPlayerWithNumber(99);
Player withoutNumber = createPlayerWithoutNumber();
assertThat(withNumber.getNumber().get(), is(equalTo(99)));
assertThat(withoutNumber.getNumber().orElse(null), is(nullValue()));
}
@Test
public void playersShouldHaveOptionalNumberOfYearsAmongPro() {
Player withYearsPro = createPlayerWithYearsPro(9);
Player withoutYearsPro = createPlayerWithoutYearsPro();
assertThat(withYearsPro.getYearsPro().get(), is(equalTo(9)));
assertThat(withoutYearsPro.getYearsPro().orElse(null), is(nullValue()));
}
@Test
public void shouldReturnPlayersHeightInCentimeters() {
Player patrickWillis = createPlayerForPatrickWillis();
assertThat(patrickWillis.heightInCentimeters(), is(equalTo(185)));
}
@Test
public void shouldReturnPlayersWeightInKilograms() {
Player patrickWillis = createPlayerForPatrickWillis();
assertThat(patrickWillis.weightInKg(), is(equalTo(109)));
}
@Test
public void shouldCheckWhetherTwoPlayersAreDifferent() {
Player x = createPlayerForPatrickWillis();
Player y = createAnotherPlayer();
assertThat(x.equals(y), is(equalTo(false)));
}
@Test
public void shouldCheckWhetherTwoPlayersAreEquals() {
Player x = createPlayerForPatrickWillis();
Player y = createPlayerForPatrickWillis();
assertThat(x.equals(x), is(equalTo(true)));
assertThat(x.equals(y), is(equalTo(true)));
}
@Test
public void shouldCalculateHashCodeForPlayers() {
Player x = createPlayerForPatrickWillis();
Player y = createPlayerForPatrickWillis();
assertThat(x.hashCode(), is(equalTo(y.hashCode())));
}
private Player createPlayerWithNumber(int number) {
return createPlayer(null, number, null);
}
private Player createPlayerWithYearsPro(int yearsPro) {
return createPlayer(null, null, yearsPro);
}
private Player createPlayerWithTeam(String team) {
return createPlayer(team, null, null);
}
private Player createPlayerWithoutYearsPro() {
return createAnotherPlayer();
}
private Player createPlayerWithoutTeam() {
return createAnotherPlayer();
}
private Player createPlayerWithoutNumber() {
return createAnotherPlayer();
}
private Player createAnotherPlayer() {
return createPlayer(null, null, null);
}
private Player createPlayer(String team, Integer number, Integer yearsPro) {
return new Player("John",
"Doe",
"FS",
"Hogwarts",
"00-0000000",
team,
"http://www.nfl.com/player/johndoe/profile",
LocalDate.of(1981, 9, 9),
73,
240,
number,
yearsPro);
}
private Player createPlayerForPatrickWillis() {
return new Player("Patrick", "Willis",
"ILB",
"Mississippi",
"00-0025398",
"SF",
"http://www.nfl.com/player/patrickwillis/2495781/profile",
LocalDate.of(1985, 1, 25),
73,
240,
52,
8);
}
    // Asserts that every field of p matches the Patrick Willis fixture.
    // Optional-valued accessors (team, number, yearsPro) are unwrapped with
    // orElse(null) so an absent value fails the comparison rather than throwing.
    // NOTE(review): the position ("ILB") is the one fixture field not asserted
    // here — confirm whether that omission is intentional.
    private void assertThatPlayer_Is_PatrickWillis(Player p) {
        assertThat(p.getFirstName(), is(equalTo("Patrick")));
        assertThat(p.getLastName(), is(equalTo("Willis")));
        assertThat(p.getCollege(), is(equalTo("Mississippi")));
        assertThat(p.getPlayerId(), is(equalTo("00-0025398")));
        assertThat(p.getTeam().orElse(null), is(equalTo("SF")));
        assertThat(p.getProfileUrl(), is(equalTo("http://www.nfl.com/player/patrickwillis/2495781/profile")));
        assertThat(p.getBirthdate(), is(equalTo(LocalDate.of(1985, 1, 25))));
        assertThat(p.getHeight(), is(equalTo(73)));
        assertThat(p.getWeight(), is(equalTo(240)));
        assertThat(p.getNumber().orElse(null), is(equalTo(52)));
        assertThat(p.getYearsPro().orElse(null), is(equalTo(8)));
    }
}
| CarloMicieli/java8-for-hipsters | src/test/java/io/github/carlomicieli/java8/football/PlayerTests.java | Java | apache-2.0 | 5,683 |
/*
* Copyright 2016-present Open Networking Laboratory
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.onosproject.drivers.lumentum;
import com.google.common.collect.Lists;
import org.apache.commons.lang3.tuple.Pair;
import org.onosproject.net.ChannelSpacing;
import org.onosproject.net.GridType;
import org.onosproject.net.OchSignal;
import org.onosproject.net.OchSignalType;
import org.onosproject.net.Port;
import org.onosproject.net.PortNumber;
import org.onosproject.net.device.DeviceService;
import org.onosproject.net.driver.AbstractHandlerBehaviour;
import org.onosproject.net.flow.DefaultFlowEntry;
import org.onosproject.net.flow.DefaultFlowRule;
import org.onosproject.net.flow.DefaultTrafficSelector;
import org.onosproject.net.flow.DefaultTrafficTreatment;
import org.onosproject.net.flow.FlowEntry;
import org.onosproject.net.flow.FlowId;
import org.onosproject.net.flow.FlowRule;
import org.onosproject.net.flow.FlowRuleProgrammable;
import org.onosproject.net.flow.TrafficSelector;
import org.onosproject.net.flow.TrafficTreatment;
import org.onosproject.net.flow.criteria.Criteria;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.snmp4j.PDU;
import org.snmp4j.event.ResponseEvent;
import org.snmp4j.smi.Integer32;
import org.snmp4j.smi.OID;
import org.snmp4j.smi.UnsignedInteger32;
import org.snmp4j.smi.VariableBinding;
import org.snmp4j.util.TreeEvent;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import java.util.Objects;
import java.util.stream.Collectors;
import static com.google.common.base.Preconditions.checkArgument;
// TODO: need to convert between OChSignal and XC channel number
/**
 * {@link FlowRuleProgrammable} behaviour for the Lumentum SDN ROADM, driven
 * over SNMP. Cross connects between the line ports and the add/drop ports are
 * modelled as flow rules; the flow id/priority for each cross connect is kept
 * in the {@link CrossConnectCache} because the device itself cannot store them.
 */
public class LumentumSdnRoadmFlowRuleProgrammable extends AbstractHandlerBehaviour implements FlowRuleProgrammable {
    private static final Logger log =
            LoggerFactory.getLogger(LumentumSdnRoadmFlowRuleProgrammable.class);
    // Default values
    private static final int DEFAULT_TARGET_GAIN_PREAMP = 150;
    private static final int DEFAULT_TARGET_GAIN_BOOSTER = 200;
    private static final int DISABLE_CHANNEL_TARGET_POWER = -650;
    private static final int DEFAULT_CHANNEL_TARGET_POWER = -30;
    private static final int DISABLE_CHANNEL_ABSOLUTE_ATTENUATION = 160;
    private static final int DEFAULT_CHANNEL_ABSOLUTE_ATTENUATION = 50;
    private static final int DISABLE_CHANNEL_ADD_DROP_PORT_INDEX = 1;
    private static final int OUT_OF_SERVICE = 1;
    private static final int IN_SERVICE = 2;
    private static final int OPEN_LOOP = 1;
    private static final int CLOSED_LOOP = 2;
    // First 20 ports are add/mux ports, next 20 are drop/demux
    private static final int DROP_PORT_OFFSET = 20;
    // OIDs
    private static final String CTRL_AMP_MODULE_SERVICE_STATE_PREAMP = ".1.3.6.1.4.1.46184.1.4.4.1.2.1";
    private static final String CTRL_AMP_MODULE_SERVICE_STATE_BOOSTER = ".1.3.6.1.4.1.46184.1.4.4.1.2.2";
    private static final String CTRL_AMP_MODULE_TARGET_GAIN_PREAMP = ".1.3.6.1.4.1.46184.1.4.4.1.8.1";
    private static final String CTRL_AMP_MODULE_TARGET_GAIN_BOOSTER = ".1.3.6.1.4.1.46184.1.4.4.1.8.2";
    private static final String CTRL_CHANNEL_STATE = ".1.3.6.1.4.1.46184.1.4.2.1.3.";
    private static final String CTRL_CHANNEL_MODE = ".1.3.6.1.4.1.46184.1.4.2.1.4.";
    private static final String CTRL_CHANNEL_TARGET_POWER = ".1.3.6.1.4.1.46184.1.4.2.1.6.";
    private static final String CTRL_CHANNEL_ADD_DROP_PORT_INDEX = ".1.3.6.1.4.1.46184.1.4.2.1.13.";
    private static final String CTRL_CHANNEL_ABSOLUTE_ATTENUATION = ".1.3.6.1.4.1.46184.1.4.2.1.5.";

    // SNMP session to the device; (re)opened at the start of each public operation.
    private LumentumSnmpDevice snmp;

    /**
     * Reads the cross connects currently enabled on the device and reports
     * them as ADDED flow entries. Returns an empty collection when the device
     * cannot be reached or does not expose the expected line ports.
     */
    @Override
    public Collection<FlowEntry> getFlowEntries() {
        try {
            snmp = new LumentumSnmpDevice(handler().data().deviceId());
        } catch (IOException e) {
            log.error("Failed to connect to device: ", e);
            return Collections.emptyList();
        }
        // Line in is last but one port, line out is last
        DeviceService deviceService = this.handler().get(DeviceService.class);
        List<Port> ports = deviceService.getPorts(data().deviceId());
        if (ports.size() < 2) {
            return Collections.emptyList();
        }
        PortNumber lineIn = ports.get(ports.size() - 2).number();
        PortNumber lineOut = ports.get(ports.size() - 1).number();
        Collection<FlowEntry> entries = Lists.newLinkedList();
        // Add rules: channel state table "1" holds the add (mux) side
        OID addOid = new OID(CTRL_CHANNEL_STATE + "1");
        entries.addAll(
                fetchRules(addOid, true, lineOut).stream()
                        .map(fr -> new DefaultFlowEntry(fr, FlowEntry.FlowEntryState.ADDED, 0, 0, 0))
                        .collect(Collectors.toList())
        );
        // Drop rules: channel state table "2" holds the drop (demux) side
        OID dropOid = new OID(CTRL_CHANNEL_STATE + "2");
        entries.addAll(
                fetchRules(dropOid, false, lineIn).stream()
                        .map(fr -> new DefaultFlowEntry(fr, FlowEntry.FlowEntryState.ADDED, 0, 0, 0))
                        .collect(Collectors.toList())
        );
        return entries;
    }

    /**
     * Pushes the given rules to the device as cross connects.
     *
     * @param rules rules to apply
     * @return the subset of rules that was successfully installed
     */
    @Override
    public Collection<FlowRule> applyFlowRules(Collection<FlowRule> rules) {
        try {
            snmp = new LumentumSnmpDevice(data().deviceId());
        } catch (IOException e) {
            log.error("Failed to connect to device: ", e);
            // Without a connection nothing can be installed; previously execution
            // continued with a null/stale session and failed with an NPE.
            return Collections.emptyList();
        }
        // Line ports are the last two ports reported by the device
        DeviceService deviceService = this.handler().get(DeviceService.class);
        List<Port> ports = deviceService.getPorts(data().deviceId());
        List<PortNumber> linePorts = ports.subList(ports.size() - 2, ports.size()).stream()
                .map(p -> p.number())
                .collect(Collectors.toList());
        // Apply the valid rules on the device
        Collection<FlowRule> added = rules.stream()
                .map(r -> new CrossConnectFlowRule(r, linePorts))
                .filter(xc -> installCrossConnect(xc))
                .collect(Collectors.toList());
        // Cache the cookie/priority so getFlowEntries() can rebuild the rules later
        CrossConnectCache cache = this.handler().get(CrossConnectCache.class);
        added.forEach(xc -> cache.set(
                Objects.hash(data().deviceId(), xc.selector(), xc.treatment()),
                xc.id(),
                xc.priority()));
        return added;
    }

    /**
     * Removes the given rules (cross connects) from the device.
     *
     * @param rules rules to remove
     * @return the subset of rules that was successfully removed
     */
    @Override
    public Collection<FlowRule> removeFlowRules(Collection<FlowRule> rules) {
        try {
            snmp = new LumentumSnmpDevice(data().deviceId());
        } catch (IOException e) {
            log.error("Failed to connect to device: ", e);
            // Mirror applyFlowRules(): nothing can be removed without a session.
            return Collections.emptyList();
        }
        // Line ports are the last two ports reported by the device
        DeviceService deviceService = this.handler().get(DeviceService.class);
        List<Port> ports = deviceService.getPorts(data().deviceId());
        List<PortNumber> linePorts = ports.subList(ports.size() - 2, ports.size()).stream()
                .map(p -> p.number())
                .collect(Collectors.toList());
        // Apply the valid rules on the device
        Collection<FlowRule> removed = rules.stream()
                .map(r -> new CrossConnectFlowRule(r, linePorts))
                .filter(xc -> removeCrossConnect(xc))
                .collect(Collectors.toList());
        // Remove flow rule from cache
        CrossConnectCache cache = this.handler().get(CrossConnectCache.class);
        removed.forEach(xc -> cache.remove(
                Objects.hash(data().deviceId(), xc.selector(), xc.treatment())));
        return removed;
    }

    /**
     * Installs a cross connect on the device: enables and configures the
     * amplifiers, binds the channel to its add/drop port and finally puts the
     * channel in service.
     *
     * @param xc cross connect rule to install
     * @return true if the SNMP SET was sent, false if the device was unreachable
     */
    private boolean installCrossConnect(CrossConnectFlowRule xc) {
        int channel = toChannel(xc.ochSignal());
        long addDrop = xc.addDrop().toLong();
        if (!xc.isAddRule()) {
            // Drop ports are numbered after the 20 add ports; the device table
            // indexes both sides starting from 1.
            addDrop -= DROP_PORT_OFFSET;
        }
        // Create the PDU object
        PDU pdu = new PDU();
        pdu.setType(PDU.SET);
        // Enable preamp & booster
        List<OID> oids = Arrays.asList(new OID(CTRL_AMP_MODULE_SERVICE_STATE_PREAMP),
                new OID(CTRL_AMP_MODULE_SERVICE_STATE_BOOSTER));
        oids.forEach(
                oid -> pdu.add(new VariableBinding(oid, new Integer32(IN_SERVICE)))
        );
        // Set target gain on preamp & booster
        OID ctrlAmpModuleTargetGainPreamp = new OID(CTRL_AMP_MODULE_TARGET_GAIN_PREAMP);
        pdu.add(new VariableBinding(ctrlAmpModuleTargetGainPreamp, new Integer32(DEFAULT_TARGET_GAIN_PREAMP)));
        OID ctrlAmpModuleTargetGainBooster = new OID(CTRL_AMP_MODULE_TARGET_GAIN_BOOSTER);
        pdu.add(new VariableBinding(ctrlAmpModuleTargetGainBooster, new Integer32(DEFAULT_TARGET_GAIN_BOOSTER)));
        // Make cross connect
        OID ctrlChannelAddDropPortIndex = new OID(CTRL_CHANNEL_ADD_DROP_PORT_INDEX +
                (xc.isAddRule() ? "1." : "2.") + channel);
        pdu.add(new VariableBinding(ctrlChannelAddDropPortIndex, new UnsignedInteger32(addDrop)));
        // Add rules use closed loop, drop rules open loop
        // Add rules are set to target power, drop rules are attenuated
        if (xc.isAddRule()) {
            OID ctrlChannelMode = new OID(CTRL_CHANNEL_MODE + "1." + channel);
            pdu.add(new VariableBinding(ctrlChannelMode, new Integer32(CLOSED_LOOP)));
            OID ctrlChannelTargetPower = new OID(CTRL_CHANNEL_TARGET_POWER + "1." + channel);
            pdu.add(new VariableBinding(ctrlChannelTargetPower, new Integer32(DEFAULT_CHANNEL_TARGET_POWER)));
        } else {
            OID ctrlChannelMode = new OID(CTRL_CHANNEL_MODE + "2." + channel);
            pdu.add(new VariableBinding(ctrlChannelMode, new Integer32(OPEN_LOOP)));
            OID ctrlChannelAbsoluteAttenuation = new OID(CTRL_CHANNEL_ABSOLUTE_ATTENUATION + "2." + channel);
            pdu.add(new VariableBinding(
                    ctrlChannelAbsoluteAttenuation, new UnsignedInteger32(DEFAULT_CHANNEL_ABSOLUTE_ATTENUATION)));
        }
        // Final step is to enable the channel
        OID ctrlChannelState = new OID(CTRL_CHANNEL_STATE + (xc.isAddRule() ? "1." : "2.") + channel);
        pdu.add(new VariableBinding(ctrlChannelState, new Integer32(IN_SERVICE)));
        try {
            ResponseEvent response = snmp.set(pdu);
            // TODO: parse response
        } catch (IOException e) {
            log.error("Failed to create cross connect, unable to connect to device: ", e);
            // Mirror removeCrossConnect(): a rule that could not be pushed must
            // not be reported as installed (previously this returned true).
            return false;
        }
        return true;
    }

    /**
     * Removes a cross connect from the device: disables the channel, resets
     * its add/drop port binding and restores the default power/attenuation.
     *
     * @param xc cross connect rule to remove
     * @return true if the SNMP SET was sent, false if the device was unreachable
     */
    private boolean removeCrossConnect(CrossConnectFlowRule xc) {
        int channel = toChannel(xc.ochSignal());
        // Create the PDU object
        PDU pdu = new PDU();
        pdu.setType(PDU.SET);
        // Disable the channel
        OID ctrlChannelState = new OID(CTRL_CHANNEL_STATE + (xc.isAddRule() ? "1." : "2.") + channel);
        pdu.add(new VariableBinding(ctrlChannelState, new Integer32(OUT_OF_SERVICE)));
        // Put cross connect back into default port 1
        OID ctrlChannelAddDropPortIndex = new OID(CTRL_CHANNEL_ADD_DROP_PORT_INDEX +
                (xc.isAddRule() ? "1." : "2.") + channel);
        pdu.add(new VariableBinding(ctrlChannelAddDropPortIndex,
                new UnsignedInteger32(DISABLE_CHANNEL_ADD_DROP_PORT_INDEX)));
        // Put port/channel back to open loop
        OID ctrlChannelMode = new OID(CTRL_CHANNEL_MODE + (xc.isAddRule() ? "1." : "2.") + channel);
        pdu.add(new VariableBinding(ctrlChannelMode, new Integer32(OPEN_LOOP)));
        // Add rules are set to target power, drop rules are attenuated
        if (xc.isAddRule()) {
            OID ctrlChannelTargetPower = new OID(CTRL_CHANNEL_TARGET_POWER + "1." + channel);
            pdu.add(new VariableBinding(ctrlChannelTargetPower, new Integer32(DISABLE_CHANNEL_TARGET_POWER)));
        } else {
            OID ctrlChannelAbsoluteAttenuation = new OID(CTRL_CHANNEL_ABSOLUTE_ATTENUATION + "2." + channel);
            pdu.add(new VariableBinding(
                    ctrlChannelAbsoluteAttenuation, new UnsignedInteger32(DISABLE_CHANNEL_ABSOLUTE_ATTENUATION)));
        }
        try {
            ResponseEvent response = snmp.set(pdu);
            // TODO: parse response
        } catch (IOException e) {
            log.error("Failed to remove cross connect, unable to connect to device: ", e);
            return false;
        }
        return true;
    }

    /**
     Convert OCh signal to Lumentum channel ID.

     @param ochSignal OCh signal
     @return Lumentum channel ID
     */
    public static int toChannel(OchSignal ochSignal) {
        // FIXME: move to cross connect validation
        checkArgument(ochSignal.channelSpacing() == ChannelSpacing.CHL_50GHZ);
        checkArgument(LumentumSnmpDevice.START_CENTER_FREQ.compareTo(ochSignal.centralFrequency()) <= 0);
        checkArgument(LumentumSnmpDevice.END_CENTER_FREQ.compareTo(ochSignal.centralFrequency()) >= 0);
        return ochSignal.spacingMultiplier() + LumentumSnmpDevice.MULTIPLIER_SHIFT;
    }

    /**
     Convert Lumentum channel ID to OCh signal.

     @param channel Lumentum channel ID
     @return OCh signal
     */
    public static OchSignal toOchSignal(int channel) {
        checkArgument(1 <= channel);
        checkArgument(channel <= 96);
        return new OchSignal(GridType.DWDM, ChannelSpacing.CHL_50GHZ,
                channel - LumentumSnmpDevice.MULTIPLIER_SHIFT, 4);
    }

    // Returns the currently configured add/drop port for the given channel,
    // or null if the channel is not present in the device table.
    private PortNumber getAddDropPort(int channel, boolean isAddPort) {
        OID oid = new OID(CTRL_CHANNEL_ADD_DROP_PORT_INDEX + (isAddPort ? "1" : "2"));
        for (TreeEvent event : snmp.get(oid)) {
            if (event == null) {
                return null;
            }
            VariableBinding[] varBindings = event.getVariableBindings();
            for (VariableBinding varBinding : varBindings) {
                // The channel number is the last sub-identifier of the OID
                if (varBinding.getOid().last() == channel) {
                    int port = varBinding.getVariable().toInt();
                    if (!isAddPort) {
                        // Translate the device-side index back to the ONOS port number
                        port += DROP_PORT_OFFSET;
                    }
                    return PortNumber.portNumber(port);
                }
            }
        }
        return null;
    }

    // Returns the currently installed flow entries on the device, rebuilding
    // each rule's id/priority from the CrossConnectCache. Channels whose
    // add/drop port or cache entry is missing are skipped.
    private List<FlowRule> fetchRules(OID oid, boolean isAdd, PortNumber linePort) {
        List<FlowRule> rules = new LinkedList<>();
        for (TreeEvent event : snmp.get(oid)) {
            if (event == null) {
                continue;
            }
            VariableBinding[] varBindings = event.getVariableBindings();
            for (VariableBinding varBinding : varBindings) {
                CrossConnectCache cache = this.handler().get(CrossConnectCache.class);
                if (varBinding.getVariable().toInt() == IN_SERVICE) {
                    // Strip the trailing sub-identifier, which encodes the channel
                    int channel = varBinding.getOid().removeLast();
                    PortNumber addDropPort = getAddDropPort(channel, isAdd);
                    if (addDropPort == null) {
                        continue;
                    }
                    TrafficSelector selector = DefaultTrafficSelector.builder()
                            .matchInPort(isAdd ? addDropPort : linePort)
                            .add(Criteria.matchOchSignalType(OchSignalType.FIXED_GRID))
                            .add(Criteria.matchLambda(toOchSignal(channel)))
                            .build();
                    TrafficTreatment treatment = DefaultTrafficTreatment.builder()
                            .setOutput(isAdd ? linePort : addDropPort)
                            .build();
                    // Lookup flow ID and priority
                    int hash = Objects.hash(data().deviceId(), selector, treatment);
                    Pair<FlowId, Integer> lookup = cache.get(hash);
                    if (lookup == null) {
                        continue;
                    }
                    FlowRule fr = DefaultFlowRule.builder()
                            .forDevice(data().deviceId())
                            .makePermanent()
                            .withSelector(selector)
                            .withTreatment(treatment)
                            .withPriority(lookup.getRight())
                            .withCookie(lookup.getLeft().value())
                            .build();
                    rules.add(fr);
                }
            }
        }
        return rules;
    }
}
| donNewtonAlpha/onos | drivers/lumentum/src/main/java/org/onosproject/drivers/lumentum/LumentumSdnRoadmFlowRuleProgrammable.java | Java | apache-2.0 | 17,118 |
/*
* Copyright (C) 2010 Ryszard Wiśniewski <brut.alll@gmail.com>
* Copyright (C) 2010 Connor Tumbleson <connor.tumbleson@gmail.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package brut.androlib.aapt2;
import brut.androlib.*;
import brut.androlib.meta.MetaInfo;
import brut.androlib.options.BuildOptions;
import brut.common.BrutException;
import brut.directory.ExtFile;
import brut.util.OS;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import java.io.File;
import java.io.IOException;
import static org.junit.Assert.*;
/**
 * Round-trip test for the aapt2 pipeline: builds the bundled test app from
 * sources, decodes the resulting APK, and then compares the decoded output
 * against the original sources file by file.
 */
public class BuildAndDecodeTest extends BaseTest {
    @BeforeClass
    public static void beforeClass() throws Exception {
        // One build/decode cycle shared by all tests; individual tests only
        // compare the results, so the order of these steps matters.
        TestUtils.cleanFrameworkFile();
        sTmpDir = new ExtFile(OS.createTempDirectory());
        sTestOrigDir = new ExtFile(sTmpDir, "testapp-orig");
        sTestNewDir = new ExtFile(sTmpDir, "testapp-new");
        LOGGER.info("Unpacking testapp...");
        TestUtils.copyResourceDir(BuildAndDecodeTest.class, "aapt2/testapp/", sTestOrigDir);
        BuildOptions buildOptions = new BuildOptions();
        buildOptions.useAapt2 = true;
        buildOptions.verbose = true;
        LOGGER.info("Building testapp.apk...");
        File testApk = new File(sTmpDir, "testapp.apk");
        new Androlib(buildOptions).build(sTestOrigDir, testApk);
        LOGGER.info("Decoding testapp.apk...");
        ApkDecoder apkDecoder = new ApkDecoder(testApk);
        apkDecoder.setOutDir(sTestNewDir);
        apkDecoder.decode();
    }
    @AfterClass
    public static void afterClass() throws BrutException {
        // Remove the whole temp tree created in beforeClass
        OS.rmdir(sTmpDir);
    }
    @Test
    public void buildAndDecodeTest() {
        // Sanity check: the decode step produced an output directory at all
        assertTrue(sTestNewDir.isDirectory());
    }
    @Test
    public void valuesStringsTest() throws BrutException {
        compareValuesFiles("values/strings.xml");
    }
    @Test
    public void valuesColorsTest() throws BrutException {
        compareValuesFiles("values/colors.xml");
    }
    @Test
    public void valuesBoolsTest() throws BrutException {
        compareValuesFiles("values/bools.xml");
    }
    @Test
    public void valuesMaxLengthTest() throws BrutException {
        compareValuesFiles("values-es/strings.xml");
    }
    @Test
    public void confirmZeroByteFileExtensionIsNotStored() throws BrutException {
        // Only the full path of the zero-byte file, not its bare extension,
        // may appear in the doNotCompress list (see next test).
        MetaInfo metaInfo = new Androlib().readMetaFile(sTestNewDir);
        assertFalse(metaInfo.doNotCompress.contains("jpg"));
    }
    @Test
    public void confirmZeroByteFileIsStored() throws BrutException {
        MetaInfo metaInfo = new Androlib().readMetaFile(sTestNewDir);
        assertTrue(metaInfo.doNotCompress.contains("assets/0byte_file.jpg"));
    }
    @Test
    public void navigationResourceTest() throws BrutException {
        compareXmlFiles("res/navigation/nav_graph.xml");
    }
    @Test
    public void xmlIdsEmptyTest() throws BrutException {
        compareXmlFiles("res/values/ids.xml");
    }
    @Test
    public void leadingDollarSignResourceNameTest() throws BrutException {
        // Resource names starting with '$' (generated by AVD tooling) must
        // survive the build/decode round trip unchanged.
        compareXmlFiles("res/drawable/$avd_hide_password__0.xml");
        compareXmlFiles("res/drawable/$avd_show_password__0.xml");
        compareXmlFiles("res/drawable/$avd_show_password__1.xml");
        compareXmlFiles("res/drawable/$avd_show_password__2.xml");
        compareXmlFiles("res/drawable/avd_show_password.xml");
    }
    @Test
    public void samsungQmgFilesHandledTest() throws IOException, BrutException {
        compareBinaryFolder("drawable-xhdpi", true);
    }
    @Test
    public void confirmManifestStructureTest() throws BrutException {
        compareXmlFiles("AndroidManifest.xml");
    }
    @Test
    public void xmlXsdFileTest() throws BrutException {
        compareXmlFiles("res/xml/ww_box_styles_schema.xsd");
    }
    @Test
    public void multipleDexTest() throws BrutException, IOException {
        // Secondary dex files must be disassembled into their own smali dirs
        // and re-assembled into classes2.dex / classes3.dex on build.
        compareBinaryFolder("/smali_classes2", false);
        compareBinaryFolder("/smali_classes3", false);
        File classes2Dex = new File(sTestOrigDir, "build/apk/classes2.dex");
        File classes3Dex = new File(sTestOrigDir, "build/apk/classes3.dex");
        assertTrue(classes2Dex.isFile());
        assertTrue(classes3Dex.isFile());
    }
    @Test
    public void singleDexTest() throws BrutException, IOException {
        compareBinaryFolder("/smali", false);
        File classesDex = new File(sTestOrigDir, "build/apk/classes.dex");
        assertTrue(classesDex.isFile());
    }
}
| iBotPeaches/Apktool | brut.apktool/apktool-lib/src/test/java/brut/androlib/aapt2/BuildAndDecodeTest.java | Java | apache-2.0 | 5,032 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.impl;
import java.util.concurrent.TimeUnit;
import org.apache.camel.ContextTestSupport;
import org.apache.camel.Exchange;
import org.apache.camel.Processor;
import org.apache.camel.builder.RouteBuilder;
/**
* @version
*/
public class ShutdownRouteGracefulTimeoutTriggerTest extends ContextTestSupport {
private static String foo = "";
public void testShutdownRouteGraceful() throws Exception {
getMockEndpoint("mock:foo").expectedMessageCount(1);
template.sendBody("seda:foo", "A");
template.sendBody("seda:foo", "B");
template.sendBody("seda:foo", "C");
template.sendBody("seda:foo", "D");
template.sendBody("seda:foo", "E");
assertMockEndpointsSatisfied();
// now stop the route before its complete
foo = foo + "stop";
// timeout after 2 seconds
context.shutdownRoute("seda", 2, TimeUnit.SECONDS);
// should not be able to complete all messages as timeout occurred
assertNotSame("Should not able able to complete all pending messages", "stopABCDE", foo);
assertEquals("bar should still be running", true, context.getRouteStatus("bar").isStarted());
assertEquals("Seda should be stopped", true, context.getRouteStatus("seda").isStopped());
}
    @Override
    protected RouteBuilder createRouteBuilder() throws Exception {
        return new RouteBuilder() {
            @Override
            public void configure() throws Exception {
                // "seda" route: each message takes ~1s (delay) before its body
                // is appended to the shared accumulator string.
                from("seda:foo").routeId("seda").to("mock:foo").delay(1000).process(new Processor() {
                    public void process(Exchange exchange) throws Exception {
                        foo = foo + exchange.getIn().getBody(String.class);
                    }
                });
                // independent route used to verify it survives the seda shutdown
                from("direct:bar").routeId("bar").to("mock:bar");
            }
        };
    }
} | everttigchelaar/camel-svn | camel-core/src/test/java/org/apache/camel/impl/ShutdownRouteGracefulTimeoutTriggerTest.java | Java | apache-2.0 | 2,779 |
/*
* DBeaver - Universal Database Manager
* Copyright (C) 2010-2017 Serge Rider (serge@jkiss.org)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jkiss.dbeaver.ext.sqlite.model;
import org.jkiss.dbeaver.ext.generic.model.GenericSQLDialect;
import org.jkiss.dbeaver.model.exec.jdbc.JDBCDatabaseMetaData;
import org.jkiss.dbeaver.model.impl.jdbc.JDBCDataSource;
import org.jkiss.dbeaver.model.impl.sql.BasicSQLDialect;
import org.jkiss.dbeaver.model.sql.SQLConstants;
public class SQLiteSQLDialect extends GenericSQLDialect {
public SQLiteSQLDialect() {
super("SQLite");
}
public void initDriverSettings(JDBCDataSource dataSource, JDBCDatabaseMetaData metaData) {
super.initDriverSettings(dataSource, metaData);
}
public String[][] getIdentifierQuoteStrings() {
return BasicSQLDialect.DEFAULT_QUOTE_STRINGS;
}
}
| ruspl-afed/dbeaver | plugins/org.jkiss.dbeaver.ext.sqlite/src/org/jkiss/dbeaver/ext/sqlite/model/SQLiteSQLDialect.java | Java | apache-2.0 | 1,391 |
// Copyright 2017 MongoDB Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "../microbench.hpp"
#include <algorithm>
#include <fstream>
#include <bsoncxx/stdx/make_unique.hpp>
#include <bsoncxx/stdx/optional.hpp>
#include <mongocxx/client.hpp>
#include <mongocxx/gridfs/bucket.hpp>
#include <mongocxx/instance.hpp>
#include <mongocxx/uri.hpp>
namespace benchmark {
using bsoncxx::builder::basic::kvp;
using bsoncxx::builder::basic::make_document;
using bsoncxx::stdx::make_unique;
// Benchmark that repeatedly downloads a single pre-uploaded GridFS file.
class gridfs_download : public microbench {
   public:
    // The task size comes from the Driver Performance Benchmarking Reference Doc.
    gridfs_download(std::string file_name)
        : microbench{"TestGridFsDownload",
                     52.43,
                     std::set<benchmark_type>{benchmark_type::multi_bench,
                                              benchmark_type::read_bench}},
          _conn{mongocxx::uri{}},
          _file_name{std::move(file_name)} {}

    void setup();

    void teardown();

   protected:
    void task();

   private:
    mongocxx::client _conn;
    mongocxx::gridfs::bucket _bucket;
    // Id of the file uploaded in setup(); a view type — NOTE(review): confirm
    // the viewed value outlives setup(), since task() dereferences it later.
    bsoncxx::stdx::optional<bsoncxx::types::bson_value::view> _id;
    std::string _file_name;
};
void gridfs_download::setup() {
mongocxx::database db = _conn["perftest"];
db.drop();
std::ifstream stream{_file_name};
_bucket = db.gridfs_bucket();
auto result = _bucket.upload_from_stream(_file_name, &stream);
_id = result.id();
}
// Drops the benchmark database created in setup().
void gridfs_download::teardown() {
    _conn["perftest"].drop();
}
// Streams the previously uploaded file back from GridFS, reading one
// chunk-sized buffer at a time and discarding the bytes.
void gridfs_download::task() {
    auto stream = _bucket.open_download_stream(_id.value());

    auto total_bytes = stream.file_length();
    auto chunk_bytes = static_cast<std::int64_t>(stream.chunk_size());
    auto read_size = static_cast<std::size_t>(std::min(total_bytes, chunk_bytes));

    auto scratch = make_unique<std::uint8_t[]>(read_size);
    for (;;) {
        auto bytes_read = stream.read(scratch.get(), read_size);
        if (bytes_read == 0) {
            break;
        }
    }
}
} // namespace benchmark
| mongodb/mongo-cxx-driver | benchmark/multi_doc/gridfs_download.hpp | C++ | apache-2.0 | 2,561 |
#!/usr/bin/env python
# import libs
import unittest
import sys
import os
import random
import uuid
# import classes
import analytics.utils.misc as misc
import analytics.exceptions.exceptions as ex
import analytics.service as service
from analytics.datamanager.datamanager import DataManager
class IntegrationTestSequence(unittest.TestCase):
    """Integration tests that run service.requestData() end-to-end against
    the sample datasets stored next to this file under ``datasets/``.
    Each test picks a random dataset, so every dataset must satisfy the
    queried property ids.
    """
    def setUp(self):
        # Load every dataset found in the sibling "datasets" directory.
        filepath = os.path.dirname(os.path.realpath(__file__))
        self.integrationpath = os.path.join(filepath, "datasets")
        self.datamanager = DataManager()
        self.datamanager.loadDatasets(self.integrationpath)
        # NOTE(review): reaches into a private attribute of DataManager
        self.datasets = self.datamanager._datasets
    def test_service_default(self):
        """An empty query against any dataset succeeds with HTTP-style 200."""
        query = ""
        datasetId = random.choice(self.datasets.keys())
        result = service.requestData(datasetId, query, self.datamanager)
        self.assertEqual(result["status"], "success")
        self.assertEqual(result["code"], 200)
    def test_service_wrongquery(self):
        """A random, unparsable query is rejected with an error / 400."""
        query = uuid.uuid4().hex
        datasetId = random.choice(self.datasets.keys())
        result = service.requestData(datasetId, query, self.datamanager)
        self.assertEqual(result["status"], "error")
        self.assertEqual(result["code"], 400)
    def test_service_simpleQuery(self):
        """A well-formed multi-condition query over pulses succeeds."""
        query = """select from ${pulses}
            where @1b4cf15c86ec31cd8838feab0f9856b1 |is| static
            and @1b4cf15c86ec31cd8838feab0f9856b1 = 2
            and @b6db26b3972932b2862dac41cbb1493d = [up]"""
        datasetId = random.choice(self.datasets.keys())
        result = service.requestData(datasetId, query, self.datamanager)
        self.assertEqual(result["status"], "success")
        self.assertEqual(result["code"], 200)
    def test_service_selectCluster(self):
        """Selecting a cluster by id succeeds."""
        query = """select from ${clusters}
            where @id = [bc27b4dbbc0f34f9ae8e4b72f2d51b60]"""
        datasetId = random.choice(self.datasets.keys())
        result = service.requestData(datasetId, query, self.datamanager)
        self.assertEqual(result["status"], "success")
        self.assertEqual(result["code"], 200)
    def service_warnings(self, warn=True):
        # Helper (not a test itself): runs a query that is expected to
        # generate exactly one warning, with warnings toggled by `warn`.
        query = """select from ${pulses}
            where @f4b9ea9d3bf239f5a1c80578b0556a5e |is| dynamic"""
        datasetId = random.choice(self.datasets.keys())
        result = service.requestData(
            datasetId,
            query,
            self.datamanager,
            iswarnings=warn
        )
        # result should not fail and should generate warnings
        return result
    def test_service_warnings_on(self):
        # warnings are on by default
        result = self.service_warnings()
        self.assertEqual(result["status"], "success")
        self.assertEqual(result["code"], 200)
        self.assertEqual(len(result["messages"]), 1)
    def test_service_warnings_off(self):
        # warning is expected, but we turn it off
        result = self.service_warnings(False)
        self.assertEqual(result["status"], "success")
        self.assertEqual(result["code"], 200)
        self.assertEqual(len(result["messages"]), 0)
# Load test suites
def _suites():
    """Return the TestCase classes that make up this module's suite."""
    return [
        IntegrationTestSequence
    ]
# Load tests
def loadSuites():
    """Build and return the module-wide test suite from all suite classes."""
    loader = unittest.TestLoader()
    module_suite = unittest.TestSuite()
    for case_class in _suites():
        module_suite.addTest(loader.loadTestsFromTestCase(case_class))
    return module_suite
# Script entry point: run the whole suite with verbose text output.
# (Python 2 print statements — keep as-is for the interpreter this targets.)
if __name__ == '__main__':
    suite = loadSuites()
    print ""
    print "### Running tests ###"
    print "-" * 70
    unittest.TextTestRunner(verbosity=2).run(suite)
| sadikovi/pulsar | analytics/tests/integrationtest_service.py | Python | apache-2.0 | 3,628 |
// Copyright 2016 PlanBase Inc. & Glen Peterson
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package org.organicdesign.fp.tuple;
import java.io.Serializable;
import java.util.Objects;
import static org.organicdesign.fp.FunctionUtils.stringify;
// ======================================================================================
// THIS CLASS IS GENERATED BY /tupleGenerator/TupleGenerator.java. DO NOT EDIT MANUALLY!
// ======================================================================================
/**
Holds 12 items of potentially different types. Designed to let you easily create immutable
subclasses (to give your data structures meaningful names) with correct equals(), hashCode(), and
toString() methods.
*/
public class Tuple12<A,B,C,D,E,F,G,H,I,J,K,L> implements Serializable {
// For serializable. Make sure to change whenever internal data format changes.
// Implemented because implementing serializable only on a sub-class of an
// immutable class requires a serialization proxy. That's probably worse than
// the conceptual burdeon of all tuples being Serializable. private static final long serialVersionUID = 20160906065500L;
// Fields are protected so that sub-classes can make accessor methods with meaningful names.
protected final A _1;
protected final B _2;
protected final C _3;
protected final D _4;
protected final E _5;
protected final F _6;
protected final G _7;
protected final H _8;
protected final I _9;
protected final J _10;
protected final K _11;
protected final L _12;
/**
Constructor is protected (not public) for easy inheritance. Josh Bloch's "Item 1" says public
static factory methods are better than constructors because they have names, they can return
an existing object instead of a new one, and they can return a sub-type. Therefore, you
have more flexibility with a static factory as part of your public API then with a public
constructor.
*/
    protected Tuple12(A a, B b, C c, D d, E e, F f, G g, H h, I i, J j, K k, L l) {
        // Straight field assignment; no checks, so null is permitted in every position.
        _1 = a; _2 = b; _3 = c; _4 = d; _5 = e; _6 = f; _7 = g; _8 = h; _9 = i;
        _10 = j; _11 = k; _12 = l;
    }
    /** Public static factory method; any argument may be {@code null}. */
    public static <A,B,C,D,E,F,G,H,I,J,K,L> Tuple12<A,B,C,D,E,F,G,H,I,J,K,L>
    of(A a, B b, C c, D d, E e, F f, G g, H h, I i, J j, K k, L l) {
        return new Tuple12<>(a, b, c, d, e, f, g, h, i, j, k, l);
    }
    // Positional accessors _1().._12(); generated code — to change them,
    // edit /tupleGenerator/TupleGenerator.java, not this file.
    /** Returns the 1st field */
    public A _1() { return _1; }
    /** Returns the 2nd field */
    public B _2() { return _2; }
    /** Returns the 3rd field */
    public C _3() { return _3; }
    /** Returns the 4th field */
    public D _4() { return _4; }
    /** Returns the 5th field */
    public E _5() { return _5; }
    /** Returns the 6th field */
    public F _6() { return _6; }
    /** Returns the 7th field */
    public G _7() { return _7; }
    /** Returns the 8th field */
    public H _8() { return _8; }
    /** Returns the 9th field */
    public I _9() { return _9; }
    /** Returns the 10th field */
    public J _10() { return _10; }
    /** Returns the 11th field */
    public K _11() { return _11; }
    /** Returns the 12th field */
    public L _12() { return _12; }
@Override
public String toString() {
return getClass().getSimpleName() + "(" +
stringify(_1) + "," + stringify(_2) + "," +
stringify(_3) + "," + stringify(_4) + "," + stringify(_5) + "," +
stringify(_6) + "," + stringify(_7) + "," + stringify(_8) + "," +
stringify(_9) + "," + stringify(_10) + "," + stringify(_11) + "," +
stringify(_12) + ")";
}
@Override
public boolean equals(Object other) {
// Cheapest operation first...
if (this == other) { return true; }
if (!(other instanceof Tuple12)) { return false; }
// Details...
@SuppressWarnings("rawtypes") final Tuple12 that = (Tuple12) other;
return Objects.equals(this._1, that._1()) &&
Objects.equals(this._2, that._2()) &&
Objects.equals(this._3, that._3()) &&
Objects.equals(this._4, that._4()) &&
Objects.equals(this._5, that._5()) &&
Objects.equals(this._6, that._6()) &&
Objects.equals(this._7, that._7()) &&
Objects.equals(this._8, that._8()) &&
Objects.equals(this._9, that._9()) &&
Objects.equals(this._10, that._10()) &&
Objects.equals(this._11, that._11()) &&
Objects.equals(this._12, that._12());
}
@Override
public int hashCode() {
// First 2 fields match Tuple2 which implements java.util.Map.Entry as part of the map
// contract and therefore must match java.util.HashMap.Node.hashCode().
int ret = 0;
if (_1 != null) { ret = _1.hashCode(); }
if (_2 != null) { ret = ret ^ _2.hashCode(); }
if (_3 != null) { ret = ret + _3.hashCode(); }
if (_4 != null) { ret = ret + _4.hashCode(); }
if (_5 != null) { ret = ret + _5.hashCode(); }
if (_6 != null) { ret = ret + _6.hashCode(); }
if (_7 != null) { ret = ret + _7.hashCode(); }
if (_8 != null) { ret = ret + _8.hashCode(); }
if (_9 != null) { ret = ret + _9.hashCode(); }
if (_10 != null) { ret = ret + _10.hashCode(); }
if (_11 != null) { ret = ret + _11.hashCode(); }
if (_12 != null) { ret = ret + _12.hashCode(); }
return ret;
}
} | GlenKPeterson/UncleJim | src/main/java/org/organicdesign/fp/tuple/Tuple12.java | Java | apache-2.0 | 6,112 |
package com.zhongdan.lobby.bl.ai.chinesechess.engine;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.RandomAccessFile;
import java.net.URL;
import java.util.Calendar;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
public class SearchEngine {
private static Log log = LogFactory.getLog(SearchEngine.class);
public static final int MaxBookMove = 40;// 使用开局库的最大步数
public static final int MaxKiller = 4;// 搜索杀着的最大步数
private static final int BookUnique = 1;// 指示结点类型,下同
private static final int BookMulti = 2;
private static final int HashAlpha = 4;
private static final int HashBeta = 8;
private static final int HashPv = 16;
private static final int ObsoleteValue = -CCEvalue.MaxValue - 1;
private static final int UnknownValue = -CCEvalue.MaxValue - 2;
// private static final int BookUniqueValue = CCEvalue.MaxValue + 1;
// private static final int BookMultiValue = CCEvalue.MaxValue + 2;//推荐使用开局库,值要足够大
public static final int CLOCK_S = 1000;// 1秒=1000毫秒
public static final int CLOCK_M = 1000 * 60;// 1分=60秒
private static final Random rand = new Random();
private MoveNode bestMove = null;
// for search control
private int depth;
private long properTimer, limitTimer;
// 搜索过程的全局变量,包括:
// 1. 搜索树和历史表
private ActiveBoard activeBoard;
private int histTab[][];
/** Replaces the board/position this engine searches on. */
public void setActiveBoard(ActiveBoard activeBoard) {
    this.activeBoard = activeBoard;
}
// 2. 搜索选项
private int selectMask, style;// 下棋风格 default = EngineOption.Normal;
private boolean wideQuiesc, futility, nullMove;
// SelectMask:随机性 , WideQuiesc(保守true if Style == EngineOption.solid)
// Futility(true if Style == EngineOption.risky冒进)
// NullMove 是否空着剪裁
private boolean ponder;
// 3. 时间控制参数
private long startTimer, minTimer, maxTimer;
private int startMove;
private boolean stop;
// 4. 统计信息:Main Search Nodes, Quiescence Search Nodes and Hash Nodes
private int nodes, nullNodes, hashNodes, killerNodes, betaNodes, pvNodes, alphaNodes, mateNodes, leafNodes;
private int quiescNullNodes, quiescBetaNodes, quiescPvNodes, quiescAlphaNodes, quiescMateNodes;
private int hitBeta, hitPv, hitAlpha;
// 5. 搜索结果
private int lastScore, pvLineNum;
private MoveNode pvLine[] = new MoveNode[ActiveBoard.MAX_MOVE_NUM];
// 6. Hash and Book Structure
private int hashMask, maxBookPos, bookPosNum;
private HashRecord[] hashList;
private BookRecord[] bookList;
/** Creates an engine with default options (see the no-arg constructor) bound to the given board. */
public SearchEngine(ActiveBoard chessP) {
    this();
    activeBoard = chessP;
}
/**
 * Creates an engine with default options: normal playing style, null-move pruning on,
 * no randomness, a 2^17-entry hash table, depth 8 and a 2-second thinking time.
 */
public SearchEngine() {
    // History-heuristic table indexed by [from-square][to-square].
    histTab = new int[90][90];
    // Reset node-count statistics.
    nodes = nullNodes = hashNodes = killerNodes = betaNodes = pvNodes = alphaNodes = mateNodes = leafNodes = 0;
    // Search options.
    selectMask = 0;// randomness mask, e.g. (1 << 10) - 1; 0 = fully deterministic
    style = EngineOption.Normal;
    wideQuiesc = style == EngineOption.Solid;// solid style widens the quiescence search
    futility = style == EngineOption.Risky;// risky style enables futility pruning
    nullMove = true;
    // Search results.
    lastScore = 0;
    pvLineNum = 0;
    // Pre-fill the principal-variation line.  (The original code created and filled a
    // local array that shadowed the pvLine field and was then discarded; fill the field.)
    for (int i = 0; i < ActiveBoard.MAX_MOVE_NUM; i++) {
        pvLine[i] = new MoveNode();
    }
    newHash(17, 14);
    // Default search depth and timers.
    depth = 8;
    properTimer = CLOCK_S * 2;
    limitTimer = CLOCK_S * 2;
}
// Begin History and Hash Table Procedures
/**
 * (Re)allocates the history table, the transposition table and the opening-book slot list.
 *
 * @param HashScale log2 of the transposition-table size (table gets 2^HashScale entries)
 * @param BookScale log2 of the maximum number of multi-move book positions
 */
public void newHash(int HashScale, int BookScale) {
    histTab = new int[90][90];
    hashMask = (1 << HashScale) - 1;// power-of-two size, so (key & hashMask) indexes the table
    maxBookPos = 1 << BookScale;
    hashList = new HashRecord[hashMask + 1];
    for (int i = 0; i < hashMask + 1; i++) {
        hashList[i] = new HashRecord();
    }
    // Book records themselves are allocated lazily in loadBook() when a position
    // gains a second book move.
    bookList = new BookRecord[maxBookPos];
    clearHistTab();
    clearHash();
}
/** Drops the history, transposition and book tables so they can be garbage-collected. */
public void delHash() {
    bookList = null;
    hashList = null;
    histTab = null;
}
/** Zeroes every from/to cell of the history-heuristic table. */
public void clearHistTab() {
    for (int[] row : histTab) {
        java.util.Arrays.fill(row, 0);
    }
}
/** Marks every transposition-table entry as unused (flag == 0 means empty). */
public void clearHash() {
    for (HashRecord record : hashList) {
        record.flag = 0;
    }
}
/**
 * Probes the transposition table for the current position.
 *
 * @param HashMove intended out-parameter for the stored best move (but see the review note below)
 * @param Alpha    lower bound of the current search window
 * @param Beta     upper bound of the current search window
 * @param Depth    remaining search depth required for the entry to be usable
 * @return a usable score in [-MaxValue, MaxValue] on a hit; {@code ObsoleteValue} when the
 *         entry is too shallow but carries a best move; {@code UnknownValue} on a miss
 */
private int probeHash(MoveNode HashMove, int Alpha, int Beta, int Depth) {
    boolean MateNode;
    HashRecord TempHash;
    // NOTE(review): the three temporaries below are debug leftovers and are never read.
    int tmpInt = (int) (activeBoard.getZobristKey() & hashMask);
    long tmpLong1 = activeBoard.getZobristLock(), tmpLong2;
    TempHash = hashList[(int) (activeBoard.getZobristKey() & hashMask)];
    tmpLong2 = TempHash.zobristLock;
    // A full zobrist lock match is required to guard against index collisions.
    if (TempHash.flag != 0 && TempHash.zobristLock == activeBoard.getZobristLock()) {
        MateNode = false;
        // Mate scores are stored relative to the root; rebase them to the current node.
        if (TempHash.value > CCEvalue.MaxValue - ActiveBoard.MAX_MOVE_NUM / 2) {
            TempHash.value -= activeBoard.getMoveNum() - startMove;
            MateNode = true;
        } else if (TempHash.value < ActiveBoard.MAX_MOVE_NUM / 2 - CCEvalue.MaxValue) {
            TempHash.value += activeBoard.getMoveNum() - startMove;
            MateNode = true;
        }
        if (MateNode || TempHash.depth >= Depth) {
            if ((TempHash.flag & HashBeta) != 0) {
                if (TempHash.value >= Beta) {
                    hitBeta++;
                    return TempHash.value;
                }
            } else if ((TempHash.flag & HashAlpha) != 0) {
                if (TempHash.value <= Alpha) {
                    hitAlpha++;
                    return TempHash.value;
                }
            } else if ((TempHash.flag & HashPv) != 0) {
                hitPv++;
                return TempHash.value;
            } else {
                return UnknownValue;
            }
        }
        if (TempHash.bestMove.src == -1) {
            return UnknownValue;
        } else {
            // NOTE(review): this reassigns the local parameter only; Java passes references
            // by value, so the caller's MoveNode is NOT updated.  Callers that act on an
            // ObsoleteValue return (see step 8 of search()) may therefore search a stale
            // default move — confirm and consider copying the move's fields instead.
            HashMove = TempHash.bestMove;
            return ObsoleteValue;
        }
    }
    return UnknownValue;
}
/**
 * Stores a search result in the transposition table using depth-preferred replacement.
 * Mate scores are rebased from "distance from current node" to "distance from root"
 * before being written (the inverse of the adjustment made in probeHash()).
 *
 * @param hashMove best move found at this node (src == -1 when none)
 * @param hashFlag HashAlpha, HashBeta or HashPv bound type
 * @param value    score to store
 * @param depth    depth the score was computed at
 */
private void recordHash(MoveNode hashMove, int hashFlag, int value, int depth) {
    HashRecord tempHash;
    tempHash = hashList[(int) (activeBoard.getZobristKey() & hashMask)];
    if ((tempHash.flag != 0) && tempHash.depth > depth) {
        return;// keep the deeper existing entry
    }
    tempHash.zobristLock = activeBoard.getZobristLock();
    tempHash.flag = hashFlag;
    tempHash.depth = depth;
    tempHash.value = value;
    if (tempHash.value > CCEvalue.MaxValue - ActiveBoard.MAX_MOVE_NUM / 2) {
        tempHash.value += activeBoard.getMoveNum() - startMove;
    } else if (tempHash.value < ActiveBoard.MAX_MOVE_NUM / 2 - CCEvalue.MaxValue) {
        tempHash.value -= activeBoard.getMoveNum() - startMove;
    }
    tempHash.bestMove = hashMove;
    // tempHash already aliases the array slot, so this store is redundant but harmless.
    hashList[(int) (activeBoard.getZobristKey() & hashMask)] = tempHash;
}
/**
 * Rebuilds the principal variation by walking best moves out of the transposition table,
 * playing each on the board and appending it to pvLine[], until the chain breaks or a
 * repetition is detected.  The board is restored while unwinding the recursion.
 */
private void GetPvLine() {
    HashRecord tempHash;
    tempHash = hashList[(int) (activeBoard.getZobristKey() & hashMask)];
    if ((tempHash.flag != 0) && tempHash.bestMove.src != -1 && tempHash.zobristLock == activeBoard.getZobristLock()) {
        pvLine[pvLineNum] = tempHash.bestMove;
        activeBoard.movePiece(tempHash.bestMove);
        pvLineNum++;
        if (activeBoard.isLoop(1) == 0) {// stop extending the PV once the line repeats
            GetPvLine();
        }
        activeBoard.undoMove();
    }
}
// Book record example: "i0h0 4 rnbakabr1/9/4c1c1n/p1p1N3p/9/6p2/P1P1P3P/2N1C2C1/9/R1BAKAB1R w - - 0 7"
//   "i0h0" = move, "4" = evaluation, remainder = FEN string of the position.
/**
 * Loads the opening book from a classpath resource.  Each parsed entry is merged into the
 * transposition table: a position with a single book move is stored with flag BookUnique
 * (the move goes directly in the hash entry); when a position gains a second move, a
 * BookRecord is allocated in bookList and the entry is re-flagged BookMulti with its
 * value field holding the bookList index.
 *
 * @param bookFile classpath resource name of the book file
 * @throws IOException if the resource cannot be read
 */
public void loadBook(final String bookFile) throws IOException {
    int bookMoveNum, value, i;// NOTE(review): value and i are unused
    BufferedReader inFile;
    String lineStr;
    int index = 0;
    MoveNode bookMove = new MoveNode();
    HashRecord tempHash;
    ActiveBoard BookPos = new ActiveBoard();
    InputStream is = SearchEngine.class.getResourceAsStream(bookFile);
    inFile = new BufferedReader(new InputStreamReader(is), 1024 * 1024);
    if (inFile == null)
        return;
    bookPosNum = 0;
    int recordedToHash = 0;// for test
    while ((lineStr = inFile.readLine()) != null) {
        bookMove = new MoveNode();
        bookMove.move(lineStr);
        index = 0;
        if (bookMove.src != -1) {
            index += 5;// skip the 4-character move and its trailing separator
            while (lineStr.charAt(index) == ' ') {
                index++;
            }
            BookPos.loadFen(lineStr.substring(index));
            long tmpZob = BookPos.getZobristKey();// debug leftover, unused
            int tmp = BookPos.getSquares(bookMove.src);// for test
            // Only accept the move if a piece actually stands on its source square.
            if (BookPos.getSquares(bookMove.src) != 0) {
                tempHash = hashList[(int) (BookPos.getZobristKey() & hashMask)];
                if (tempHash.flag != 0) {// slot already occupied
                    if (tempHash.zobristLock == BookPos.getZobristLock()) {// same position
                        if ((tempHash.flag & BookMulti) != 0) {// position already has several book moves
                            bookMoveNum = bookList[tempHash.value].moveNum;
                            if (bookMoveNum < MaxBookMove) {
                                bookList[tempHash.value].moveList[bookMoveNum] = bookMove;
                                bookList[tempHash.value].moveNum++;
                                recordedToHash++;// for test
                            }
                        } else {
                            // Second move for this position: promote BookUnique to BookMulti.
                            if (bookPosNum < maxBookPos) {
                                tempHash.flag = BookMulti;
                                bookList[bookPosNum] = new BookRecord();
                                bookList[bookPosNum].moveNum = 2;
                                bookList[bookPosNum].moveList[0] = tempHash.bestMove;
                                bookList[bookPosNum].moveList[1] = bookMove;
                                tempHash.value = bookPosNum;
                                bookPosNum++;
                                hashList[(int) (BookPos.getZobristKey() & hashMask)] = tempHash;
                                recordedToHash++;// for test
                            }
                        }
                    }
                } else {
                    // First book move seen for this position.
                    tempHash.zobristLock = BookPos.getZobristLock();
                    tempHash.flag = BookUnique;
                    tempHash.depth = 0;
                    tempHash.value = 0;
                    tempHash.bestMove = bookMove;
                    hashList[(int) (BookPos.getZobristKey() & hashMask)] = tempHash;
                    recordedToHash++;
                }
            }
        }
    }
    inFile.close();
}
// End History and Hash Tables Procedures
// Begin Search Procedures
// Search Procedures
/**
 * Adaptive Null-Move Pruning (Ernst Heinz, ICCA Journal 1999): picks the null-move depth
 * reduction R from the remaining depth and the material left on the board:
 *   a. depth <= 6        -> R = 2
 *   b. depth 7 or 8      -> R = 3, unless either side is down to endgame material (then R = 2)
 *   c. depth > 8         -> R = 3
 *
 * @param depth remaining search depth
 * @return the reduction R to apply to the null-move search
 */
private int RAdapt(int depth) {
    if (depth <= 6) {
        return 2;
    } else if (depth <= 8) {
        return activeBoard.getEvalue(0) < CCEvalue.EndgameMargin || activeBoard.getEvalue(1) < CCEvalue.EndgameMargin ? 2 : 3;
    } else {
        return 3;
    }
}
/**
 * Quiescence search: once nominal depth is exhausted, keep searching capture moves (or
 * every evasion while in check) until the position is quiet, so the static evaluation is
 * never taken in the middle of a capture sequence.  Fail-soft alpha-beta.
 *
 * @param Alpha lower bound of the search window
 * @param Beta  upper bound of the search window
 * @return best score for the side to move (may fall outside [Alpha, Beta])
 */
private int quiesc(int Alpha, int Beta) {
    int i, bestValue, thisAlpha, thisValue;
    boolean inCheck, movable;
    MoveNode thisMove;
    SortedMoveNodes moveSort = new SortedMoveNodes();
    // 1. Return if a loop (repetition) position is detected — never at the root.
    if (activeBoard.getMoveNum() > startMove) {
        thisValue = activeBoard.isLoop(1);
        if (thisValue != 0) {
            return activeBoard.loopValue(thisValue, activeBoard.getMoveNum() - startMove);
        }
    }
    // 2. Initialize fail-soft bookkeeping.
    inCheck = activeBoard.lastMove().chk;
    movable = false;
    bestValue = -CCEvalue.MaxValue;
    thisAlpha = Alpha;
    // 3. In non-check positions the side to move may "stand pat" on the static evaluation
    //    (optionally jittered by selectMask to vary play).
    if (!inCheck) {
        movable = true;
        thisValue = activeBoard.evaluation() + (selectMask != 0 ? (rand.nextInt() & selectMask) - (rand.nextInt() & selectMask) : 0);
        if (thisValue > bestValue) {
            if (thisValue >= Beta) {
                quiescNullNodes++;
                return thisValue;
            }
            bestValue = thisValue;
            if (thisValue > thisAlpha) {
                thisAlpha = thisValue;
            }
        }
    }
    // 4. Generate and sort all moves when in check, otherwise only capture moves.
    moveSort.GenMoves(activeBoard, inCheck ? histTab : null);
    for (i = 0; i < moveSort.MoveNum; i++) {
        moveSort.BubbleSortMax(i);
        thisMove = moveSort.MoveList[i];
        if (inCheck || activeBoard.narrowCap(thisMove, wideQuiesc)) {
            if (activeBoard.movePiece(thisMove)) {
                movable = true;
                // 5. Recurse for every legal move.  (Removed: a leftover debug loop that
                //    rebuilt the whole move list as a string for every node visited —
                //    pure allocation churn on the hottest path of the engine.)
                thisValue = -quiesc(-Beta, -thisAlpha);
                activeBoard.undoMove();
                // 6. Fail-soft alpha-beta selection of the best move.
                if (thisValue > bestValue) {
                    if (thisValue >= Beta) {
                        quiescBetaNodes++;
                        return thisValue;
                    }
                    bestValue = thisValue;
                    if (thisValue > thisAlpha) {
                        thisAlpha = thisValue;
                    }
                }
            }
        }
    }
    // 7. No legal move: mated.  The distance term prefers longer resistance.
    if (!movable) {
        quiescMateNodes++;
        return activeBoard.getMoveNum() - startMove - CCEvalue.MaxValue;
    }
    if (thisAlpha > Alpha) {
        quiescPvNodes++;
    } else {
        quiescAlphaNodes++;
    }
    return bestValue;
}
// The main alpha-beta search.  Techniques combined here:
// 1. transposition (hash) table
// 2. fail-soft alpha-beta
// 3. adaptive null-move pruning
// 4. selective extension / futility pruning
// 5. iterative deepening driven through the hash table
// 6. killer-move table
// 7. check extension
// 8. principal variation search (null-window re-search)
// 9. history heuristic table
private int search(KillerStruct KillerTab, int Alpha, int Beta, int Depth) {
    int i, j, thisDepth, futPrune, hashFlag;// NOTE(review): j is unused
    boolean inCheck, movable, searched;
    int hashValue, bestValue, thisAlpha, thisValue, futValue = 0;
    MoveNode thisMove = new MoveNode();
    MoveNode bestMove = new MoveNode();
    SortedMoveNodes moveSort = new SortedMoveNodes();
    KillerStruct subKillerTab = new KillerStruct();
    // Alpha-Beta Search:
    // 1. Repetition detection (never at the root).
    if (activeBoard.getMoveNum() > startMove) {
        thisValue = activeBoard.isLoop(1);
        if (thisValue != 0) {
            return activeBoard.loopValue(thisValue, activeBoard.getMoveNum() - startMove);
        }
    }
    // 2. Check extension: search one ply deeper when the side to move is in check.
    inCheck = activeBoard.lastMove().chk;
    thisDepth = Depth;
    if (inCheck) {
        thisDepth++;
    }
    // 3. Return if we hit the hash table with a usable score.
    // NOTE(review): probeHash() cannot actually hand the stored move back through thisMove
    // (the assignment inside probeHash only rebinds its local parameter), so the
    // ObsoleteValue branch in step 8 below may replay a default move — confirm.
    hashValue = probeHash(thisMove, Alpha, Beta, thisDepth);
    if (hashValue >= -CCEvalue.MaxValue && hashValue <= CCEvalue.MaxValue) {
        return hashValue;
    }
    // 4. Return if interrupted or timed out.
    if (interrupt()) {
        return 0;
    }
    ;
    // 5. Full-width search while depth remains; otherwise quiescence (step 15).
    if (thisDepth > 0) {
        movable = false;
        searched = false;
        bestValue = -CCEvalue.MaxValue;
        thisAlpha = Alpha;
        hashFlag = HashAlpha;
        subKillerTab.moveNum = 0;
        // 6. Razoring / futility pruning setup (only in the "risky" style).
        futPrune = 0;
        if (futility) {
            // Razoring: drop depth 3 to 2 when the static eval is hopelessly below alpha.
            if (thisDepth == 3 && !inCheck && activeBoard.evaluation() + CCEvalue.RazorMargin <= Alpha
                    && activeBoard.getEvalue(activeBoard.getOppPlayer()) > CCEvalue.EndgameMargin) {
                thisDepth = 2;
            }
            if (thisDepth < 3) {
                futValue = activeBoard.evaluation() + (thisDepth == 2 ? CCEvalue.ExtFutMargin : CCEvalue.SelFutMargin);
                if (!inCheck && futValue <= Alpha) {
                    futPrune = thisDepth;
                    bestValue = futValue;
                }
            }
        }
        // 7. Null-move pruning (skipped in check, right after a null move, and in endgames).
        if (nullMove && futPrune == 0 && !inCheck && activeBoard.lastMove().src != -1
                && activeBoard.getEvalue(activeBoard.getPlayer()) > CCEvalue.EndgameMargin) {
            activeBoard.nullMove();
            thisValue = -search(subKillerTab, -Beta, 1 - Beta, thisDepth - 1 - RAdapt(thisDepth));
            activeBoard.undoNull();
            if (thisValue >= Beta) {
                nullNodes++;
                return Beta;
            }
        }
        // 8. Try the move suggested by the hash table first (see review note at step 3).
        if (hashValue == ObsoleteValue) {
            if (activeBoard.movePiece(thisMove)) {
                movable = true;
                // NOTE(review): the futility guard below fires only when the move GIVES
                // check (lastMove().chk) — futility pruning normally skips quiet moves
                // that do NOT give check, so this condition looks inverted; confirm.
                if (futPrune != 0 && -activeBoard.evaluation() + (futPrune == 2 ? CCEvalue.ExtFutMargin : CCEvalue.SelFutMargin) <= Alpha
                        && activeBoard.lastMove().chk) {
                    activeBoard.undoMove();
                } else {
                    thisValue = -search(subKillerTab, -Beta, -thisAlpha, thisDepth - 1);
                    searched = true;
                    activeBoard.undoMove();
                    if (stop) {
                        return 0;
                    }
                    if (thisValue > bestValue) {
                        if (thisValue >= Beta) {
                            histTab[thisMove.src][thisMove.dst] += 1 << (thisDepth - 1);
                            recordHash(thisMove, HashBeta, Beta, thisDepth);
                            hashNodes++;
                            return thisValue;
                        }
                        bestValue = thisValue;
                        bestMove = thisMove;
                        if (thisValue > thisAlpha) {
                            thisAlpha = thisValue;
                            hashFlag = HashPv;
                            if (activeBoard.getMoveNum() == startMove) {
                                recordHash(bestMove, hashFlag, thisAlpha, thisDepth);
                                popInfo(thisAlpha, Depth);
                            }
                        }
                    }
                }
            }
        }
        // 9. Then the killer moves recorded for this ply.
        for (i = 0; i < KillerTab.moveNum; i++) {
            thisMove = KillerTab.moveList[i];
            if (activeBoard.leagalMove(thisMove)) {
                if (activeBoard.movePiece(thisMove)) {
                    movable = true;
                    if (futPrune != 0 && -activeBoard.evaluation() + (futPrune == 2 ? CCEvalue.ExtFutMargin : CCEvalue.SelFutMargin) <= Alpha
                            && activeBoard.lastMove().chk) {
                        activeBoard.undoMove();
                    } else {
                        if (searched) {
                            // PVS: null-window probe first, re-search inside the window on fail-high.
                            thisValue = -search(subKillerTab, -thisAlpha - 1, -thisAlpha, thisDepth - 1);
                            if (thisValue > thisAlpha && thisValue < Beta) {
                                thisValue = -search(subKillerTab, -Beta, -thisAlpha, thisDepth - 1);
                            }
                        } else {
                            thisValue = -search(subKillerTab, -Beta, -thisAlpha, thisDepth - 1);
                            searched = true;
                        }
                        activeBoard.undoMove();
                        if (stop) {
                            return 0;
                        }
                        if (thisValue > bestValue) {
                            if (thisValue >= Beta) {
                                killerNodes++;
                                histTab[thisMove.src][thisMove.dst] += 1 << (thisDepth - 1);
                                recordHash(thisMove, HashBeta, Beta, thisDepth);
                                return thisValue;
                            }
                            bestValue = thisValue;
                            bestMove = thisMove;
                            if (thisValue > thisAlpha) {
                                thisAlpha = thisValue;
                                hashFlag = HashPv;
                                if (activeBoard.getMoveNum() == startMove) {
                                    recordHash(bestMove, hashFlag, thisAlpha, thisDepth);
                                    popInfo(thisAlpha, Depth);
                                }
                            }
                        }
                    }
                }
            }
        }
        // 10. Generate and sort all remaining legal moves (history-heuristic ordering).
        moveSort.GenMoves(activeBoard, histTab);
        nodes += moveSort.MoveNum;
        for (i = 0; i < moveSort.MoveNum; i++) {
            moveSort.BubbleSortMax(i);
            thisMove = moveSort.MoveList[i];
            if (activeBoard.movePiece(thisMove)) {
                movable = true;
                // 11. Alpha-beta on the move (same futility guard as above).
                if (futPrune != 0 && -activeBoard.evaluation() + (futPrune == 2 ? CCEvalue.ExtFutMargin : CCEvalue.SelFutMargin) <= Alpha
                        && activeBoard.lastMove().chk) {
                    activeBoard.undoMove();
                } else {
                    if (searched) {
                        thisValue = -search(subKillerTab, -thisAlpha - 1, -thisAlpha, thisDepth - 1);
                        if (thisValue > thisAlpha && thisValue < Beta) {
                            thisValue = -search(subKillerTab, -Beta, -thisAlpha, thisDepth - 1);
                        }
                    } else {
                        thisValue = -search(subKillerTab, -Beta, -thisAlpha, thisDepth - 1);
                        searched = true;
                    }
                    activeBoard.undoMove();
                    if (stop) {
                        return 0;
                    }
                    // 12. Fail-soft bookkeeping; fail-high updates history/killers and cuts off.
                    if (thisValue > bestValue) {
                        if (thisValue >= Beta) {
                            betaNodes++;
                            histTab[thisMove.src][thisMove.dst] += 1 << (thisDepth - 1);
                            recordHash(thisMove, HashBeta, Beta, thisDepth);
                            if (KillerTab.moveNum < MaxKiller) {
                                KillerTab.moveList[KillerTab.moveNum] = thisMove;
                                KillerTab.moveNum++;
                            }
                            return thisValue;
                        }
                        bestValue = thisValue;
                        bestMove = thisMove;
                        if (thisValue > thisAlpha) {
                            thisAlpha = thisValue;
                            hashFlag = HashPv;
                            if (activeBoard.getMoveNum() == startMove) {
                                recordHash(bestMove, hashFlag, thisAlpha, thisDepth);
                                popInfo(thisAlpha, Depth);
                            }
                        }
                    }
                }
            }
        }
        // 13. No legal move: the side to move is mated (longer mates score better).
        if (!movable) {
            mateNodes++;
            return activeBoard.getMoveNum() - startMove - CCEvalue.MaxValue;
        }
        // 14. Update history table, killers and the hash entry for this node.
        if (futPrune != 0 && bestValue == futValue) {
            bestMove.src = bestMove.dst = -1;
        }
        if ((hashFlag & HashAlpha) != 0) {
            alphaNodes++;
        } else {
            pvNodes++;
            histTab[bestMove.src][bestMove.dst] += 1 << (thisDepth - 1);
            if (KillerTab.moveNum < MaxKiller) {
                KillerTab.moveList[KillerTab.moveNum] = bestMove;
                KillerTab.moveNum++;
            }
        }
        recordHash(bestMove, hashFlag, thisAlpha, thisDepth);
        return bestValue;
    // 15. Depth exhausted: quiescence search, result cached at depth 0.
    } else {
        thisValue = quiesc(Alpha, Beta);
        // NOTE(review): "thisMove.src = bestMove.dst = -1" looks like a typo for
        // "thisMove.src = thisMove.dst = -1" (thisMove is what gets stored below) — confirm.
        thisMove.src = bestMove.dst = -1;
        if (thisValue <= Alpha) {
            recordHash(thisMove, HashAlpha, Alpha, 0);
        } else if (thisValue >= Beta) {
            recordHash(thisMove, HashBeta, Beta, 0);
        } else {
            recordHash(thisMove, HashPv, thisValue, 0);
        }
        leafNodes++;
        return thisValue;
    }
}
// End Search Procedures
// Start Control Procedures
/** Returns true when the search has been asked to stop (see {@link #stopSearch()}). */
private boolean interrupt() {
    return stop;
}
/** Asynchronously requests that the running search stop at its next interrupt check. */
public void stopSearch() {
    this.stop = true;
}
/**
 * Publishes intermediate search info: refreshes the principal variation from the hash
 * table and logs it.  While pondering, may stop the search early once half the target
 * time has elapsed and the score is close enough to the previous iteration's score.
 *
 * @param value best score found so far
 * @param depth nominal search depth (0 suppresses all output)
 */
private void popInfo(int value, int depth) {
    int i, quiescNodes, nps, npsQuiesc;// NOTE(review): only i is used
    char[] moveStr;
    long tempLong;// unused
    if (depth != 0) {
        String logString = "PVNode: depth=" + depth + ",score=" + value + ",Move: " + "\n";
        pvLineNum = 0;
        GetPvLine();
        for (i = 0; i < pvLineNum; i++) {
            moveStr = pvLine[i].location();
            logString += " " + String.copyValueOf(moveStr) + "\n";
        }
        if (ponder && System.currentTimeMillis() > minTimer && value + CCEvalue.InadequateValue > lastScore) {
            stop = true;
        }
        if (log.isDebugEnabled())
            log.debug(logString);
    }
}
/**
 * Configures the iterative-deepening limits.
 *
 * @param depth  maximum nominal search depth
 * @param proper target thinking time in milliseconds
 * @param limit  hard thinking-time limit in milliseconds
 */
public void setupControl(int depth, long proper, long limit) {
    this.depth = depth;
    this.properTimer = proper;
    this.limitTimer = limit;
}
/**
 * The top-level "think" procedure: consults the opening book, short-circuits forced or
 * illegal positions, then runs an iterative-deepening search and stores the chosen move
 * in {@code bestMove} (left null when there is nothing to play).
 *
 * @throws LostException when the game is already decided against the engine
 *         (perpetual chase, or the maximum game length reached)
 */
public void control() throws LostException {
    int i, MoveNum, ThisValue;
    char[] MoveStr;
    stop = false;
    bestMove = null;
    MoveNode ThisMove = new MoveNode(), UniqueMove = new MoveNode();
    HashRecord TempHash;
    SortedMoveNodes MoveSort = new SortedMoveNodes();
    KillerStruct SubKillerTab = new KillerStruct();
    // The Computer Thinking Procedure:
    // 1. Play straight from the opening book when the position is known.
    int tmpInt = (int) (activeBoard.getZobristKey() & hashMask);// debug leftover, unused
    TempHash = hashList[(int) (activeBoard.getZobristKey() & hashMask)];
    if (TempHash.flag != 0 && TempHash.zobristLock == activeBoard.getZobristLock()) {
        if ((TempHash.flag == BookUnique)) {
            MoveStr = TempHash.bestMove.location();
            bestMove = new MoveNode(String.copyValueOf(MoveStr));
            return;
        } else if (TempHash.flag == BookMulti) {
            ThisValue = 0;// NOTE(review): dead store
            // Pick one of the recorded book moves at random.
            i = Math.abs(rand.nextInt()) % (bookList[TempHash.value].moveNum);
            MoveStr = bookList[TempHash.value].moveList[i].location();
            bestMove = new MoveNode(String.copyValueOf(MoveStr));
            return;
        }
    }
    // 2. Initialize timers and statistics counters.
    startTimer = System.currentTimeMillis();
    minTimer = startTimer + (properTimer >> 1);
    maxTimer = properTimer << 1;
    if (maxTimer > limitTimer) {
        maxTimer = limitTimer;
    }
    maxTimer += startTimer;
    stop = false;
    startMove = activeBoard.getMoveNum();
    nodes = nullNodes = hashNodes = killerNodes = betaNodes = pvNodes = alphaNodes = mateNodes = leafNodes = 0;
    quiescNullNodes = quiescBetaNodes = quiescPvNodes = quiescAlphaNodes = quiescMateNodes = 0;
    hitBeta = hitPv = hitAlpha = 0;
    pvLineNum = 0;
    // 3. Illegal position: the side that just moved left its own king capturable.
    if (activeBoard.checked(activeBoard.getOppPlayer())) {
        return;
    }
    ThisValue = activeBoard.isLoop(3);
    if (ThisValue != 0) {
        throw new LostException("不可常捉!");
    }
    if (activeBoard.getMoveNum() > ActiveBoard.MAX_CONSECUTIVE_MOVES) {
        throw new LostException("最大步数,和棋!");
    }
    // 4. When in check, count the legal evasions: none -> mated, exactly one -> play it.
    if (activeBoard.lastMove().chk) {
        MoveNum = 0;
        MoveSort.GenMoves(activeBoard, histTab);
        for (i = 0; i < MoveSort.MoveNum; i++) {
            ThisMove = MoveSort.MoveList[i];
            if (activeBoard.movePiece(ThisMove)) {
                activeBoard.undoMove();
                UniqueMove = ThisMove;
                MoveNum++;
                if (MoveNum > 1) {
                    break;
                }
            }
        }
        if (MoveNum == 0) {
            if (log.isDebugEnabled())
                log.debug("score " + -CCEvalue.MaxValue + "\n");
        }
        if (MoveNum == 1) {
            MoveStr = UniqueMove.location();
            if (log.isDebugEnabled())
                log.debug("bestmove " + String.copyValueOf(MoveStr) + "\n");
            bestMove = new MoveNode(String.copyValueOf(MoveStr));
            return;
        }
    }
    // 5. Iterative deepening, starting from depth 4.
    if (depth == 0) {
        return;
    }
    for (i = 4; i <= depth; i++) {
        if (log.isDebugEnabled())
            log.debug("info depth " + i + "\n");
        SubKillerTab.moveNum = 0;
        ThisValue = search(SubKillerTab, -CCEvalue.MaxValue, CCEvalue.MaxValue, i);
        popInfo(ThisValue, depth);
        if (stop) {
            break;
        }
        lastScore = ThisValue;
        // 6. Stop thinking if the target time has elapsed or a mate has been found.
        if (!ponder && System.currentTimeMillis() > minTimer) {
            break;
        }
        if (ThisValue > CCEvalue.MaxValue - ActiveBoard.MAX_MOVE_NUM / 2 || ThisValue < ActiveBoard.MAX_MOVE_NUM / 2 - CCEvalue.MaxValue) {
            break;
        }
    }
    // 7. Extract the best move (and the expected reply, for pondering) from the PV.
    if (pvLineNum != 0) {
        MoveStr = pvLine[0].location();
        bestMove = new MoveNode(String.copyValueOf(MoveStr));
        if (log.isDebugEnabled())
            log.debug("bestmove: " + String.copyValueOf(MoveStr) + "\n");
        if (pvLineNum > 1) {
            MoveStr = pvLine[1].location();
            if (log.isDebugEnabled())
                log.debug("ponder:" + String.copyValueOf(MoveStr) + "\n");
        }
    } else {
        if (log.isDebugEnabled())
            log.info("score:" + ThisValue);
    }
}
// End Control Procedures
/**
 * Runs the full thinking procedure and returns the chosen move, or null when none was
 * found (illegal position, or the search produced no principal variation).
 *
 * @throws LostException when the game is already decided (see {@link #control()})
 */
public MoveNode getBestMove() throws LostException {
    control();
    return bestMove;
}
// for test
/**
 * Ad-hoc benchmark/regression harness: searches a fixed FEN position and appends the
 * node-count statistics to /data/test.log on the classpath.
 */
public static void main(String[] args) throws IOException {
    long start, end;
    RandomAccessFile testResult;
    log.info("begin search, please wait......");
    start = System.currentTimeMillis();
    int steps = 8;
    ActiveBoard cp = new ActiveBoard();
    String FenStr = "1c1k1abR1/4a4/4b4/6NP1/4P4/2C1n1P2/r5p2/4B4/4A4/2BAK4 w - - 0 20";
    cp.loadFen(FenStr);
    SearchEngine searchMove = new SearchEngine(cp);
    searchMove.loadBook("/data/book.txt");
    log.info(FenStr);
    end = System.currentTimeMillis();
    long second = (end - start) / 1000;
    if (second == 0)
        second = 1;// avoid division by zero in the BPS figure below
    long minutes = second / 60;
    URL url = SearchEngine.class.getResource("/data/test.log");
    String uri = url.toString().replaceAll("file:/", "");
    testResult = new RandomAccessFile(uri, "rw");
    Calendar c = Calendar.getInstance();
    // Build one report string, append it to the log file and echo it to stdout.
    String tmpStr = "\n\n********************************************************************\n";
    tmpStr = tmpStr + "[Test Time] " + c.getTime() + "\n";
    tmpStr = tmpStr + "[Fen String] " + FenStr + "\n";
    tmpStr = tmpStr + " Deep =" + steps + ",Used Time:" + minutes + ":" + second % 60 + "\n";
    tmpStr = tmpStr + "[Nodes] " + searchMove.nodes + "\n";
    tmpStr = tmpStr + "[AlphaNodes] " + searchMove.alphaNodes + "\n";
    tmpStr = tmpStr + "[BetaNodes] " + searchMove.betaNodes + "\n";
    tmpStr = tmpStr + "[HashNodes] " + searchMove.hashNodes + "\n";
    tmpStr = tmpStr + "[KillerNodes] " + searchMove.killerNodes + "\n";
    tmpStr = tmpStr + "[LeafNodes] " + searchMove.leafNodes + "\n";
    tmpStr = tmpStr + "[NullNodes] " + searchMove.nullNodes + "\n";
    tmpStr = tmpStr + "[QuiescAlphaNodes] " + searchMove.quiescAlphaNodes + "\n";
    tmpStr = tmpStr + "[QuiescBetaNodesNodes] " + searchMove.quiescBetaNodes + "\n";
    tmpStr = tmpStr + "[QuiescMateNodes] " + searchMove.quiescMateNodes + "\n";
    tmpStr = tmpStr + "[QuiescNullNodes] " + searchMove.quiescNullNodes + "\n";
    tmpStr = tmpStr + "[QuiescPvNodes] " + searchMove.quiescPvNodes + "\n";
    tmpStr = tmpStr + "[HitAlpha] " + searchMove.hitAlpha + "\n";
    tmpStr = tmpStr + "[HitBeta] " + searchMove.hitBeta + "\n";
    tmpStr = tmpStr + "[HitPv] " + searchMove.hitPv + "\n";
    tmpStr = tmpStr + "[BetaNode] " + searchMove.betaNodes + "\n";
    tmpStr = tmpStr + "[BPS] " + searchMove.nodes / second;
    // Count occupied transposition-table entries.
    int count = 0;
    for (int i = 1; i < searchMove.hashList.length; i++) {
        if (searchMove.hashList[i].flag != 0)
            count++;
    }
    tmpStr = tmpStr + "[HashTable] length=" + searchMove.hashList.length + ", occupied=" + count;
    testResult.seek(testResult.length());
    testResult.writeBytes(tmpStr);
    testResult.close();
    System.out.println(tmpStr);
    searchMove = null;
    cp = null;
    System.gc();
}
}
/** An opening-book entry: the list of known book moves for one position. */
class BookRecord {
    int moveNum;// number of valid entries in moveList
    MoveNode[] moveList;// fixed capacity SearchEngine.MaxBookMove
    public BookRecord() {
        moveList = new MoveNode[SearchEngine.MaxBookMove];
        moveNum = 0;
    }
};
/** Per-ply killer-move table: moves that caused beta cutoffs at sibling nodes. */
class KillerStruct {
    int moveNum;// number of valid entries in moveList
    MoveNode[] moveList;// fixed capacity SearchEngine.MaxKiller
    public KillerStruct() {
        moveList = new MoveNode[SearchEngine.MaxKiller];
        for (int i = 0; i < SearchEngine.MaxKiller; i++)
            moveList[i] = new MoveNode();
        moveNum = 0;
    }
};
/**
 * One transposition-table slot.  flag == 0 marks an empty slot; otherwise flag carries
 * the bound type (HashAlpha/HashBeta/HashPv) or a book marker (BookUnique/BookMulti,
 * in which case value is a bookList index for BookMulti entries).
 */
class HashRecord {
    public HashRecord() {
        flag = 0;
        depth = 0;
        value = 0;
        zobristLock = 0;
        bestMove = new MoveNode();
    }
    long zobristLock;// full-width position checksum to detect index collisions
    int flag, depth;
    int value;
    MoveNode bestMove;// stored best move (src == -1 when none)
};
| dyzhxsl3897/goliveiptv | lobby/src/main/java/com/zhongdan/lobby/bl/ai/chinesechess/engine/SearchEngine.java | Java | apache-2.0 | 29,025 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.mail;
import javax.activation.DataHandler;
import javax.activation.FileDataSource;
import org.apache.camel.Endpoint;
import org.apache.camel.Exchange;
import org.apache.camel.Message;
import org.apache.camel.Producer;
import org.apache.camel.builder.RouteBuilder;
import org.apache.camel.component.mock.MockEndpoint;
import org.apache.camel.test.junit4.CamelTestSupport;
import org.junit.Test;
import org.jvnet.mock_javamail.Mailbox;
/**
*
*/
public class MailSplitAttachmentsTest extends CamelTestSupport {

    @Test
    public void testSplitAttachments() throws Exception {
        // start from a clean mailbox so the POP3 consumer only sees the message we send
        Mailbox.clearAll();

        // create an exchange with a normal body and two attachments to be produced as email
        Endpoint endpoint = context.getEndpoint("smtp://james@mymailserver.com?password=secret");

        // create the exchange with the mail message that is multipart with a file and a Hello World text/plain message.
        Exchange exchange = endpoint.createExchange();
        Message in = exchange.getIn();
        in.setBody("Hello World");
        in.addAttachment("logo.jpeg", new DataHandler(new FileDataSource("src/test/data/logo.jpeg")));
        in.addAttachment("license.txt", new DataHandler(new FileDataSource("src/main/resources/META-INF/LICENSE.txt")));

        Producer producer = endpoint.createProducer();
        producer.start();
        try {
            producer.process(exchange);
        } finally {
            // the producer was started manually, so stop it again to avoid leaking resources
            producer.stop();
        }

        // give the POP3 consumer time to pick up and split the mail
        Thread.sleep(2000);

        MockEndpoint mock = getMockEndpoint("mock:split");
        mock.expectedMessageCount(2);
        mock.assertIsSatisfied();

        // each split message must carry exactly one of the original attachments
        Message first = mock.getReceivedExchanges().get(0).getIn();
        Message second = mock.getReceivedExchanges().get(1).getIn();
        assertEquals(1, first.getAttachments().size());
        assertEquals("logo.jpeg", first.getAttachments().keySet().iterator().next());
        assertEquals(1, second.getAttachments().size());
        assertEquals("license.txt", second.getAttachments().keySet().iterator().next());
    }

    @Override
    protected RouteBuilder createRouteBuilder() throws Exception {
        return new RouteBuilder() {
            @Override
            public void configure() throws Exception {
                // START SNIPPET: e1
                from("pop3://james@mymailserver.com?password=secret&consumer.delay=1000")
                    .to("log:email")
                    // use the SplitAttachmentsExpression which will split the message per attachment
                    .split(new SplitAttachmentsExpression())
                        // each message going to this mock has a single attachment
                        .to("mock:split")
                    .end();
                // END SNIPPET: e1
            }
        };
    }
}
| aaronwalker/camel | components/camel-mail/src/test/java/org/apache/camel/component/mail/MailSplitAttachmentsTest.java | Java | apache-2.0 | 3,594 |
/*
* Copyright 2017 Mahesh Gaya
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package edu.drake.research.android.lipswithmaps.adapter;
import android.content.Context;
import android.support.v7.widget.RecyclerView;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.TextView;
import java.util.List;
import butterknife.BindView;
import butterknife.ButterKnife;
import edu.drake.research.android.lipswithmaps.R;
import edu.drake.research.lipswithmaps.WifiItem;
/**
* Created by Mahesh Gaya on 1/15/17.
*/
/**
 * RecyclerView adapter that renders a list of scanned {@link WifiItem}s,
 * showing each network's signal level, SSID and BSSID on one row.
 */
public class WifiAdapter extends RecyclerView.Adapter<WifiAdapter.ViewHolder> {
    // Backing data set; the adapter does not copy or own it.
    private final List<WifiItem> mWifiItemList;

    public WifiAdapter(List<WifiItem> wifiItemList) {
        mWifiItemList = wifiItemList;
    }

    @Override
    public ViewHolder onCreateViewHolder(ViewGroup parent, int viewType) {
        // Inflate one row layout per holder; the recycler reuses holders.
        LayoutInflater inflater = LayoutInflater.from(parent.getContext());
        View itemView = inflater.inflate(R.layout.list_wifi_item, parent, false);
        return new ViewHolder(itemView);
    }

    @Override
    public void onBindViewHolder(ViewHolder holder, int position) {
        // Copy the item's three fields into the row's text views.
        final WifiItem item = mWifiItemList.get(position);
        holder.levelTextView.setText(String.valueOf(item.getLevel()));
        holder.ssidTextView.setText(item.getSsid());
        holder.bssidTextView.setText(item.getBssid());
    }

    @Override
    public int getItemCount() {
        return mWifiItemList.size();
    }

    /** Row holder; the three text views are bound once via ButterKnife. */
    public class ViewHolder extends RecyclerView.ViewHolder {
        @BindView(R.id.textview_wifi_level)TextView levelTextView;
        @BindView(R.id.textview_wifi_ssid)TextView ssidTextView;
        @BindView(R.id.textview_wifi_bssid)TextView bssidTextView;

        public ViewHolder(View itemView) {
            super(itemView);
            ButterKnife.bind(this, itemView);
        }
    }
}
| maheshgaya/lips-with-maps | android/src/main/java/edu/drake/research/android/lipswithmaps/adapter/WifiAdapter.java | Java | apache-2.0 | 2,440 |
import Vue from 'vue';
import axios from 'axios';
import VueAxios from 'vue-axios';
Vue.use(VueAxios, axios);
/**
 * Thin wrapper around Vue.axios that transparently handles 401 responses:
 * on 401 it runs the (externally provided) login() routine and retries the
 * request once; any other failure rejects with the original error.
 *
 * Bug fixed: the original chained `.catch(...).then(data => resolve(data))`,
 * so every error path resolved the promise with `undefined` immediately
 * (the login/retry chain was not returned from the catch handler), and
 * non-401 errors were silently swallowed.
 *
 * @param {Object} options axios request config
 * @returns {Promise} resolves with the axios response, rejects on failure
 */
let ajax = (options) => {
    return new Promise(function (resolve, reject) {
        Vue.axios(options)
            .then(data => resolve(data))
            .catch(err => {
                if (err.code === 401) {
                    // 未登录 (not logged in): authenticate, then retry once.
                    login()
                        .then(() => ajax(options))
                        .then(data => resolve(data))
                        .catch(retryErr => reject(retryErr));
                } else {
                    // Propagate the failure instead of dropping it.
                    reject(err);
                }
            });
    });
};

/**
 * Placeholder for wrapping a request with lock/animation handling.
 * NOTE(review): unimplemented in the original as well — confirm the intended
 * semantics of {locked, animated} before filling this in.
 */
ajax.decorator = function (promiseFn, {locked, animated}) {
};
| dgmpk/vue-music-app | src/assets/js/request.js | JavaScript | apache-2.0 | 659 |
import json

import requests
class Client(object):
    """Minimal HTTP client for exercising the test Tornado server's JSON API."""

    def __init__(self, tornado_server):
        # Server handle; only its ``port`` attribute is read here.
        self.tornado_server = tornado_server

    @property
    def base_url(self):
        """Root URL of the API on the local test server."""
        return "http://localhost:{}/api/v1".format(self.tornado_server.port)

    def request(self, method, url, **kwargs):
        """Issue an HTTP request against ``base_url + url``.

        Write methods (PUT/POST) are tagged as JSON. A caller-supplied
        ``headers`` kwarg is merged rather than colliding with ours (the
        original passed ``headers=`` twice in that case, a TypeError).
        """
        headers = kwargs.pop("headers", {})
        if method.lower() in ("put", "post"):
            headers.setdefault("Content-type", "application/json")
        return requests.request(
            method, self.base_url + url,
            headers=headers, **kwargs
        )

    def get(self, url, **kwargs):
        return self.request("GET", url, **kwargs)

    def post(self, url, **kwargs):
        return self.request("POST", url, **kwargs)

    def put(self, url, **kwargs):
        return self.request("PUT", url, **kwargs)

    def delete(self, url, **kwargs):
        return self.request("DELETE", url, **kwargs)

    def create(self, url, **kwargs):
        """POST ``kwargs`` serialized as JSON.

        Bug fixed: ``json`` was used here without ever being imported,
        so every call raised NameError (import added at top of file).
        """
        return self.post(url, data=json.dumps(kwargs))

    def update(self, url, **kwargs):
        """PUT ``kwargs`` serialized as JSON (same missing-import fix)."""
        return self.put(url, data=json.dumps(kwargs))
package com.braulio.cassule.designfocus.fragment;
import android.content.Intent;
import android.net.Uri;
import android.os.Bundle;
import android.support.v4.app.Fragment;
import android.support.v7.widget.LinearLayoutManager;
import android.support.v7.widget.RecyclerView;
import android.util.Log;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import com.braulio.cassule.designfocus.ui.PostViewHolder;
import com.braulio.cassule.designfocus.R;
import com.braulio.cassule.designfocus.activity.PostDetailActivity;
import com.braulio.cassule.designfocus.model.Post;
import com.firebase.ui.database.FirebaseRecyclerAdapter;
import com.google.firebase.auth.FirebaseAuth;
import com.google.firebase.database.DataSnapshot;
import com.google.firebase.database.DatabaseError;
import com.google.firebase.database.DatabaseReference;
import com.google.firebase.database.FirebaseDatabase;
import com.google.firebase.database.MutableData;
import com.google.firebase.database.Query;
import com.google.firebase.database.Transaction;
import com.squareup.picasso.Picasso;
/**
 * Base fragment that displays a list of {@link Post} objects from the
 * Firebase Realtime Database in a RecyclerView. Subclasses choose which
 * posts to show by implementing {@link #getQuery(DatabaseReference)}.
 */
public abstract class PostListFragment extends Fragment {

    private static final String TAG = "PostListFragment";

    // [START define_database_reference]
    // Root reference of the Realtime Database; child paths are built from it.
    private DatabaseReference mDatabase;
    // [END define_database_reference]

    // Adapter that keeps the RecyclerView in sync with the Firebase query.
    private FirebaseRecyclerAdapter<Post, PostViewHolder> mAdapter;
    private RecyclerView mRecycler;
    private LinearLayoutManager mManager;

    public PostListFragment() {}

    /** Inflates the layout and grabs the RecyclerView; data wiring happens later. */
    @Override
    public View onCreateView (LayoutInflater inflater, ViewGroup container,
                              Bundle savedInstanceState) {
        super.onCreateView(inflater, container, savedInstanceState);
        View rootView = inflater.inflate(R.layout.fragment_all_posts, container, false);

        // [START create_database_reference]
        mDatabase = FirebaseDatabase.getInstance().getReference();
        // [END create_database_reference]

        mRecycler = (RecyclerView) rootView.findViewById(R.id.messages_list);
        mRecycler.setHasFixedSize(true);

        return rootView;
    }

    /** Hooks the subclass-provided query up to the RecyclerView via FirebaseUI. */
    @Override
    public void onActivityCreated(Bundle savedInstanceState) {
        super.onActivityCreated(savedInstanceState);

        // Set up Layout Manager, reverse layout
        // (reverse + stack-from-end shows the newest entries first).
        mManager = new LinearLayoutManager(getActivity());
        mManager.setReverseLayout(true);
        mManager.setStackFromEnd(true);
        mRecycler.setLayoutManager(mManager);

        // Set up FirebaseRecyclerAdapter with the Query
        Query postsQuery = getQuery(mDatabase);
        mAdapter = new FirebaseRecyclerAdapter<Post, PostViewHolder>(Post.class, R.layout.item_post,
                PostViewHolder.class, postsQuery) {
            @Override
            protected void populateViewHolder(final PostViewHolder viewHolder, final Post model, final int position) {
                final DatabaseReference postRef = getRef(position);

                // Set click listener for the whole post view
                final String postKey = postRef.getKey();
                viewHolder.itemView.setOnClickListener(new View.OnClickListener() {
                    @Override
                    public void onClick(View v) {
                        // Launch PostDetailActivity
                        Intent intent = new Intent(getActivity(), PostDetailActivity.class);
                        intent.putExtra(PostDetailActivity.EXTRA_POST_KEY, postKey);
                        startActivity(intent);
                    }
                });

                // NOTE(review): posts with a null image are never bound at all
                // (early return skips the star state and bindToPost below) —
                // confirm that image-less posts are intentionally left blank.
                if (model.image == null){
                    return;
                } else {
                    Picasso.with(getContext()).load(Uri.parse(model.image)).fit().centerCrop().into(viewHolder.imageView);
                }

                // Determine if the current user has liked this post and set UI accordingly
                // NOTE(review): assumes model.stars is always non-null — confirm
                // Post initializes the map, otherwise this NPEs.
                if (model.stars.containsKey(getUid())) {
                    viewHolder.starView.setImageResource(R.drawable.ic_toggle_star_fill_24);
                } else {
                    viewHolder.starView.setImageResource(R.drawable.ic_toggle_star_outline_24);
                }

                // Bind Post to ViewHolder, setting OnClickListener for the star button
                viewHolder.bindToPost(model, new View.OnClickListener() {
                    @Override
                    public void onClick(View starView) {
                        // Need to write to both places the post is stored
                        DatabaseReference globalPostRef = mDatabase.child("posts").child(postRef.getKey());
                        DatabaseReference userPostRef = mDatabase.child("user-posts").child(model.uid).child(postRef.getKey());

                        // Run two transactions
                        onStarClicked(globalPostRef);
                        onStarClicked(userPostRef);
                    }
                });
            }
        };
        mRecycler.setAdapter(mAdapter);
    }

    // [START post_stars_transaction]
    /**
     * Toggles the current user's star on the post at {@code postRef} inside a
     * database transaction: increments/decrements starCount and adds/removes
     * the user's uid from the stars map atomically.
     */
    private void onStarClicked(DatabaseReference postRef) {
        postRef.runTransaction(new Transaction.Handler() {
            @Override
            public Transaction.Result doTransaction(MutableData mutableData) {
                Post p = mutableData.getValue(Post.class);
                // The node may not exist (yet); succeed without changes.
                if (p == null) {
                    return Transaction.success(mutableData);
                }

                if (p.stars.containsKey(getUid())) {
                    // Unstar the post and remove self from stars
                    p.starCount = p.starCount - 1;
                    p.stars.remove(getUid());
                } else {
                    // Star the post and add self to stars
                    p.starCount = p.starCount + 1;
                    p.stars.put(getUid(), true);
                }

                // Set value and report transaction success
                mutableData.setValue(p);
                return Transaction.success(mutableData);
            }

            @Override
            public void onComplete(DatabaseError databaseError, boolean b,
                                   DataSnapshot dataSnapshot) {
                // Transaction completed
                Log.d(TAG, "postTransaction:onComplete:" + databaseError);
            }
        });
    }
    // [END post_stars_transaction]

    /** Detaches the Firebase listeners held by the adapter. */
    @Override
    public void onDestroy() {
        super.onDestroy();
        if (mAdapter != null) {
            mAdapter.cleanup();
        }
    }

    // NOTE(review): throws NullPointerException when no user is signed in —
    // callers assume an authenticated session.
    public String getUid() {
        return FirebaseAuth.getInstance().getCurrentUser().getUid();
    }

    /** Subclasses return the query selecting which posts this list shows. */
    public abstract Query getQuery(DatabaseReference databaseReference);
}
| braulio94/Quadro | app/src/main/java/com/braulio/cassule/designfocus/fragment/PostListFragment.java | Java | apache-2.0 | 6,726 |
using System;

namespace Com.Koushikdutta.Async.Wrapper {
    partial interface IAsyncSocketWrapper {
        // Re-declares Close() with the 'new' modifier — presumably hiding a
        // Close() inherited from a base interface in the other partial
        // declaration. NOTE(review): confirm against the base declaration.
        new void Close();
    }
}
| thefactory/AndroidAsync-Sharp | Additions/IAsyncSocketWrapper.cs | C# | apache-2.0 | 141 |
/*global Phaser, Assets, Screen*/

/**
 * The player's ship: a keyboard-controlled sprite anchored near the bottom
 * of the screen that slides horizontally with the arrow keys.
 *
 * @param {Phaser.Game} game owning game instance
 */
var Player = function (game) {
    "use strict";
    this.game = game;
    this.sprite = null;
};

// Pixels between the sprite's anchor and the bottom edge of the screen.
Player.DISTANCE_TO_BORDER = 50;
// Horizontal speed in pixels per second.
Player.VELOCITY_X = 300;
// Anchor at the sprite's center.
Player.SPRITE_ANCHOR_X = 0.5;
Player.SPRITE_ANCHOR_Y = 0.5;

Player.prototype = {
    /** Spawns the sprite centered horizontally and enables arcade physics. */
    create: function () {
        "use strict";
        var spawnY = Screen.HEIGHT - Player.DISTANCE_TO_BORDER;
        this.sprite = this.game.add.sprite(this.game.world.centerX, spawnY,
                Assets.PLAYER_SPRITE_KEY);
        this.sprite.anchor.set(Player.SPRITE_ANCHOR_X, Player.SPRITE_ANCHOR_Y);
        this.game.physics.enable(this.sprite, Phaser.Physics.ARCADE);
    },
    /** Maps the left/right arrow keys onto the sprite's x velocity. */
    update: function () {
        "use strict";
        var keyboard = this.game.input.keyboard;
        var vx = 0;
        if (keyboard.isDown(Phaser.Keyboard.LEFT)) {
            vx = -Player.VELOCITY_X;
        } else if (keyboard.isDown(Phaser.Keyboard.RIGHT)) {
            vx = Player.VELOCITY_X;
        }
        this.sprite.body.velocity.x = vx;
    }
};
| fpbfabio/river-raid-remake | js/game/player.js | JavaScript | apache-2.0 | 1,075 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.druid.query.dimension;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.google.common.base.Preconditions;
import com.google.common.base.Predicate;
import it.unimi.dsi.fastutil.ints.Int2IntMap;
import it.unimi.dsi.fastutil.ints.Int2IntOpenHashMap;
import org.apache.druid.common.config.NullHandling;
import org.apache.druid.java.util.common.StringUtils;
import org.apache.druid.query.filter.DimFilterUtils;
import org.apache.druid.segment.DimensionSelector;
import javax.annotation.Nullable;
import java.nio.ByteBuffer;
import java.util.regex.Pattern;
/**
*/
/**
 * A {@link DimensionSpec} decorator that keeps only dimension values matching
 * a regular expression; rows whose (null-to-empty coerced) value does not
 * match the pattern are filtered out of the decorated selector.
 */
public class RegexFilteredDimensionSpec extends BaseFilteredDimensionSpec
{

  // Type marker used as the first byte of the cache key.
  private static final byte CACHE_TYPE_ID = 0x2;

  private final String pattern;

  // Compiled once here so per-row matching never re-parses the regex.
  private final Pattern compiledRegex;

  public RegexFilteredDimensionSpec(
      @JsonProperty("delegate") DimensionSpec delegate,
      @JsonProperty("pattern") String pattern //rows not matching the pattern will be discarded
  )
  {
    super(delegate);
    this.pattern = Preconditions.checkNotNull(pattern, "pattern must not be null");
    this.compiledRegex = Pattern.compile(pattern);
  }

  @JsonProperty
  public String getPattern()
  {
    return pattern;
  }

  /**
   * Wraps {@code selector} so that only matching values pass through.
   * Two strategies: a per-row predicate when the dictionary cannot be
   * enumerated up front, or a precomputed id remapping when it can.
   */
  @Override
  public DimensionSelector decorate(final DimensionSelector selector)
  {
    if (selector == null) {
      return null;
    }

    final int selectorCardinality = selector.getValueCardinality();
    // Unknown cardinality or names not resolvable in advance: fall back to
    // evaluating the regex against every row value.
    if (selectorCardinality < 0 || !selector.nameLookupPossibleInAdvance()) {
      return new PredicateFilteredDimensionSelector(
          selector,
          new Predicate<String>()
          {
            @Override
            public boolean apply(@Nullable String input)
            {
              return compiledRegex.matcher(NullHandling.nullToEmptyIfNeeded(input)).matches();
            }
          }
      );
    }

    // Dictionary path: map each matching original dictionary id to a compact
    // new id (forwardMapping), remembering the inverse (reverseMapping).
    int count = 0;
    final Int2IntOpenHashMap forwardMapping = new Int2IntOpenHashMap();
    forwardMapping.defaultReturnValue(-1);
    for (int i = 0; i < selectorCardinality; i++) {
      String val = NullHandling.nullToEmptyIfNeeded(selector.lookupName(i));
      // Extra null guard: nullToEmptyIfNeeded can still yield null in
      // SQL-compatible null-handling mode.
      if (val != null && compiledRegex.matcher(val).matches()) {
        forwardMapping.put(i, count++);
      }
    }

    final int[] reverseMapping = new int[forwardMapping.size()];
    for (Int2IntMap.Entry e : forwardMapping.int2IntEntrySet()) {
      reverseMapping[e.getIntValue()] = e.getIntKey();
    }
    return new ForwardingFilteredDimensionSelector(selector, forwardMapping, reverseMapping);
  }

  /**
   * Cache key layout (order matters for cache correctness):
   * [CACHE_TYPE_ID][delegate key][string separator][pattern UTF-8 bytes].
   */
  @Override
  public byte[] getCacheKey()
  {
    byte[] delegateCacheKey = delegate.getCacheKey();
    byte[] regexBytes = StringUtils.toUtf8(pattern);
    return ByteBuffer.allocate(2 + delegateCacheKey.length + regexBytes.length)
                     .put(CACHE_TYPE_ID)
                     .put(delegateCacheKey)
                     .put(DimFilterUtils.STRING_SEPARATOR)
                     .put(regexBytes)
                     .array();
  }

  @Override
  public boolean equals(Object o)
  {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }

    RegexFilteredDimensionSpec that = (RegexFilteredDimensionSpec) o;

    // Equality covers both the delegate and the pattern.
    if (!delegate.equals(that.delegate)) {
      return false;
    }
    return pattern.equals(that.pattern);
  }

  @Override
  public int hashCode()
  {
    int result = delegate.hashCode();
    result = 31 * result + pattern.hashCode();
    return result;
  }

  // NOTE(review): omits the delegate, unlike equals/hashCode — confirm this
  // is intentional before relying on toString output.
  @Override
  public String toString()
  {
    return "RegexFilteredDimensionSpec{" +
           "pattern='" + pattern + '\'' +
           '}';
  }
}
| dkhwangbo/druid | processing/src/main/java/org/apache/druid/query/dimension/RegexFilteredDimensionSpec.java | Java | apache-2.0 | 4,458 |
from django.conf.urls import patterns, url
# URL routes for the accounts app. Uses the legacy ``patterns()`` helper:
# each view is given as a string and resolved against the ``accounts.views``
# prefix supplied as the first argument.
urlpatterns = patterns('accounts.views',
    url(r'^$', 'home_view', name='home'),
    url(r'^login/$', 'login_view', name='login'),
    url(r'^logout/$', 'logout_view', name='logout'),
    url(r'^register/$', 'register_view', name='register'),
    url(r'^password/$', 'password_view', name='password'),
    url(r'^profile/$', 'profile_view', name='profile'),
    url(r'^hello/$', 'hello_view', name='hello'),
)
| goncha/django-accounts | urls.py | Python | apache-2.0 | 449 |