repo_name stringlengths 5 108 | path stringlengths 6 333 | size stringlengths 1 6 | content stringlengths 4 977k | license stringclasses 15 values |
|---|---|---|---|---|
JKatzwinkel/bts | org.bbaw.bts.core.corpus.controller.impl/src/org/bbaw/bts/core/corpus/controller/impl/BTSTextEditorControllerContextFunction.java | 1037 | package org.bbaw.bts.core.corpus.controller.impl;
import org.bbaw.bts.core.corpus.controller.impl.partController.BTSTextEditorControllerImpl;
import org.bbaw.bts.core.corpus.controller.partController.BTSTextEditorController;
import org.eclipse.e4.core.contexts.ContextFunction;
import org.eclipse.e4.core.contexts.ContextInjectionFactory;
import org.eclipse.e4.core.contexts.IEclipseContext;
import org.eclipse.e4.ui.model.application.MApplication;
/**
 * Eclipse e4 context function that lazily creates the
 * {@link BTSTextEditorController} service the first time it is requested
 * from an {@link IEclipseContext}, and publishes it in the application-level
 * context so subsequent lookups reuse the same instance.
 */
public class BTSTextEditorControllerContextFunction extends ContextFunction
{
	/**
	 * Creates a {@link BTSTextEditorControllerImpl} via dependency injection,
	 * registers it under the {@link BTSTextEditorController} key in the
	 * application context, and returns it.
	 *
	 * @param context the context in which the lookup was performed
	 * @return the newly created controller instance
	 */
	@Override
	public Object compute(IEclipseContext context)
	{
		// Fixed typo in log message: "Intitialize" -> "Initialize"
		System.out.println("Initialize BTSTextEditorController");
		// Publish the new object in the application-level context (not the
		// requesting context) so it is shared across the whole application.
		MApplication application = context.get(MApplication.class);
		IEclipseContext ctx = application.getContext();
		BTSTextEditorController controller = ContextInjectionFactory.make(BTSTextEditorControllerImpl.class, context);
		ctx.set(BTSTextEditorController.class, controller);
		return controller;
	}
}
| lgpl-3.0 |
FenixEdu/fenixedu-academic | src/main/java/org/fenixedu/academic/domain/candidacyProcess/DegreeOfficePublicCandidacyHashCode.java | 12366 | /**
* Copyright © 2002 Instituto Superior Técnico
*
* This file is part of FenixEdu Academic.
*
* FenixEdu Academic is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* FenixEdu Academic is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with FenixEdu Academic. If not, see <http://www.gnu.org/licenses/>.
*/
package org.fenixedu.academic.domain.candidacyProcess;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.UUID;
import org.apache.commons.collections.CollectionUtils;
import org.fenixedu.academic.domain.Degree;
import org.fenixedu.academic.domain.PublicCandidacyHashCode;
import org.fenixedu.academic.domain.candidacyProcess.exceptions.HashCodeForEmailAndProcessAlreadyBounded;
import org.fenixedu.academic.domain.candidacyProcess.mobility.MobilityApplicationProcess;
import org.fenixedu.academic.domain.candidacyProcess.mobility.MobilityEmailTemplate;
import org.fenixedu.academic.domain.candidacyProcess.mobility.MobilityEmailTemplateType;
import org.fenixedu.academic.domain.organizationalStructure.Unit;
import org.fenixedu.academic.domain.period.MobilityApplicationPeriod;
import org.fenixedu.academic.util.Bundle;
import org.fenixedu.bennu.core.domain.User;
import org.fenixedu.bennu.core.i18n.BundleUtil;
import org.fenixedu.commons.i18n.I18N;
/**
 * A public candidacy hash code managed by a degree office: a random UUID value
 * bound to a candidate's email address, used to give anonymous candidates a
 * link to submit/access their individual candidacy process.
 */
public class DegreeOfficePublicCandidacyHashCode extends DegreeOfficePublicCandidacyHashCode_Base {

    public DegreeOfficePublicCandidacyHashCode() {
        super();
    }

    /**
     * Checks whether this hash code is bound to the given email, individual
     * candidacy process type, parent process and exact degree set.
     *
     * @return true when email, process type and parent process match, the
     *         candidacy is not cancelled, and the candidacy's degree set is
     *         exactly equal to {@code degreeList} (empty symmetric difference)
     */
    public boolean isAssociatedWithEmailAndCandidacyProcess(String email, Class<? extends IndividualCandidacyProcess> type,
            CandidacyProcess process, List<Degree> degreeList) {
        if (email.equals(this.getEmail()) && this.getIndividualCandidacyProcess() != null
                && !getIndividualCandidacyProcess().isCandidacyCancelled()
                && this.getIndividualCandidacyProcess().getClass() == type
                && this.getIndividualCandidacyProcess().getCandidacyProcess() == process) {
            // disjunction = symmetric difference; empty means both degree sets are equal
            return CollectionUtils.disjunction(this.getIndividualCandidacyProcess().getCandidacy().getAllDegrees(), degreeList)
                    .isEmpty();
        } else {
            return false;
        }
    }

    @Override
    public boolean hasCandidacyProcess() {
        return getIndividualCandidacyProcess() != null;
    }

    /**
     * Returns the user of the person behind this hash code's candidacy
     * process, if the whole navigation chain is non-null.
     *
     * Rewritten with explicit null checks instead of catching
     * NullPointerException (exceptions as control flow hide real NPE bugs).
     */
    @Override
    public Optional<User> getUser() {
        final IndividualCandidacyProcess process = getIndividualCandidacyProcess();
        if (process == null || process.getPersonalDetails() == null
                || process.getPersonalDetails().getPerson() == null) {
            return Optional.empty();
        }
        return Optional.ofNullable(process.getPersonalDetails().getPerson().getUser());
    }

    @Override
    public boolean isFromDegreeOffice() {
        return true;
    }

    /**
     * Get an hash code not associated with an individual candidacy process for
     * the email. Also sends an email with the application-submission link
     * (mobility processes use their pre-registration email template instead).
     *
     * @throws HashCodeForEmailAndProcessAlreadyBounded
     */
    static public DegreeOfficePublicCandidacyHashCode getUnusedOrCreateNewHashCodeAndSendEmailForApplicationSubmissionToCandidate(
            Class<? extends IndividualCandidacyProcess> individualCandidadyProcessClass, CandidacyProcess parentProcess,
            String email) throws HashCodeForEmailAndProcessAlreadyBounded {
        DegreeOfficePublicCandidacyHashCode hashCode =
                getUnusedOrCreateNewHashCode(individualCandidadyProcessClass, parentProcess, email);

        if (parentProcess.isMobility()) {
            MobilityApplicationProcess mobilityApplicationProcess = (MobilityApplicationProcess) parentProcess;
            MobilityApplicationPeriod candidacyPeriod = mobilityApplicationProcess.getCandidacyPeriod();
            MobilityEmailTemplate emailTemplateFor =
                    candidacyPeriod.getEmailTemplateFor(MobilityEmailTemplateType.PREREGISTRATION);
            emailTemplateFor.sendEmailFor(hashCode);
        } else {
            hashCode.sendEmailForApplicationSubmissionCandidacyForm(individualCandidadyProcessClass);
        }

        return hashCode;
    }

    // Resource-bundle key suffixes; the concrete process class simple name is prepended.
    private static final String APPLICATION_SUBMISSION_LINK = ".const.public.application.submission.link";
    private static final String SEND_LINK_TO_ACCESS_SUBMISSION_FORM_SUBJECT =
            ".message.email.subject.send.link.to.submission.form";
    private static final String SEND_LINK_TO_ACCESS_SUBMISSION_FORM_BODY = ".message.email.body.send.link.to.submission.form";

    /**
     * Sends the candidate an email containing the public link to the
     * application submission form (link parameterized with this hash value
     * and the current locale).
     */
    private void sendEmailForApplicationSubmissionCandidacyForm(
            Class<? extends IndividualCandidacyProcess> individualCandidadyProcessClass) {
        String subject =
                BundleUtil.getString(Bundle.CANDIDATE, individualCandidadyProcessClass.getSimpleName()
                        + SEND_LINK_TO_ACCESS_SUBMISSION_FORM_SUBJECT);
        String body =
                BundleUtil.getString(Bundle.CANDIDATE, individualCandidadyProcessClass.getSimpleName()
                        + SEND_LINK_TO_ACCESS_SUBMISSION_FORM_BODY, Unit.getInstitutionName().getContent());
        String link =
                BundleUtil.getString(Bundle.CANDIDATE, individualCandidadyProcessClass.getSimpleName()
                        + APPLICATION_SUBMISSION_LINK);
        link = String.format(link, this.getValue(), I18N.getLocale());
        body = String.format(body, link);
        this.sendEmail(subject, body);
    }

    private static final String APPLICATION_ACCESS_LINK = ".const.public.application.access.link";
    private static final String INFORM_APPLICATION_SUCCESS_SUBJECT = ".message.email.subject.application.submited";
    private static final String INFORM_APPLICATION_SUCCESS_BODY = ".message.email.body.application.submited";

    /**
     * Notifies the candidate that the application was successfully submitted.
     * Mobility processes delegate to their APPLICATION_SUBMISSION template;
     * all others get a bundle-based message with process code, access link
     * and candidacy end date.
     */
    public void sendEmailForApplicationSuccessfullySubmited() {
        CandidacyProcess parentProcess = getIndividualCandidacyProcess().getCandidacyProcess();
        if (parentProcess.isMobility()) {
            MobilityApplicationProcess mobilityApplicationProcess = (MobilityApplicationProcess) parentProcess;
            MobilityApplicationPeriod candidacyPeriod = mobilityApplicationProcess.getCandidacyPeriod();
            MobilityEmailTemplate emailTemplateFor =
                    candidacyPeriod.getEmailTemplateFor(MobilityEmailTemplateType.APPLICATION_SUBMISSION);
            emailTemplateFor.sendEmailFor(this);
            return;
        }

        String subject =
                MessageFormat.format(BundleUtil.getString(Bundle.CANDIDATE, this.getIndividualCandidacyProcess().getClass()
                        .getSimpleName()
                        + INFORM_APPLICATION_SUCCESS_SUBJECT), Unit.getInstitutionAcronym(), Unit.getInstitutionName()
                        .getContent());
        String body =
                MessageFormat.format(BundleUtil.getString(Bundle.CANDIDATE, this.getIndividualCandidacyProcess().getClass()
                        .getSimpleName()
                        + INFORM_APPLICATION_SUCCESS_BODY), Unit.getInstitutionAcronym(), Unit.getInstitutionName().getContent());

        String link = getDefaultPublicLink();

        body =
                String.format(body, new String[] { this.getIndividualCandidacyProcess().getProcessCode(), link,
                        this.getIndividualCandidacyProcess().getCandidacyProcess().getCandidacyEnd().toString("dd/MM/yyyy") });
        sendEmail(subject, body);
    }

    private static final String RECOVERY_APPLICATION_SUBJECT = ".message.email.subject.recovery.access";
    private static final String RECOVERY_APPLICATION_BODY = ".message.email.body.recovery.access";

    /**
     * Re-sends the candidate the access link to an existing application
     * (access-link recovery).
     */
    public void sendEmailFoAccessLinkRecovery() {
        String subject =
                BundleUtil.getString(Bundle.CANDIDATE, this.getIndividualCandidacyProcess().getClass().getSimpleName()
                        + RECOVERY_APPLICATION_SUBJECT);
        String body =
                BundleUtil.getString(Bundle.CANDIDATE, this.getIndividualCandidacyProcess().getClass().getSimpleName()
                        + RECOVERY_APPLICATION_BODY);

        String link = getDefaultPublicLink();
        body = String.format(body, new String[] { link, this.getIndividualCandidacyProcess().getProcessCode() });
        sendEmail(subject, body);
    }

    /**
     * @return the public URL where this candidacy can be accessed,
     *         parameterized with this hash value and the current language
     */
    public String getDefaultPublicLink() {
        return String.format(
                BundleUtil.getString(Bundle.CANDIDATE, this.getIndividualCandidacyProcess().getClass().getSimpleName()
                        + APPLICATION_ACCESS_LINK), this.getValue(), I18N.getLocale().getLanguage());
    }

    /**
     * Get an hash code not associated with an individual candidacy process for
     * the email: reuses an unbound existing code, creates a fresh one when
     * none exists, and fails when the email is already bound to a process of
     * this type.
     *
     * @throws HashCodeForEmailAndProcessAlreadyBounded
     */
    static public DegreeOfficePublicCandidacyHashCode getUnusedOrCreateNewHashCode(
            Class<? extends IndividualCandidacyProcess> individualCandidadyProcessClass, CandidacyProcess parentProcess,
            String email) throws HashCodeForEmailAndProcessAlreadyBounded {
        DegreeOfficePublicCandidacyHashCode publicCandidacyHashCode =
                getPublicCandidacyHashCodeByEmailAndCandidacyProcessTypeOrNotAssociated(email, individualCandidadyProcessClass,
                        parentProcess);
        if (publicCandidacyHashCode == null) {
            return createNewHashCode(email);
        } else if (!publicCandidacyHashCode.hasCandidacyProcess()) {
            return publicCandidacyHashCode;
        } else {
            throw new HashCodeForEmailAndProcessAlreadyBounded("error.hash.code.for.email.and.process.already.bounded");
        }
    }

    /** Creates and returns a fresh hash code (random UUID) bound to the email. */
    private static DegreeOfficePublicCandidacyHashCode createNewHashCode(String email) {
        DegreeOfficePublicCandidacyHashCode publicCandidacyHashCode = new DegreeOfficePublicCandidacyHashCode();
        publicCandidacyHashCode.setEmail(email);
        publicCandidacyHashCode.setValue(UUID.randomUUID().toString());
        return publicCandidacyHashCode;
    }

    public static DegreeOfficePublicCandidacyHashCode getPublicCandidacyHashCodeByEmailAndCandidacyProcessType(
            final String email, Class<? extends IndividualCandidacyProcess> processType, CandidacyProcess process) {
        return getPublicCandidacyHashCodeByEmailAndCandidacyProcessType(email, processType, process, new ArrayList<Degree>());
    }

    /**
     * @return the degree-office hash code bound to the given email, process
     *         type, parent process and degree set, or null when none matches
     */
    public static DegreeOfficePublicCandidacyHashCode getPublicCandidacyHashCodeByEmailAndCandidacyProcessType(
            final String email, Class<? extends IndividualCandidacyProcess> processType, CandidacyProcess process,
            List<Degree> degreeList) {
        for (final PublicCandidacyHashCode hashCode : getHashCodesAssociatedWithEmail(email)) {
            if (!hashCode.isFromDegreeOffice()) {
                continue;
            }
            final DegreeOfficePublicCandidacyHashCode hash = (DegreeOfficePublicCandidacyHashCode) hashCode;
            if (hash.isAssociatedWithEmailAndCandidacyProcess(email, processType, process, degreeList)) {
                return hash;
            }
        }
        return null;
    }

    /**
     * @return the hash code bound to the given email and process, or — when no
     *         such binding exists — any degree-office hash code for the email
     *         that is not yet bound to a process; null when neither exists
     */
    static public DegreeOfficePublicCandidacyHashCode getPublicCandidacyHashCodeByEmailAndCandidacyProcessTypeOrNotAssociated(
            final String email, Class<? extends IndividualCandidacyProcess> processType, CandidacyProcess process) {
        DegreeOfficePublicCandidacyHashCode associatedHashCode =
                getPublicCandidacyHashCodeByEmailAndCandidacyProcessType(email, processType, process);

        if (associatedHashCode != null) {
            return associatedHashCode;
        }

        for (final PublicCandidacyHashCode hashCode : getHashCodesAssociatedWithEmail(email)) {
            if (hashCode.isFromDegreeOffice() && !hashCode.hasCandidacyProcess()) {
                return (DegreeOfficePublicCandidacyHashCode) hashCode;
            }
        }

        return null;
    }
}
| lgpl-3.0 |
LTI2000/lanterna | src/test/java/com/googlecode/lanterna/gui2/TableTest.java | 10510 | package com.googlecode.lanterna.gui2;
import com.googlecode.lanterna.SGR;
import com.googlecode.lanterna.TextColor;
import com.googlecode.lanterna.gui2.dialogs.*;
import com.googlecode.lanterna.gui2.table.DefaultTableRenderer;
import com.googlecode.lanterna.gui2.table.Table;
import com.googlecode.lanterna.gui2.table.TableCellBorderStyle;
import com.googlecode.lanterna.gui2.table.TableModel;
import java.io.IOException;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.List;
import java.util.regex.Pattern;
/**
* Test for the Table component
*/
/**
 * Test for the Table component.
 *
 * Review fixes: the raw {@code Table}/{@code DefaultTableRenderer} types are
 * now parameterized with {@code String}, and string choices are compared with
 * {@code equals} instead of {@code ==} (behavior unchanged — the choices are
 * the same array elements, but reference comparison on strings is fragile).
 */
public class TableTest extends TestBase {
    public static void main(String[] args) throws IOException, InterruptedException {
        new TableTest().run(args);
    }

    @Override
    public void init(final WindowBasedTextGUI textGUI) {
        final BasicWindow window = new BasicWindow("Table container test");

        // Parameterized instead of the raw type 'new Table(...)'
        final Table<String> table = new Table<String>("Column 1", "Column 2", "Column 3");
        final TableModel<String> model = table.getTableModel();
        model.addRow("Row1", "Row1", "Row1");
        model.addRow("Row2", "Row2", "Row2");
        model.addRow("Row3", "Row3", "Row3");

        Panel buttonPanel = new Panel();
        buttonPanel.setLayoutManager(new LinearLayout(Direction.HORIZONTAL));
        buttonPanel.addComponent(new Button("Add...", new Runnable() {
            @Override
            public void run() {
                new ActionListDialogBuilder()
                        .setTitle("Add to table")
                        .addAction("Row", new Runnable() {
                            @Override
                            public void run() {
                                List<String> labels = new ArrayList<String>();
                                for(int i = 0; i < model.getColumnCount(); i++) {
                                    labels.add("Row" + (model.getRowCount() + 1));
                                }
                                model.addRow(labels.toArray(new String[labels.size()]));
                                table.invalidate();
                            }
                        })
                        .addAction("5 Rows", new Runnable() {
                            @Override
                            public void run() {
                                for(int row = 0; row < 5; row++) {
                                    List<String> labels = new ArrayList<String>();
                                    for(int i = 0; i < model.getColumnCount(); i++) {
                                        labels.add("Row" + (model.getRowCount() + 1));
                                    }
                                    model.addRow(labels.toArray(new String[labels.size()]));
                                }
                                table.invalidate();
                            }
                        })
                        .addAction("Column", new Runnable() {
                            @Override
                            public void run() {
                                List<String> labels = new ArrayList<String>();
                                for(int i = 0; i < model.getRowCount(); i++) {
                                    labels.add("Row" + (i + 1));
                                }
                                model.addColumn("NewColumn", labels.toArray(new String[labels.size()]));
                                table.invalidate();
                            }
                        })
                        .build()
                        .showDialog(textGUI);
            }
        }));
        buttonPanel.addComponent(new Button("Modify...", new Runnable() {
            @Override
            public void run() {
                onModify(textGUI, table);
            }
        }));
        buttonPanel.addComponent(new Button("Remove...", new Runnable() {
            @Override
            public void run() {
                new ActionListDialogBuilder()
                        .setTitle("Remove from table")
                        .addAction("Row", new Runnable() {
                            @Override
                            public void run() {
                                String numberAsText = askForANumber(textGUI, "Enter row # to remove (0-" + (model.getRowCount()-1) + ")");
                                if(numberAsText != null) {
                                    model.removeRow(Integer.parseInt(numberAsText));
                                }
                            }
                        })
                        .addAction("Column", new Runnable() {
                            @Override
                            public void run() {
                                String numberAsText = askForANumber(textGUI, "Enter column # to remove (0-" + (model.getColumnCount()-1) + ")");
                                if(numberAsText != null) {
                                    model.removeColumn(Integer.parseInt(numberAsText));
                                }
                            }
                        })
                        .build()
                        .showDialog(textGUI);
            }
        }));
        buttonPanel.addComponent(new Button("Close", new Runnable() {
            @Override
            public void run() {
                window.close();
            }
        }));

        window.setComponent(Panels.vertical(
                table.withBorder(Borders.singleLineBevel("Table")),
                buttonPanel));
        textGUI.addWindow(window);
    }

    /** Presents the top-level "Modify what?" dialog and dispatches to the chosen action. */
    private void onModify(WindowBasedTextGUI textGUI, Table<String> table) {
        String[] dialogChoices = new String[] {
                "Change table content",
                "Change table style",
                "Change view size",
                "Force re-calculate/re-draw"
        };
        String choice = chooseAString(textGUI, "Modify what?", dialogChoices);
        if(choice == null) {
            return;
        }
        else if(choice.equals(dialogChoices[0])) {
            onModifyContent(textGUI, table);
        }
        else if(choice.equals(dialogChoices[1])) {
            onModifyStyle(textGUI, table);
        }
        else if(choice.equals(dialogChoices[2])) {
            onModifyViewSize(textGUI, table);
        }
        else if(choice.equals(dialogChoices[3])) {
            table.invalidate();
        }
    }

    /** Asks for a row/column coordinate and a new label, then updates that cell. */
    private void onModifyContent(WindowBasedTextGUI textGUI, Table<String> table) {
        TableModel<String> model = table.getTableModel();
        String columnIndexAsText = askForANumber(textGUI, "Enter column # to modify (0-" + (model.getColumnCount() - 1) + ")");
        if(columnIndexAsText == null) {
            return;
        }
        String rowIndexAsText = askForANumber(textGUI, "Enter row # to modify (0-" + (model.getRowCount() - 1) + ")");
        if(rowIndexAsText == null) {
            return;
        }
        String newLabel = askForAString(textGUI, "Enter new label for the table cell at row " + rowIndexAsText + " column " + columnIndexAsText);
        if(newLabel != null) {
            model.setCell(Integer.parseInt(rowIndexAsText), Integer.parseInt(columnIndexAsText), newLabel);
        }
    }

    /** Lets the user change border styles or toggle cell selection. */
    private void onModifyStyle(WindowBasedTextGUI textGUI, Table<String> table) {
        String[] dialogChoices = new String[] {
                "Header border style (vertical)",
                "Header border style (horizontal)",
                "Cell border style (vertical)",
                "Cell border style (horizontal)",
                "Toggle cell selection"
        };
        String choice = chooseAString(textGUI, "Which style do you want to change?", dialogChoices);
        DefaultTableRenderer<String> renderer = (DefaultTableRenderer<String>) table.getRenderer();
        if(choice == null) {
            return;
        }
        else if(choice.equals(dialogChoices[4])) {
            table.setCellSelection(!table.isCellSelection());
        }
        else {
            TableCellBorderStyle newStyle = new ListSelectDialogBuilder<TableCellBorderStyle>()
                    .setTitle("Choose a new style")
                    .addListItems(TableCellBorderStyle.values())
                    .build()
                    .showDialog(textGUI);
            if(newStyle != null) {
                if(choice.equals(dialogChoices[0])) {
                    renderer.setHeaderVerticalBorderStyle(newStyle);
                }
                else if(choice.equals(dialogChoices[1])) {
                    renderer.setHeaderHorizontalBorderStyle(newStyle);
                }
                else if(choice.equals(dialogChoices[2])) {
                    renderer.setCellVerticalBorderStyle(newStyle);
                }
                else if(choice.equals(dialogChoices[3])) {
                    renderer.setCellHorizontalBorderStyle(newStyle);
                }
            }
        }
        table.invalidate();
    }

    /** Asks for visible row/column counts (0 = show all) and applies them. */
    private void onModifyViewSize(WindowBasedTextGUI textGUI, Table<String> table) {
        String verticalViewSize = askForANumber(textGUI, "Enter number of rows to display at once (0 = all)");
        if(verticalViewSize == null) {
            return;
        }
        table.setVisibleRows(Integer.parseInt(verticalViewSize));
        String horizontalViewSize = askForANumber(textGUI, "Enter number of columns to display at once (0 = all)");
        if(horizontalViewSize == null) {
            return;
        }
        table.setVisibleColumns(Integer.parseInt(horizontalViewSize));
    }

    /** Shows a list-select dialog; returns the chosen item or null if cancelled. */
    private String chooseAString(WindowBasedTextGUI textGUI, String title, String... items) {
        return new ListSelectDialogBuilder<String>()
                .setTitle(title)
                .addListItems(items)
                .build()
                .showDialog(textGUI);
    }

    /** Shows a free-text input dialog; returns the text or null if cancelled. */
    private String askForAString(WindowBasedTextGUI textGUI, String title) {
        return new TextInputDialogBuilder()
                .setTitle(title)
                .build()
                .showDialog(textGUI);
    }

    private String askForANumber(WindowBasedTextGUI textGUI, String title) {
        return askForANumber(textGUI, title, "");
    }

    /** Shows a digits-only input dialog; returns the text or null if cancelled. */
    private String askForANumber(WindowBasedTextGUI textGUI, String title, String initialNumber) {
        return new TextInputDialogBuilder()
                .setTitle(title)
                .setInitialContent(initialNumber)
                .setValidationPattern(Pattern.compile("[0-9]+"), "Not a number")
                .build()
                .showDialog(textGUI);
    }
}
| lgpl-3.0 |
teryk/sonarqube | sonar-core/src/main/java/org/sonar/core/user/HibernateUserFinder.java | 1698 | /*
* SonarQube, open source software quality management tool.
* Copyright (C) 2008-2014 SonarSource
* mailto:contact AT sonarsource DOT com
*
* SonarQube is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 3 of the License, or (at your option) any later version.
*
* SonarQube is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
package org.sonar.core.user;
import org.sonar.api.database.DatabaseSession;
import org.sonar.api.database.model.User;
import org.sonar.api.security.UserFinder;
import org.sonar.jpa.session.DatabaseSessionFactory;
/**
* @since 2.10
*/
/**
 * {@link UserFinder} implementation backed by a Hibernate/JPA
 * {@link DatabaseSessionFactory}.
 *
 * @since 2.10
 */
public class HibernateUserFinder implements UserFinder {

  // Made final: the dependency is injected once via the constructor and never reassigned.
  private final DatabaseSessionFactory sessionFactory;

  public HibernateUserFinder(DatabaseSessionFactory sessionFactory) {
    this.sessionFactory = sessionFactory;
  }

  /**
   * Looks a user up by primary key.
   *
   * @param id the user's database id
   * @return the matching user, or null when none exists
   */
  @Override
  public User findById(int id) {
    DatabaseSession session = sessionFactory.getSession();
    return session.getSingleResult(User.class, "id", id);
  }

  /**
   * Looks a user up by login name.
   *
   * @param login the user's login
   * @return the matching user, or null when none exists
   */
  @Override
  public User findByLogin(String login) {
    DatabaseSession session = sessionFactory.getSession();
    return session.getSingleResult(User.class, "login", login);
  }
}
| lgpl-3.0 |
xiaoxiaoyao/MyApp | JAVA/JavaProject/src/main/java/package-info.java | 408 | /**
* This is an example of
* Java doc
 * For personal Java study and practice.
 * Most programs in this package only produce output via the console.
 * (Translated from mojibake Chinese comments — original encoding was corrupted.)
* @author yao
* @version 0.1, 2016-07-04
*/
package main.java;
// Package-level demo class: declares a class used within this package,
// illustrating (package) access levels. (Translated from a mojibake
// Chinese comment — original encoding was corrupted.)
class PackgeClass{
// No-op method kept as an access-level example.
public void test(){
}
}
// Package-private constant holder: accessible only inside this package,
// intended for per-package development. (Translated from a mojibake
// Chinese comment — original encoding was corrupted.)
class PackgeConst{
// NOTE(review): "PACAKGE"/"PACKGE" look like spelling mistakes — confirm no
// other package members reference this name before renaming.
static final String PACAKGE_NAME="JAVA PACKGE";
} | unlicense |
SixArmDonkey/aerodrome-for-jet | src/main/java/com/buffalokiwi/aerodrome/jet/reports/ReportStatusRec.java | 12937 | /**
* This file is part of the Aerodrome package, and is subject to the
* terms and conditions defined in file 'LICENSE', which is part
* of this source code package.
*
* Copyright (c) 2017 All Rights Reserved, John T. Quinn III,
* <johnquinn3@gmail.com>
*
* THIS CODE AND INFORMATION ARE PROVIDED "AS IS" WITHOUT WARRANTY OF ANY
* KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A
* PARTICULAR PURPOSE.
*/
package com.buffalokiwi.aerodrome.jet.reports;
import com.buffalokiwi.aerodrome.jet.BuildableObject;
import com.buffalokiwi.aerodrome.jet.IJetDate;
import com.buffalokiwi.aerodrome.jet.JetDate;
import com.buffalokiwi.aerodrome.jet.Utils;
import javax.json.JsonObject;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/**
* The bulk reporting feature allows the merchant to request a report which
* will return to the merchant a JSON file with the requested information.
* Jet.com will use this to pass data to the retailer
*
* @param <R> Subclass Type
* @param <B> Inner builder subclass type
*
* @author John Quinn
*/
public class ReportStatusRec<R extends ReportStatusRec, B extends ReportStatusRec.Builder> extends BuildableObject<R,B>
{
  /**
   * Log instance
   */
  private final static Log LOG = LogFactory.getLog( ReportStatusRec.class );

//////////////////////////////////////////////////////////////////////////////
// BEGIN BUILDER                                                            //
//////////////////////////////////////////////////////////////////////////////

  /**
   * Builder
   * @param <T> Subclass Type
   * @param <R> Outer class Type
   */
  public static class Builder<T extends Builder, R extends ReportStatusRec> extends BuildableObject.Builder<T,R>
  {
    /**
     * The Jet defined merchant ID associated with the merchant account
     */
    private String merchantId = "";

    /**
     * The Jet defined report ID associated with the report requested
     */
    private String reportId = "";

    /**
     * The date the report was requested
     */
    private IJetDate requestedDate = null;

    /**
     * The current status of the report
     */
    private ReportStatus status = ReportStatus.NONE;

    /**
     * The type of report requested.
     */
    private ReportType type = ReportType.NONE;

    /**
     * The date-time the report started processing
     */
    private IJetDate processingStart = null;

    /**
     * The date-time the report ended processing
     */
    private IJetDate processingEnd = null;

    /**
     * The date the report will no longer be accessible for download. A new report
     * will need to be requested
     */
    private IJetDate reportExpDate = null;

    /**
     * The URL where the report can be downloaded
     */
    private String reportUrl = "";

    /**
     * The Jet defined merchant ID associated with the merchant account
     * @param merchantId the merchantId to set
     * @throws IllegalArgumentException when merchantId is null or empty
     */
    public T setMerchantId( String merchantId )
    {
      Utils.checkNullEmpty( merchantId, "merchantId" );
      this.merchantId = merchantId;
      return getReference();
    }

    /**
     * The Jet defined report ID associated with the report requested
     * @param reportId the reportId to set
     */
    public T setReportId( String reportId )
    {
      Utils.checkNull( reportId, "reportId" );
      this.reportId = reportId;
      return getReference();
    }

    /**
     * The date the report was requested (null allowed — not yet known)
     * @param requestedDate the requestedDate to set
     */
    public T setRequestedDate( IJetDate requestedDate )
    {
      this.requestedDate = requestedDate;
      return getReference();
    }

    /**
     * The current status of the report
     * @param status the status to set
     */
    public T setStatus( ReportStatus status )
    {
      Utils.checkNull( status, "status" );
      this.status = status;
      return getReference();
    }

    /**
     * The type of report requested.
     * @param type the type to set
     */
    public T setType( ReportType type )
    {
      Utils.checkNull( type, "type" );
      this.type = type;
      return getReference();
    }

    /**
     * The date-time the report started processing (null allowed)
     * @param processingStart the processingStart to set
     */
    public T setProcessingStart( IJetDate processingStart )
    {
      this.processingStart = processingStart;
      return getReference();
    }

    /**
     * The date-time the report ended processing (null allowed)
     * @param processingEnd the processingEnd to set
     */
    public T setProcessingEnd( IJetDate processingEnd )
    {
      this.processingEnd = processingEnd;
      return getReference();
    }

    /**
     * The date the report will no longer be accessible for download. A new
     * report will need to be requested (null allowed)
     * @param reportExpDate the reportExpDate to set
     */
    public T setReportExpDate( IJetDate reportExpDate )
    {
      this.reportExpDate = reportExpDate;
      return getReference();
    }

    /**
     * The URL where the report can be downloaded.
     * Null is silently ignored and leaves the default empty string in place.
     * @param reportUrl the reportUrl to set
     */
    public T setReportUrl( String reportUrl )
    {
      if ( reportUrl != null )
        this.reportUrl = reportUrl;
      return getReference();
    }

    /**
     * The type of report requested.
     * @return the type
     */
    public ReportType getType()
    {
      return type;
    }

    /**
     * The date the report was requested
     * @return the requestedDate
     */
    public IJetDate getRequestedDate()
    {
      return requestedDate;
    }

    /**
     * The Jet defined merchant ID associated with the merchant account
     * @return the merchantId
     */
    public String getMerchantId()
    {
      return merchantId;
    }

    /**
     * The Jet defined report ID associated with the report requested
     * @return the reportId
     */
    public String getReportId()
    {
      return reportId;
    }

    /**
     * The current status of the report
     * @return the status
     */
    public ReportStatus getStatus()
    {
      return status;
    }

    /**
     * The date-time the report started processing
     * @return the processingStart
     */
    public IJetDate getProcessingStart()
    {
      return processingStart;
    }

    /**
     * The date-time the report ended processing
     * @return the processingEnd
     */
    public IJetDate getProcessingEnd()
    {
      return processingEnd;
    }

    /**
     * The date the report will no longer be accessible for download. A
     * new report will need to be requested
     * @return the reportExpDate
     */
    public IJetDate getReportExpDate()
    {
      return reportExpDate;
    }

    /**
     * The URL where the report can be downloaded
     * @return the reportUrl
     */
    public String getReportUrl()
    {
      return reportUrl;
    }

    /**
     * Build the object
     * @return Built object
     */
    @Override
    public R build()
    {
      return (R)(new ReportStatusRec( Builder.class, this ));
    }
  }

//////////////////////////////////////////////////////////////////////////////
// END BUILDER                                                              //
//////////////////////////////////////////////////////////////////////////////

//////////////////////////////////////////////////////////////////////////////
// ReportStatusRec Properties
//////////////////////////////////////////////////////////////////////////////

  /**
   * The Jet defined merchant ID associated with the merchant account
   */
  private final String merchantId;

  /**
   * The Jet defined report ID associated with the report requested
   */
  private final String reportId;

  /**
   * The date the report was requested
   */
  private final IJetDate requestedDate;

  /**
   * The current status of the report
   */
  private final ReportStatus status;

  /**
   * The type of report requested.
   */
  private final ReportType type;

  /**
   * The date-time the report started processing
   */
  private final IJetDate processingStart;

  /**
   * The date-time the report ended processing
   */
  private final IJetDate processingEnd;

  /**
   * The date the report will no longer be accessible for download. A new report
   * will need to be requested
   */
  private final IJetDate reportExpDate;

  /**
   * The URL where the report can be downloaded
   */
  private final String reportUrl;

  /**
   * Turn jet json into an instance of this object.
   *
   * Optional properties (processing/expiration dates, url) are read with the
   * default-value overload of JsonObject.getString() — the no-default overload
   * throws a NullPointerException when the property is absent, which can
   * happen while a report is still being processed. JetDate.fromJetValueOrNull
   * and setReportUrl already tolerate null input.
   *
   * @param reportId The report id
   * @param json json
   * @return object
   */
  public static ReportStatusRec fromJSON( final String reportId,
    final JsonObject json )
  {
    Utils.checkNullEmpty( reportId, "reportId" );
    Utils.checkNull( json, "json" );

    return new Builder()
     .setMerchantId( json.getString( "merchant_id" ))
     .setProcessingEnd( JetDate.fromJetValueOrNull( json.getString( "processing_end", null )))
     .setProcessingStart( JetDate.fromJetValueOrNull( json.getString( "processing_start", null )))
     .setReportExpDate( JetDate.fromJetValueOrNull( json.getString( "report_expiration_date", null )))
     .setReportId( reportId )
     .setReportUrl( json.getString( "report_url", null ))
     .setRequestedDate( JetDate.fromJetValueOrNull( json.getString( "report_requested_date", null )))
     .setStatus( ReportStatus.fromText( json.getString( "report_status" )))
     .setType( ReportType.fromText( json.getString( "report_type" )))
     .build();
  }

  /**
   * Constructor.
   * Creates an immutable object instance based on the builder properties.
   * @param builderClass Builder class type
   * @param b Builder instance
   */
  protected ReportStatusRec( final Class<? extends ReportStatusRec.Builder> builderClass, final Builder b )
  {
    super( builderClass, b );
    //..Set the local properties from the builder here.
    merchantId = b.merchantId;
    reportId = b.reportId;
    requestedDate = b.requestedDate;
    status = b.status;
    type = b.type;
    processingStart = b.processingStart;
    processingEnd = b.processingEnd;
    reportExpDate = b.reportExpDate;
    reportUrl = b.reportUrl;
  }

//////////////////////////////////////////////////////////////////////////////
// ReportStatusRec Methods
//////////////////////////////////////////////////////////////////////////////

  /**
   * Convert the immutable instance into a mutable builder instance.
   * @return Builder
   */
  @Override
  public B toBuilder()
  {
    return (B)super.toBuilder()
      .setMerchantId( merchantId )
      .setReportId( reportId )
      .setRequestedDate( requestedDate )
      .setStatus( status )
      .setType( type )
      .setProcessingStart( processingStart )
      .setProcessingEnd( processingEnd )
      .setReportExpDate( reportExpDate )
      .setReportUrl( reportUrl );
  }

  /**
   * The type of report requested
   * @return the type
   */
  public ReportType getType()
  {
    return type;
  }

  /**
   * The date the report was requested
   * @return the requestedDate
   */
  public IJetDate getRequestedDate()
  {
    return requestedDate;
  }

  /**
   * The Jet defined merchant ID associated with the merchant account
   * @return the merchantId
   */
  public String getMerchantId()
  {
    return merchantId;
  }

  /**
   * The Jet defined report ID associated with the report requested
   * @return the reportId
   */
  public String getReportId()
  {
    return reportId;
  }

  /**
   * The current status of the report
   * @return the status
   */
  public ReportStatus getStatus()
  {
    return status;
  }

  /**
   * The date-time the report started processing
   * @return the processingStart
   */
  public IJetDate getProcessingStart()
  {
    return processingStart;
  }

  /**
   * The date-time the report ended processing
   * @return the processingEnd
   */
  public IJetDate getProcessingEnd()
  {
    return processingEnd;
  }

  /**
   * The date the report will no longer be accessible for download. A new
   * report will need to be requested
   * @return the reportExpDate
   */
  public IJetDate getReportExpDate()
  {
    return reportExpDate;
  }

  /**
   * The URL where the report can be downloaded
   * @return the reportUrl
   */
  public String getReportUrl()
  {
    return reportUrl;
  }

  /**
   * If the report is complete and ready for download or marked as failed.
   * @return is done
   */
  public boolean isDone()
  {
    return !getStatus().equals( ReportStatus.REQUESTED );
  }
}
| apache-2.0 |
nakul02/incubator-systemml | src/main/java/org/apache/sysml/runtime/controlprogram/parfor/opt/OptTreeConverter.java | 25338 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.sysml.runtime.controlprogram.parfor.opt;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import org.apache.sysml.hops.DataOp;
import org.apache.sysml.hops.FunctionOp;
import org.apache.sysml.hops.Hop;
import org.apache.sysml.hops.Hop.MultiThreadedHop;
import org.apache.sysml.hops.HopsException;
import org.apache.sysml.hops.LiteralOp;
import org.apache.sysml.hops.OptimizerUtils;
import org.apache.sysml.lops.LopProperties;
import org.apache.sysml.parser.DMLProgram;
import org.apache.sysml.parser.ForStatement;
import org.apache.sysml.parser.ForStatementBlock;
import org.apache.sysml.parser.FunctionStatement;
import org.apache.sysml.parser.FunctionStatementBlock;
import org.apache.sysml.parser.IfStatement;
import org.apache.sysml.parser.IfStatementBlock;
import org.apache.sysml.parser.ParForStatement;
import org.apache.sysml.parser.ParForStatementBlock;
import org.apache.sysml.parser.StatementBlock;
import org.apache.sysml.parser.WhileStatement;
import org.apache.sysml.parser.WhileStatementBlock;
import org.apache.sysml.runtime.DMLRuntimeException;
import org.apache.sysml.runtime.controlprogram.ForProgramBlock;
import org.apache.sysml.runtime.controlprogram.FunctionProgramBlock;
import org.apache.sysml.runtime.controlprogram.IfProgramBlock;
import org.apache.sysml.runtime.controlprogram.LocalVariableMap;
import org.apache.sysml.runtime.controlprogram.ParForProgramBlock;
import org.apache.sysml.runtime.controlprogram.Program;
import org.apache.sysml.runtime.controlprogram.ProgramBlock;
import org.apache.sysml.runtime.controlprogram.WhileProgramBlock;
import org.apache.sysml.runtime.controlprogram.context.ExecutionContext;
import org.apache.sysml.runtime.controlprogram.parfor.opt.OptNode.ExecType;
import org.apache.sysml.runtime.controlprogram.parfor.opt.OptNode.NodeType;
import org.apache.sysml.runtime.controlprogram.parfor.opt.OptNode.ParamType;
import org.apache.sysml.runtime.controlprogram.parfor.opt.Optimizer.PlanInputType;
import org.apache.sysml.runtime.instructions.Instruction;
import org.apache.sysml.runtime.instructions.MRJobInstruction;
import org.apache.sysml.runtime.instructions.cp.FunctionCallCPInstruction;
import org.apache.sysml.runtime.instructions.cpfile.MatrixIndexingCPFileInstruction;
import org.apache.sysml.runtime.instructions.spark.SPInstruction;
/**
* Converter for creating an internal plan representation for a given runtime program
* and to modify/create the runtime program according to the optimized plan.
*
* NOTE: currently only one abstract and one runtime plan at a time.
* This implies that only one parfor optimization can happen at a time.
*/
public class OptTreeConverter
{
	//internal configuration flag: if true, function calls are expanded into the
	//abstract plan (guarded against recursion via a memo set of function keys)
	public static boolean INCLUDE_FUNCTIONS = true;

	//internal state: shared, static plan mappings — this is why only one
	//parfor optimization can happen at a time (see class javadoc)
	private static OptTreePlanMappingAbstract _hlMap = null; //hop-level (abstract) plan mapping
	private static OptTreePlanMappingRuntime _rtMap = null;  //runtime plan mapping

	static
	{
		_hlMap = new OptTreePlanMappingAbstract();
		_rtMap = new OptTreePlanMappingRuntime();
	}
public static OptTree createOptTree( int ck, double cm, PlanInputType type, ParForStatementBlock pfsb, ParForProgramBlock pfpb, ExecutionContext ec ) {
OptNode root = null;
switch( type )
{
case ABSTRACT_PLAN:
_hlMap.putRootProgram(pfsb.getDMLProg(), pfpb.getProgram());
Set<String> memo = new HashSet<>();
root = rCreateAbstractOptNode(pfsb, pfpb, ec.getVariables(), true, memo);
root.checkAndCleanupRecursiveFunc(new HashSet<String>()); //create consistency between recursive info
root.checkAndCleanupLeafNodes(); //prune unnecessary nodes
break;
case RUNTIME_PLAN:
root = rCreateOptNode( pfpb, ec.getVariables(), true, true );
break;
default:
throw new DMLRuntimeException("Optimizer plan input type "+type+" not supported.");
}
OptTree tree = new OptTree(ck, cm, type, root);
return tree;
}
public static OptTree createAbstractOptTree( int ck, double cm, ParForStatementBlock pfsb, ParForProgramBlock pfpb, Set<String> memo, ExecutionContext ec )
{
OptTree tree = null;
OptNode root = null;
try
{
root = rCreateAbstractOptNode( pfsb, pfpb, ec.getVariables(), true, memo );
tree = new OptTree(ck, cm, root);
}
catch(HopsException he)
{
throw new DMLRuntimeException(he);
}
return tree;
}
	/**
	 * Recursively creates a runtime-plan OptNode tree for the given program block.
	 *
	 * @param pb        program block to convert (if/while/for/parfor/generic)
	 * @param vars      current variable map, used to estimate iteration counts
	 * @param topLevel  true for the root parfor (its predicate instructions are
	 *                  then excluded from the plan)
	 * @param storeObjs if true, register pb/instruction-to-node mappings in _rtMap
	 * @return root opt node for this program block
	 */
	public static OptNode rCreateOptNode( ProgramBlock pb, LocalVariableMap vars, boolean topLevel, boolean storeObjs )
	{
		OptNode node = null;

		if( pb instanceof IfProgramBlock )
		{
			IfProgramBlock ipb = (IfProgramBlock) pb;
			node = new OptNode( NodeType.IF );
			if(storeObjs)
				_rtMap.putMapping(ipb, node);
			node.setExecType(ExecType.CP);
			//process if condition
			OptNode ifn = new OptNode(NodeType.GENERIC);
			node.addChilds( createOptNodes( ipb.getPredicate(), vars,storeObjs ) );
			node.addChild( ifn );
			for( ProgramBlock lpb : ipb.getChildBlocksIfBody() )
				ifn.addChild( rCreateOptNode(lpb,vars,topLevel, storeObjs) );
			//process else condition (only if a non-empty else body exists)
			if( ipb.getChildBlocksElseBody() != null && ipb.getChildBlocksElseBody().size()>0 )
			{
				OptNode efn = new OptNode(NodeType.GENERIC);
				node.addChild( efn );
				for( ProgramBlock lpb : ipb.getChildBlocksElseBody() )
					efn.addChild( rCreateOptNode(lpb,vars,topLevel, storeObjs) );
			}
		}
		else if( pb instanceof WhileProgramBlock )
		{
			WhileProgramBlock wpb = (WhileProgramBlock) pb;
			node = new OptNode( NodeType.WHILE );
			if(storeObjs)
				_rtMap.putMapping(wpb, node);
			node.setExecType(ExecType.CP);
			//process predicate instruction
			node.addChilds( createOptNodes( wpb.getPredicate(), vars,storeObjs ) );
			//process body
			for( ProgramBlock lpb : wpb.getChildBlocks() )
				node.addChild( rCreateOptNode(lpb,vars,topLevel,storeObjs) );
		}
		else if( pb instanceof ForProgramBlock && !(pb instanceof ParForProgramBlock) )
		{
			ForProgramBlock fpb = (ForProgramBlock) pb;
			node = new OptNode( NodeType.FOR );
			if(storeObjs)
				_rtMap.putMapping(fpb, node);
			node.setExecType(ExecType.CP);

			//determine number of iterations (FACTOR_NUM_ITERATIONS as fallback
			//if the count cannot be derived from the variable map)
			long N = OptimizerUtils.getNumIterations(fpb, vars, CostEstimator.FACTOR_NUM_ITERATIONS);
			node.addParam(ParamType.NUM_ITERATIONS, String.valueOf(N));

			node.addChilds( createOptNodes( fpb.getFromInstructions(), vars,storeObjs ) );
			node.addChilds( createOptNodes( fpb.getToInstructions(), vars,storeObjs ) );
			node.addChilds( createOptNodes( fpb.getIncrementInstructions(), vars,storeObjs ) );

			//process body
			for( ProgramBlock lpb : fpb.getChildBlocks() )
				node.addChild( rCreateOptNode(lpb,vars,topLevel,storeObjs) );
		}
		else if( pb instanceof ParForProgramBlock )
		{
			ParForProgramBlock fpb = (ParForProgramBlock) pb;
			node = new OptNode( NodeType.PARFOR );
			if(storeObjs)
				_rtMap.putMapping(fpb, node);
			node.setK( fpb.getDegreeOfParallelism() );
			long N = fpb.getNumIterations();
			node.addParam(ParamType.NUM_ITERATIONS, (N!=-1) ?
				String.valueOf(N) : String.valueOf(CostEstimator.FACTOR_NUM_ITERATIONS));

			//map parfor execution mode to opt node exec type
			switch(fpb.getExecMode())
			{
				case LOCAL:
					node.setExecType(ExecType.CP);
					break;
				case REMOTE_MR:
				case REMOTE_MR_DP:
					node.setExecType(ExecType.MR);
					break;
				case REMOTE_SPARK:
				case REMOTE_SPARK_DP:
					node.setExecType(ExecType.SPARK);
					break;
				default:
					node.setExecType(null);
			}

			//predicate instructions only included for nested parfors; the root
			//parfor's predicates are not part of the optimization scope
			if( !topLevel )
			{
				node.addChilds( createOptNodes( fpb.getFromInstructions(), vars, storeObjs ) );
				node.addChilds( createOptNodes( fpb.getToInstructions(), vars, storeObjs ) );
				node.addChilds( createOptNodes( fpb.getIncrementInstructions(), vars, storeObjs ) );
			}

			//process body
			for( ProgramBlock lpb : fpb.getChildBlocks() )
				node.addChild( rCreateOptNode(lpb,vars,false,storeObjs) );

			//parameters, add required parameters
		}
		else //last level program block
		{
			node = new OptNode(NodeType.GENERIC);
			if(storeObjs)
				_rtMap.putMapping(pb, node);
			node.addChilds( createOptNodes(pb.getInstructions(), vars, storeObjs) );
			node.setExecType(ExecType.CP);
		}

		return node;
	}
public static ArrayList<OptNode> createOptNodes (ArrayList<Instruction> instset, LocalVariableMap vars, boolean storeObjs) {
ArrayList<OptNode> tmp = new ArrayList<>(instset.size());
for( Instruction inst : instset )
tmp.add( createOptNode(inst,vars,storeObjs) );
return tmp;
}
public static OptNode createOptNode( Instruction inst, LocalVariableMap vars, boolean storeObjs ) {
OptNode node = new OptNode(NodeType.INST);
String instStr = inst.toString();
String opstr = instStr.split(Instruction.OPERAND_DELIM)[1];
if(storeObjs)
_rtMap.putMapping(inst, node);
node.addParam(ParamType.OPSTRING,opstr);
//exec type
switch( inst.getType() )
{
case CONTROL_PROGRAM:
node.setExecType(ExecType.CP);
//exec operations
//CPInstruction cpinst = (CPInstruction) inst;
//node.addParam(ParamType.OPTYPE,cpinst.getCPInstructionType().toString());
break;
case MAPREDUCE:
case MAPREDUCE_JOB:
node.setExecType(ExecType.MR);
//exec operations
//MRInstruction mrinst = (MRInstruction) inst;
//node.addParam(ParamType.OPTYPE,mrinst.getMRInstructionType().toString());
break;
default:
// In initial prototype, parfor is not supported for spark, so this exception will be thrown
throw new DMLRuntimeException("Unsupported instruction type.");
}
return node;
}
	/**
	 * Recursively creates an abstract (hop-level) OptNode tree for the given
	 * statement block / program block pair. Statement blocks and program blocks
	 * are traversed in lockstep (index-wise), and sb/pb-to-node mappings are
	 * registered in the static _hlMap.
	 *
	 * @param sb       statement block matching pb
	 * @param pb       program block to convert
	 * @param vars     current variable map, used to estimate iteration counts
	 * @param topLevel true for the root parfor (predicate hops then excluded)
	 * @param memo     set of function keys on the current call path, used to
	 *                 detect and mark recursive function calls
	 * @return root opt node for this block
	 */
	public static OptNode rCreateAbstractOptNode( StatementBlock sb, ProgramBlock pb, LocalVariableMap vars, boolean topLevel, Set<String> memo )
	{
		OptNode node = null;

		if( pb instanceof IfProgramBlock && sb instanceof IfStatementBlock )
		{
			IfProgramBlock ipb = (IfProgramBlock) pb;
			IfStatementBlock isb = (IfStatementBlock) sb;
			IfStatement is = (IfStatement) isb.getStatement(0);

			node = new OptNode( NodeType.IF );
			_hlMap.putProgMapping(sb, pb, node);
			node.setExecType(ExecType.CP);
			node.setLineNumbers(isb.getBeginLine(), isb.getEndLine());

			//handle predicate (visit status reset required before hop traversal)
			isb.getPredicateHops().resetVisitStatus();
			node.addChilds( rCreateAbstractOptNodes( isb.getPredicateHops(), vars, memo ) );

			//process if branch
			OptNode ifn = new OptNode(NodeType.GENERIC);
			_hlMap.putProgMapping(sb, pb, ifn);
			ifn.setExecType(ExecType.CP);
			node.addChild( ifn );
			int len = is.getIfBody().size();
			for( int i=0; i<ipb.getChildBlocksIfBody().size() && i<len; i++ )
			{
				ProgramBlock lpb = ipb.getChildBlocksIfBody().get(i);
				StatementBlock lsb = is.getIfBody().get(i);
				ifn.addChild( rCreateAbstractOptNode(lsb,lpb,vars,false, memo) );
			}
			//process else branch
			if( ipb.getChildBlocksElseBody() != null )
			{
				OptNode efn = new OptNode(NodeType.GENERIC);
				_hlMap.putProgMapping(sb, pb, efn);
				efn.setExecType(ExecType.CP);
				node.addChild( efn );
				int len2 = is.getElseBody().size();
				for( int i=0; i<ipb.getChildBlocksElseBody().size() && i<len2; i++ )
				{
					ProgramBlock lpb = ipb.getChildBlocksElseBody().get(i);
					StatementBlock lsb = is.getElseBody().get(i);
					efn.addChild( rCreateAbstractOptNode(lsb,lpb,vars,false, memo) );
				}
			}
		}
		else if( pb instanceof WhileProgramBlock && sb instanceof WhileStatementBlock )
		{
			WhileProgramBlock wpb = (WhileProgramBlock) pb;
			WhileStatementBlock wsb = (WhileStatementBlock)sb;
			WhileStatement ws = (WhileStatement) wsb.getStatement(0);

			node = new OptNode( NodeType.WHILE );
			_hlMap.putProgMapping(sb, pb, node);
			node.setExecType(ExecType.CP);
			node.setLineNumbers(wsb.getBeginLine(), wsb.getEndLine());

			//handle predicate
			wsb.getPredicateHops().resetVisitStatus();
			node.addChilds( rCreateAbstractOptNodes( wsb.getPredicateHops(), vars, memo ) );

			//process body
			int len = ws.getBody().size();
			for( int i=0; i<wpb.getChildBlocks().size() && i<len; i++ ) {
				ProgramBlock lpb = wpb.getChildBlocks().get(i);
				StatementBlock lsb = ws.getBody().get(i);
				node.addChild( rCreateAbstractOptNode(lsb,lpb,vars,false, memo) );
			}
		}
		else if( pb instanceof ForProgramBlock && sb instanceof ForStatementBlock && !(pb instanceof ParForProgramBlock) )
		{
			ForProgramBlock fpb = (ForProgramBlock) pb;
			ForStatementBlock fsb = (ForStatementBlock)sb;
			ForStatement fs = (ForStatement) fsb.getStatement(0);

			node = new OptNode( NodeType.FOR );
			_hlMap.putProgMapping(sb, pb, node);
			node.setExecType(ExecType.CP);
			node.setLineNumbers(fsb.getBeginLine(), fsb.getEndLine());

			//determine number of iterations (fallback constant if unknown)
			long N = OptimizerUtils.getNumIterations(fpb, vars, CostEstimator.FACTOR_NUM_ITERATIONS);
			node.addParam(ParamType.NUM_ITERATIONS, String.valueOf(N));

			//handle predicate hops (from/to/increment; increment is optional)
			fsb.getFromHops().resetVisitStatus();
			fsb.getToHops().resetVisitStatus();
			if( fsb.getIncrementHops()!=null )
				fsb.getIncrementHops().resetVisitStatus();
			node.addChilds( rCreateAbstractOptNodes( fsb.getFromHops(), vars, memo ) );
			node.addChilds( rCreateAbstractOptNodes( fsb.getToHops(), vars, memo ) );
			if( fsb.getIncrementHops()!=null )
				node.addChilds( rCreateAbstractOptNodes( fsb.getIncrementHops(), vars, memo ) );

			//process body
			int len = fs.getBody().size();
			for( int i=0; i<fpb.getChildBlocks().size() && i<len; i++ ) {
				ProgramBlock lpb = fpb.getChildBlocks().get(i);
				StatementBlock lsb = fs.getBody().get(i);
				node.addChild( rCreateAbstractOptNode(lsb,lpb,vars,false, memo) );
			}
		}
		else if( pb instanceof ParForProgramBlock && sb instanceof ParForStatementBlock )
		{
			ParForProgramBlock fpb = (ParForProgramBlock) pb;
			ParForStatementBlock fsb = (ParForStatementBlock)sb;
			ParForStatement fs = (ParForStatement) fsb.getStatement(0);

			node = new OptNode( NodeType.PARFOR );
			node.setLineNumbers(fsb.getBeginLine(), fsb.getEndLine());
			_hlMap.putProgMapping(sb, pb, node);
			node.setK( fpb.getDegreeOfParallelism() );
			long N = fpb.getNumIterations();
			node.addParam(ParamType.NUM_ITERATIONS, (N!=-1) ? String.valueOf(N) :
				String.valueOf(CostEstimator.FACTOR_NUM_ITERATIONS));

			//map parfor execution mode to opt node exec type
			//NOTE(review): unlike rCreateOptNode, this switch has no default
			//branch — confirm the enum has no additional values beyond these.
			switch(fpb.getExecMode()) {
				case LOCAL:
					node.setExecType(ExecType.CP);
					break;
				case REMOTE_MR:
				case REMOTE_MR_DP:
					node.setExecType(ExecType.MR);
					break;
				case REMOTE_SPARK:
				case REMOTE_SPARK_DP:
					node.setExecType(ExecType.SPARK);
					break;
				case UNSPECIFIED:
					node.setExecType(null);
			}

			//predicate hops only included for nested parfors
			if( !topLevel ) {
				fsb.getFromHops().resetVisitStatus();
				fsb.getToHops().resetVisitStatus();
				if( fsb.getIncrementHops()!=null )
					fsb.getIncrementHops().resetVisitStatus();
				node.addChilds( rCreateAbstractOptNodes( fsb.getFromHops(), vars, memo ) );
				node.addChilds( rCreateAbstractOptNodes( fsb.getToHops(), vars, memo ) );
				if( fsb.getIncrementHops()!=null )
					node.addChilds( rCreateAbstractOptNodes( fsb.getIncrementHops(), vars, memo ) );
			}

			//process body
			int len = fs.getBody().size();
			for( int i=0; i<fpb.getChildBlocks().size() && i<len; i++ ) {
				ProgramBlock lpb = fpb.getChildBlocks().get(i);
				StatementBlock lsb = fs.getBody().get(i);
				node.addChild( rCreateAbstractOptNode(lsb,lpb,vars,false, memo) );
			}

			//parameters, add required parameters
			Map<String,String> lparams = fpb.getParForParams();
			node.addParam(ParamType.DATA_PARTITIONER, lparams.get(ParForStatementBlock.DATA_PARTITIONER));
			node.addParam(ParamType.TASK_PARTITIONER, lparams.get(ParForStatementBlock.TASK_PARTITIONER));
			node.addParam(ParamType.RESULT_MERGE, lparams.get(ParForStatementBlock.RESULT_MERGE));
			//TODO task size
		}
		else //last level program block
		{
			//note: parameter sb is intentionally replaced by the pb's own
			//statement block here
			sb = pb.getStatementBlock();

			//process all hops
			node = new OptNode(NodeType.GENERIC);
			_hlMap.putProgMapping(sb, pb, node);
			node.addChilds( createAbstractOptNodes(sb.getHops(), vars, memo) );
			node.setExecType(ExecType.CP);
			node.setLineNumbers(sb.getBeginLine(), sb.getEndLine());

			//TODO remove this workaround once this information can be obtained from hops/lops compiler
			if( node.isCPOnly() ) {
				boolean isSparkExec = OptimizerUtils.isSparkExecutionMode();
				if( !isSparkExec && containsMRJobInstruction(pb, false, false) )
					node.setExecType(ExecType.MR);
				else if( isSparkExec && containsMRJobInstruction(pb, false, true))
					node.setExecType(ExecType.SPARK);
			}
		}

		//final cleanup
		node.checkAndCleanupLeafNodes(); //NOTE: required because this function is also used to create subtrees

		return node;
	}
public static ArrayList<OptNode> createAbstractOptNodes(ArrayList<Hop> hops, LocalVariableMap vars, Set<String> memo ) {
ArrayList<OptNode> ret = new ArrayList<>();
//reset all hops
Hop.resetVisitStatus(hops);
//created and add actual opt nodes
if( hops != null )
for( Hop hop : hops )
ret.addAll(rCreateAbstractOptNodes(hop, vars, memo));
return ret;
}
	/**
	 * Recursively creates abstract opt nodes for the given hop and its inputs.
	 * DataOps and LiteralOps are skipped (no opt nodes needed); FunctionOps are
	 * expanded into FUNCCALL subtrees if INCLUDE_FUNCTIONS is enabled, with the
	 * memo set preventing infinite expansion of recursive functions.
	 *
	 * @param hop  hop to convert (skipped entirely if already visited)
	 * @param vars current variable map
	 * @param memo function keys on the current expansion path
	 * @return list of created opt nodes (possibly empty)
	 */
	public static ArrayList<OptNode> rCreateAbstractOptNodes(Hop hop, LocalVariableMap vars, Set<String> memo) {
		ArrayList<OptNode> ret = new ArrayList<>();
		ArrayList<Hop> in = hop.getInput();

		if( hop.isVisited() )
			return ret;

		//general case
		if( !(hop instanceof DataOp || hop instanceof LiteralOp || hop instanceof FunctionOp) )
		{
			OptNode node = new OptNode(NodeType.HOP);
			String opstr = hop.getOpString();
			node.addParam(ParamType.OPSTRING,opstr);

			//handle execution type (default CP if not set yet)
			LopProperties.ExecType et = (hop.getExecType()!=null) ?
				hop.getExecType() : LopProperties.ExecType.CP;
			switch( et ) {
				case CP:case GPU:
					node.setExecType(ExecType.CP); break;
				case SPARK:
					node.setExecType(ExecType.SPARK); break;
				case MR:
					node.setExecType(ExecType.MR); break;
				default:
					throw new DMLRuntimeException("Unsupported optnode exec type: "+et);
			}

			//handle degree of parallelism for CP multi-threaded operators
			if( et == LopProperties.ExecType.CP && hop instanceof MultiThreadedHop ){
				MultiThreadedHop mtop = (MultiThreadedHop) hop;
				node.setK( OptimizerUtils.getConstrainedNumThreads(mtop.getMaxNumThreads()) );
			}

			//assign node to return
			_hlMap.putHopMapping(hop, node);
			ret.add(node);
		}
		//process function calls
		else if (hop instanceof FunctionOp && INCLUDE_FUNCTIONS )
		{
			FunctionOp fhop = (FunctionOp) hop;
			String fname = fhop.getFunctionName();
			String fnspace = fhop.getFunctionNamespace();
			String fKey = fhop.getFunctionKey();
			Object[] prog = _hlMap.getRootProgram();

			OptNode node = new OptNode(NodeType.FUNCCALL);
			_hlMap.putHopMapping(fhop, node);
			node.setExecType(ExecType.CP);
			node.addParam(ParamType.OPSTRING, fKey);

			//internal (builtin) functions are not expanded
			if( !fnspace.equals(DMLProgram.INTERNAL_NAMESPACE) )
			{
				FunctionProgramBlock fpb = ((Program)prog[1]).getFunctionProgramBlock(fnspace, fname);
				FunctionStatementBlock fsb = ((DMLProgram)prog[0]).getFunctionStatementBlock(fnspace, fname);
				FunctionStatement fs = (FunctionStatement) fsb.getStatement(0);

				//process body; NOTE: memo prevents inclusion of functions multiple times
				if( !memo.contains(fKey) )
				{
					memo.add(fKey);
					int len = fs.getBody().size();
					for( int i=0; i<fpb.getChildBlocks().size() && i<len; i++ ) {
						ProgramBlock lpb = fpb.getChildBlocks().get(i);
						StatementBlock lsb = fs.getBody().get(i);
						node.addChild( rCreateAbstractOptNode(lsb, lpb, vars, false, memo) );
					}
					memo.remove(fKey);
				}
				else
					node.addParam(ParamType.RECURSIVE_CALL, "true"); //recursive call detected
			}

			ret.add(node);
		}

		//recurse into inputs (DataOps/LiteralOps pruned, no opt nodes needed)
		if( in != null )
			for( Hop hin : in )
				if( !(hin instanceof DataOp || hin instanceof LiteralOp ) ) //no need for opt nodes
					ret.addAll(rCreateAbstractOptNodes(hin, vars, memo));

		hop.setVisited();

		return ret;
	}
	/**
	 * Recursively checks whether the given program block (or any child block)
	 * contains an MR job, spark, or CP-file instruction; function bodies are
	 * not descended into, but function-call instructions count if
	 * inclFunctions is set.
	 *
	 * @param pb            program block to scan
	 * @param inclFunctions if true, function-call instructions in last-level
	 *                      blocks count as a positive
	 * @return true on first match (short-circuits)
	 */
	public static boolean rContainsMRJobInstruction( ProgramBlock pb, boolean inclFunctions )
	{
		boolean ret = false;

		if (pb instanceof WhileProgramBlock)
		{
			WhileProgramBlock tmp = (WhileProgramBlock)pb;
			ret = containsMRJobInstruction(tmp.getPredicate(), true, true);
			if( ret ) return ret;
			for (ProgramBlock pb2 : tmp.getChildBlocks()) {
				ret = rContainsMRJobInstruction(pb2, inclFunctions);
				if( ret ) return ret;
			}
		}
		else if (pb instanceof IfProgramBlock)
		{
			IfProgramBlock tmp = (IfProgramBlock)pb;
			ret = containsMRJobInstruction(tmp.getPredicate(), true, true);
			if( ret ) return ret;
			for( ProgramBlock pb2 : tmp.getChildBlocksIfBody() ){
				ret = rContainsMRJobInstruction(pb2, inclFunctions);
				if( ret ) return ret;
			}
			//NOTE(review): no null check on the else body here, unlike
			//rCreateOptNode — presumably IfProgramBlock always initializes
			//the else-body list; confirm.
			for( ProgramBlock pb2 : tmp.getChildBlocksElseBody() ){
				ret = rContainsMRJobInstruction(pb2, inclFunctions);
				if( ret ) return ret;
			}
		}
		else if (pb instanceof ForProgramBlock) //includes ParFORProgramBlock
		{
			ForProgramBlock tmp = (ForProgramBlock)pb;
			ret = containsMRJobInstruction(tmp.getFromInstructions(), true, true);
			ret |= containsMRJobInstruction(tmp.getToInstructions(), true, true);
			ret |= containsMRJobInstruction(tmp.getIncrementInstructions(), true, true);
			if( ret ) return ret;
			for( ProgramBlock pb2 : tmp.getChildBlocks() ){
				ret = rContainsMRJobInstruction(pb2, inclFunctions);
				if( ret ) return ret;
			}
		}
		else if ( pb instanceof FunctionProgramBlock ) //includes ExternalFunctionProgramBlock and ExternalFunctionProgramBlockCP)
		{
			//do nothing
		}
		else
		{
			//last-level program block
			ret = containsMRJobInstruction(pb, true, true)
				|| (inclFunctions && containsFunctionCallInstruction(pb));
		}

		return ret;
	}
	/**
	 * Convenience overload: scans the instructions of the given program block.
	 * See {@link #containsMRJobInstruction(ArrayList, boolean, boolean)}.
	 */
	public static boolean containsMRJobInstruction( ProgramBlock pb, boolean inclCPFile, boolean inclSpark ) {
		return containsMRJobInstruction(pb.getInstructions(), inclCPFile, inclSpark);
	}
public static boolean containsMRJobInstruction( ArrayList<Instruction> instSet, boolean inclCPFile, boolean inclSpark ) {
return instSet.stream().anyMatch(inst -> inst instanceof MRJobInstruction
|| (inclSpark && inst instanceof SPInstruction)
|| (inclCPFile && inst instanceof MatrixIndexingCPFileInstruction));
}
public static boolean containsFunctionCallInstruction( ProgramBlock pb ) {
return pb.getInstructions().stream()
.anyMatch(inst -> inst instanceof FunctionCallCPInstruction);
}
	/**
	 * Replaces the runtime program block pbOld with pbNew inside the parent
	 * program block mapped to the given parent opt node, and updates the
	 * corresponding plan mapping (_rtMap or _hlMap).
	 *
	 * @param parent parent opt node whose mapped program block contains pbOld
	 * @param n      opt node associated with the new program block
	 * @param rtMap  if true, resolve/update via the runtime mapping; otherwise
	 *               via the abstract (hop-level) mapping
	 * @throws DMLRuntimeException if the parent program block type is unsupported
	 */
	public static void replaceProgramBlock(OptNode parent, OptNode n, ProgramBlock pbOld, ProgramBlock pbNew, boolean rtMap) {
		ProgramBlock pbParent = null;
		if( rtMap )
			pbParent = (ProgramBlock)_rtMap.getMappedObject( parent.getID() );
		else
		{
			if( parent.getNodeType()==NodeType.FUNCCALL )
			{
				//function-call parents map to a FunctionOp; resolve the actual
				//function program block through the root program
				FunctionOp fop = (FunctionOp) _hlMap.getMappedHop(parent.getID());
				pbParent = ((Program)_hlMap.getRootProgram()[1]).getFunctionProgramBlock(fop.getFunctionNamespace(), fop.getFunctionName());
			}
			else
				pbParent = (ProgramBlock)_hlMap.getMappedProg( parent.getID() )[1];
		}

		//replace in the appropriate child-block list(s) of the parent
		if( pbParent instanceof IfProgramBlock )
		{
			IfProgramBlock ipb = (IfProgramBlock) pbParent;
			replaceProgramBlock( ipb.getChildBlocksIfBody(), pbOld, pbNew );
			replaceProgramBlock( ipb.getChildBlocksElseBody(), pbOld, pbNew );
		}
		else if( pbParent instanceof WhileProgramBlock )
		{
			WhileProgramBlock wpb = (WhileProgramBlock) pbParent;
			replaceProgramBlock( wpb.getChildBlocks(), pbOld, pbNew );
		}
		else if( pbParent instanceof ForProgramBlock || pbParent instanceof ParForProgramBlock )
		{
			ForProgramBlock fpb = (ForProgramBlock) pbParent;
			replaceProgramBlock( fpb.getChildBlocks(), pbOld, pbNew );
		}
		else if( pbParent instanceof FunctionProgramBlock )
		{
			FunctionProgramBlock fpb = (FunctionProgramBlock) pbParent;
			replaceProgramBlock( fpb.getChildBlocks(), pbOld, pbNew );
		}
		else
			throw new DMLRuntimeException("Optimizer doesn't support "+pbParent.getClass().getName());

		//update repository
		if( rtMap )
			_rtMap.replaceMapping(pbNew, n);
		else
			_hlMap.replaceMapping(pbNew, n);
	}
public static void replaceProgramBlock(ArrayList<ProgramBlock> pbs, ProgramBlock pbOld, ProgramBlock pbNew)
{
int len = pbs.size();
for( int i=0; i<len; i++ )
if( pbs.get(i) == pbOld )
pbs.set(i, pbNew);
}
	///////////////////////////////
	//                           //
	// internal state management //
	//                           //
	///////////////////////////////

	/**
	 * Returns the shared abstract (hop-level) plan mapping.
	 * @return the static abstract plan mapping instance
	 */
	public static OptTreePlanMappingAbstract getAbstractPlanMapping()
	{
		return _hlMap;
	}

	/**
	 * Clears both static plan mappings; called between optimizations since
	 * the mappings are shared global state.
	 */
	public static void clear()
	{
		if( _hlMap != null )
			_hlMap.clear();
		if( _rtMap != null )
			_rtMap.clear();
	}
}
| apache-2.0 |
jtwig/jtwig-core | src/test/java/org/jtwig/value/convert/character/CharConverterTest.java | 659 | package org.jtwig.value.convert.character;
import org.jtwig.value.convert.Converter;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
public class CharConverterTest {
private CharConverter underTest = new CharConverter();
@Test
public void convert() throws Exception {
Object item = 'a';
Converter.Result<Character> result = underTest.convert('a');
assertEquals(item, result.get());
}
@Test
public void convertString() throws Exception {
Object item = 'a';
Converter.Result<Character> result = underTest.convert("a");
assertEquals(item, result.get());
}
} | apache-2.0 |
sekikn/ambari | ambari-server/src/main/java/org/apache/ambari/server/orm/models/HostComponentSummary.java | 4278 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ambari.server.orm.models;
import java.util.ArrayList;
import java.util.List;
import org.apache.ambari.server.StaticallyInject;
import org.apache.ambari.server.orm.dao.HostComponentDesiredStateDAO;
import org.apache.ambari.server.orm.dao.HostComponentStateDAO;
import org.apache.ambari.server.orm.dao.HostDAO;
import org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntity;
import org.apache.ambari.server.orm.entities.HostComponentStateEntity;
import org.apache.ambari.server.orm.entities.HostEntity;
import org.apache.ambari.server.state.State;
import org.codehaus.jackson.annotate.JsonProperty;
import com.google.inject.Inject;
/**
 * The {@link HostComponentSummary} class provides a concise representation of
* the state of a component on a given host. Some of its fields are serializable
* to JSON.
*/
@StaticallyInject
public class HostComponentSummary {
  @JsonProperty("service_name")
  private String serviceName;

  @JsonProperty("component_name")
  private String componentName;

  @JsonProperty("host_id")
  private Long hostId;

  @JsonProperty("host_name")
  private String hostName;

  @JsonProperty("desired_state")
  private State desiredState;

  @JsonProperty("current_state")
  private State currentState;

  @Inject
  private static HostDAO hostDao;

  @Inject
  private static HostComponentStateDAO hostComponentStateDao;

  @Inject
  private static HostComponentDesiredStateDAO hostComponentDesiredStateDao;

  /**
   * Constructor. Resolves the host name from the statically injected
   * {@link HostDAO} using the supplied host id.
   */
  public HostComponentSummary(String serviceName, String componentName, Long hostId, State desiredState, State currentState) {
    this.serviceName = serviceName;
    this.componentName = componentName;
    this.hostId = hostId;

    HostEntity host = hostDao.findById(hostId);
    if (host != null) {
      hostName = host.getHostName();
    }

    this.desiredState = desiredState;
    this.currentState = currentState;
  }

  public long getHostId() {
    return hostId;
  }

  /**
   * @return the host name, or an empty string if it could not be resolved
   */
  public String getHostName() {
    return (hostName == null || hostName.isEmpty()) ? "" : hostName;
  }

  public State getDesiredState() {
    return desiredState;
  }

  public State getCurrentState() {
    return currentState;
  }

  /**
   * Builds a summary for every host component of the given service/component,
   * pairing each current-state entity with its matching desired-state entity.
   * Components without a desired-state record are skipped.
   *
   * @return list of summaries (possibly empty, never {@code null})
   */
  public static List<HostComponentSummary> getHostComponentSummaries(String serviceName, String componentName) {
    List<HostComponentSummary> hostComponentSummaries = new ArrayList<>();
    List<HostComponentStateEntity> hostComponentStates = hostComponentStateDao.findByServiceAndComponent(serviceName, componentName);

    if (hostComponentStates != null) {
      for (HostComponentStateEntity hcse : hostComponentStates) {
        // Find the corresponding record for HostComponentDesiredStateEntity
        HostComponentDesiredStateEntity hcdse = hostComponentDesiredStateDao.findByServiceComponentAndHost(hcse.getServiceName(), hcse.getComponentName(), hcse.getHostName());
        if (hcdse != null) {
          HostComponentSummary s = new HostComponentSummary(hcse.getServiceName(), hcse.getComponentName(), hcse.getHostId(), hcdse.getDesiredState(), hcse.getCurrentState());
          hostComponentSummaries.add(s);
        }
      }
    }
    return hostComponentSummaries;
  }

  /**
   * Equality over the identifying fields (service name, component name, host
   * id). Added because this class already overrides {@link #hashCode()};
   * equals/hashCode must be overridden together to honor the Object contract.
   */
  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    if (!(obj instanceof HostComponentSummary)) {
      return false;
    }
    HostComponentSummary other = (HostComponentSummary) obj;
    return Objects.equals(serviceName, other.serviceName)
        && Objects.equals(componentName, other.componentName)
        && Objects.equals(hostId, other.hostId);
  }

  /**
   * Hash over the same fields used by {@link #equals(Object)}. Uses
   * {@link Objects#hash(Object...)} instead of the previous additive scheme,
   * which omitted the prime multiplication between terms.
   */
  @Override
  public int hashCode() {
    return Objects.hash(serviceName, componentName, hostId);
  }
}
| apache-2.0 |
Alachisoft/TayzGrid | tools/addqueryindex/src/com/alachisoft/tayzgrid/tools/AssemblyUsage.java | 2617 | /*
* Copyright (c) 2015, Alachisoft. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.alachisoft.tayzgrid.tools;
/**
Internal class that helps display assembly usage information.
*/
public final class AssemblyUsage
{
    // Tool version printed in the logo banner.
    private static String VERSION = "4.6.0.0";

    /**
     * Displays the logo banner on stdout.
     * @param printlogo Specifies whether to print the logo or not; if false,
     *                  nothing is printed.
     */
    public static void PrintLogo(boolean printlogo)
    {
        String logo = "Alachisoft (R) TayzGrid Utility - AddQueryIndex. Version " + VERSION + "\nCopyright (C) Alachisoft 2015. All rights reserved.\n";

        if (printlogo)
        {
            System.out.println(logo);
            System.out.println();
        }
    }

    /**
     * Displays the command-line usage information for the addqueryindex tool
     * on stdout (options, their meanings, and optional flags).
     */
    public static void PrintUsage()
    {
        String usage = "Usage: addqueryindex [option[...]]." + "\r\n" + "\r\n" + "\r\n" + "" + "\r\n" + " "
                + " Specifies the id/name of the cache for which query index will be configured. "
                + "\r\n" + "\r\n" + " -a --assembly-path" + "\r\n" + " "
                + "Specifies the path of the assembly which will be configured. "
                + "\r\n" + "\r\n" + " -c --class" + "\r\n" + " Specifies the class for query indexing. "
                + "\r\n" + "\r\n" + " -L --attrib-list" + "\r\n" + " "
                + " Specifies the attributes for query indexing ($ seperated and within single quotation) e.g. 'CustomerID$Name' .... "
                + "\r\n" + "\r\n" + "Optional:" + "\r\n" + "\r\n" + " -s --server" + "\r\n" + " "
                + " Specifies the TayzGrid server name/ip." + "\r\n" + "\r\n" + " -p --port" + "\r\n" + " "
                + " Specifies the port on which TayzGrid server is listening." + "\r\n" + "\r\n"
                + " -G" + "\r\n" + " " + " Suppresses display of the logo banner " + "\r\n" + "\r\n" + " -h --help" + "\r\n" + " "
                + " Displays a detailed help screen " + "\r\n" + "";

        System.out.println(usage);
    }
}
| apache-2.0 |
CloudVLab/professional-services | examples/pubsub-publish-avro-example/src/test/java/com/google/cloud/pso/pubsub/GenerateRandomEmployeeReaderTest.java | 1555 | /*
* Copyright (C) 2018 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.cloud.pso.pubsub;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.MatcherAssert.assertThat;
import com.google.cloud.pso.Employee;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
/**
 * Unit tests for {@link GenerateRandomEmployeeReader}.
 */
@RunWith(JUnit4.class)
public class GenerateRandomEmployeeReaderTest {

    // Fixed id/name pair used to build a deterministic expected record.
    private static final Long EMP_ID = 1234L;
    private static final String EMP_NAME = "John";

    private GenerateRandomEmployeeReader reader;
    private Employee expectedEmployee;

    @Before
    public void setup() {
        // A reader configured to produce a single record is enough for these tests.
        reader = new GenerateRandomEmployeeReader(1);
        expectedEmployee = Employee.newBuilder()
                .setId(EMP_ID)
                .setName(EMP_NAME)
                .build();
    }

    @Test
    public void testGetEmployee() {
        Employee actual = reader.getEmployee(EMP_ID, EMP_NAME);
        assertThat(actual, is(equalTo(expectedEmployee)));
    }
}
| apache-2.0 |
jior/glaf | workspace/glaf-core/src/main/java/com/glaf/core/util/http/HttpConnectionFactory.java | 3739 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.glaf.core.util.http;
import javax.net.ssl.SSLContext;
import javax.net.ssl.TrustManager;
import com.glaf.core.util.MyX509TrustManager;
import com.ning.http.client.AsyncHttpClient;
import com.ning.http.client.AsyncHttpClientConfig;
import com.ning.http.client.providers.netty.NettyAsyncHttpProvider;
import com.ning.http.client.providers.netty.NettyAsyncHttpProviderConfig;
/**
 * Lazily creates and caches two shared {@link AsyncHttpClient} instances: one
 * for plain HTTP and one for HTTPS. The HTTPS client is configured with
 * {@link MyX509TrustManager} and accepts any certificate.
 */
public class HttpConnectionFactory {

    // volatile publishes the reference across threads; creation itself is
    // guarded by the synchronized accessors below.
    private static volatile AsyncHttpClient httpClient;
    private static volatile AsyncHttpClient httpsClient;

    /** Closes both shared clients if they have been created and are still open. */
    public static synchronized void close() {
        if (httpClient != null && !httpClient.isClosed()) {
            httpClient.close();
        }
        if (httpsClient != null && !httpsClient.isClosed()) {
            httpsClient.close();
        }
    }

    /**
     * Returns the shared plain-HTTP client, creating it on first use or after
     * it has been closed. synchronized to avoid two threads racing through the
     * null-check and building duplicate clients.
     */
    public static synchronized AsyncHttpClient getHttpClient() {
        if (httpClient == null || httpClient.isClosed()) {
            NettyAsyncHttpProviderConfig providerConfig = new NettyAsyncHttpProviderConfig();
            AsyncHttpClientConfig.Builder builder = new AsyncHttpClientConfig.Builder();
            builder.setMaxRequestRetry(3);
            builder.setConnectTimeout(1000 * 10);
            builder.setCompressionEnforced(false);
            builder.setAllowPoolingSslConnections(false);
            builder.setAllowPoolingConnections(true);
            builder.setFollowRedirect(true);
            builder.setMaxRedirects(3);
            builder.setMaxConnectionsPerHost(500);
            builder.setMaxConnections(5000);
            builder.setAsyncHttpClientProviderConfig(providerConfig);
            AsyncHttpClientConfig config = builder.build();
            httpClient = new AsyncHttpClient(
                    new NettyAsyncHttpProvider(config), config);
        }
        return httpClient;
    }

    /**
     * Returns the shared HTTPS client, creating it on first use or after it
     * has been closed.
     */
    public static synchronized AsyncHttpClient getHttpsClient() {
        if (httpsClient == null || httpsClient.isClosed()) {
            NettyAsyncHttpProviderConfig providerConfig = new NettyAsyncHttpProviderConfig();
            AsyncHttpClientConfig.Builder builder = new AsyncHttpClientConfig.Builder();
            builder.setMaxRequestRetry(3);
            builder.setConnectTimeout(1000 * 10);
            builder.setCompressionEnforced(false);
            builder.setAllowPoolingSslConnections(false);
            builder.setAllowPoolingConnections(true);
            builder.setFollowRedirect(true);
            builder.setMaxRedirects(3);
            builder.setMaxConnectionsPerHost(500);
            builder.setMaxConnections(5000);
            builder.setAsyncHttpClientProviderConfig(providerConfig);
            try {
                // Create the SSLContext and initialize it with our custom trust manager.
                TrustManager[] tm = { new MyX509TrustManager() };
                SSLContext sslContext = SSLContext
                        .getInstance("SSL", "SunJSSE");
                sslContext.init(null, tm, new java.security.SecureRandom());
                builder.setSSLContext(sslContext);
                builder.setAcceptAnyCertificate(true);
                builder.setAllowPoolingSslConnections(true);
            } catch (Exception ex) {
                // Best effort: fall back to the provider's default SSL settings.
            }
            // BUG FIX: build the config AFTER the SSL settings have been applied.
            // Previously the config was built before the try-block, so the
            // SSLContext and related settings were silently discarded.
            AsyncHttpClientConfig config = builder.build();
            httpsClient = new AsyncHttpClient(
                    new NettyAsyncHttpProvider(config), config);
        }
        return httpsClient;
    }

    /** Static-only utility class; not instantiable. */
    private HttpConnectionFactory() {
    }
}
| apache-2.0 |
firebata/skysport160125 | one/src/main/java/com/skysport/interfaces/mapper/permission/UserInfoMapper.java | 491 | package com.skysport.interfaces.mapper.permission;
import com.skysport.core.bean.permission.UserInfo;
import com.skysport.core.mapper.CommonMapper;
import org.springframework.stereotype.Repository;
/**
 * Data-access mapper for {@link UserInfo} records, extending the generic CRUD
 * operations provided by {@link CommonMapper}. Presumably backed by a MyBatis
 * XML mapping with matching statement ids — TODO confirm against the mapper XML.
 */
@Repository("userInfoMapper")
public interface UserInfoMapper extends CommonMapper<UserInfo> {
    /** Looks up a single user by user name; behavior for duplicates depends on the backing query. */
    UserInfo queryInfoByUserInfoName(String userName);
    /** Persists a password change carried in the given {@link UserInfo}. */
    void chgpwd(UserInfo userInfo);
    /** Returns the parent group id for the given group id. */
    String queryParentId(String groupId);
}
| apache-2.0 |
heitorfm/flicklib | flicklib-omdb/src/main/java/com/flicklib/service/movie/omdb/OmdbModule.java | 304 | package com.flicklib.service.movie.omdb;
import com.flicklib.api.MovieInfoFetcher;
import com.google.inject.AbstractModule;
/**
 * Guice module wiring the OMDB implementation: any injection point asking for
 * a {@link MovieInfoFetcher} annotated with {@code @Omdb} receives an
 * {@link OmdbFetcher}.
 */
public class OmdbModule extends AbstractModule {
    @Override
    protected void configure() {
        bind(MovieInfoFetcher.class).annotatedWith(Omdb.class).to(OmdbFetcher.class);
    }
}
| apache-2.0 |
kumarmah/JStego | J4.0/src/BlinkMsg.java | 1217 | /*
* BlinkMsg.java
*
* Created on October 26, 2007, 9:29 PM
*
* To change this template, choose Tools | Template Manager
* and open the template in the editor.
*/
/**
*
* @author Mahendra Kumar(kumarmahATgmail.com)
*/
/**
 * Blinks the text "Processing......" on a {@link javax.swing.JLabel} from a
 * background thread started by the constructor. Call {@link #makefalse()} to
 * stop blinking; the label is then cleared and the thread exits.
 *
 * NOTE(review): the label is updated off the Event Dispatch Thread, as in the
 * original implementation — Swing formally requires EDT access; confirm this
 * is acceptable for this label before tightening.
 */
public class BlinkMsg implements Runnable {

    // Label whose text is toggled by the background thread.
    javax.swing.JLabel processmsg;

    // BUG FIX: volatile so the stop request written by makefalse() on another
    // thread is guaranteed to become visible to the blink loop. Without it the
    // loop could spin forever on a stale cached value.
    volatile boolean c = true;

    /**
     * Creates the blinker and immediately starts its background thread.
     *
     * @param processmsg label to blink; must not be null
     */
    public BlinkMsg(javax.swing.JLabel processmsg) {
        this.processmsg = processmsg;
        Thread t = new Thread(this);
        t.start();
    }

    /** Blink loop: shows the message every ~100 ms until stopped, then clears it. */
    public void run() {
        while (true) {
            if (c == false) {
                // Stop requested: clear the label and exit the loop.
                processmsg.setText("");
                processmsg.repaint();
                try {
                    Thread.sleep(2);
                } catch (InterruptedException ex) {
                    // Preserve the interrupt status instead of swallowing it.
                    Thread.currentThread().interrupt();
                }
                break;
            }
            processmsg.setText("Processing......");
            processmsg.repaint();
            try {
                Thread.sleep(100);
            } catch (InterruptedException ex) {
                Thread.currentThread().interrupt();
            }
        }
    }

    /** Requests the blink loop to stop; safe to call from any thread. */
    public void makefalse() {
        c = false;
    }
}
| apache-2.0 |
kris-sigur/crawlrss | src/main/java/is/landsbokasafn/crawler/rss/db/Site.java | 1846 | package is.landsbokasafn.crawler.rss.db;
import java.util.Date;
import java.util.Set;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.FetchType;
import javax.persistence.GeneratedValue;
import javax.persistence.Id;
import javax.persistence.OneToMany;
import javax.persistence.Table;
/**
 * JPA entity mapped to the {@code Site} table: a crawled site with its feeds
 * and implied pages. Both collections are eagerly fetched and owned by the
 * child side ({@code mappedBy}).
 */
@Entity
@Table(name="Site")
public class Site {
	// Surrogate primary key, generated by the provider.
	@Id
	@GeneratedValue
	int id;
	// Human-readable site name.
	@Column(name="Name")
	String name;
	// Timestamp of the most recent feed update for this site.
	@Column(name="LastFeedUpdate")
	Date lastFeedUpdate;
	// Minimum wait between feed fetches; stored as a string (format defined by callers — TODO confirm).
	@Column(name="MinWaitPeriod")
	String minWaitPeriod;
	// Whether this site is currently being crawled.
	@Column(name="Active")
	boolean active;
	// Feeds belonging to this site (inverse side of Feed.site).
	@OneToMany(fetch = FetchType.EAGER, targetEntity=Feed.class, mappedBy = "site")
	Set<Feed> feeds;
	// Implied pages belonging to this site (inverse side of ImpliedPage.site).
	@OneToMany(fetch = FetchType.EAGER, targetEntity=ImpliedPage.class, mappedBy = "site")
	Set<ImpliedPage> pages;
	/** No-arg constructor required by JPA. */
	public Site() {
	}
	public int getId() {
		return id;
	}
	public void setId(int id) {
		this.id = id;
	}
	public String getName() {
		return name;
	}
	public void setName(String name) {
		this.name = name;
	}
	public Date getLastFeedUpdate() {
		return lastFeedUpdate;
	}
	public void setLastFeedUpdate(Date lastFeedUpdate) {
		this.lastFeedUpdate = lastFeedUpdate;
	}
	public String getMinWaitPeriod() {
		return minWaitPeriod;
	}
	public void setMinWaitPeriod(String minWaitPeriod) {
		this.minWaitPeriod = minWaitPeriod;
	}
	public boolean isActive() {
		return active;
	}
	public void setActive(boolean active) {
		this.active = active;
	}
	public Set<Feed> getFeeds() {
		return feeds;
	}
	public Set<ImpliedPage> getPages() {
		return pages;
	}
	public void setPages(Set<ImpliedPage> pages) {
		this.pages = pages;
	}
	public void setFeeds(Set<Feed> feeds) {
		this.feeds = feeds;
	}
}
| apache-2.0 |
dlukyanov/nifi | nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/serialization/FlowFromDOMFactory.java | 24219 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.controller.serialization;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import org.apache.nifi.connectable.Size;
import org.apache.nifi.controller.ScheduledState;
import org.apache.nifi.controller.service.ControllerServiceState;
import org.apache.nifi.encrypt.EncryptionException;
import org.apache.nifi.encrypt.StringEncryptor;
import org.apache.nifi.groups.RemoteProcessGroupPortDescriptor;
import org.apache.nifi.remote.StandardRemoteProcessGroupPortDescriptor;
import org.apache.nifi.scheduling.ExecutionNode;
import org.apache.nifi.scheduling.SchedulingStrategy;
import org.apache.nifi.util.DomUtils;
import org.apache.nifi.web.api.dto.BundleDTO;
import org.apache.nifi.web.api.dto.ConnectableDTO;
import org.apache.nifi.web.api.dto.ConnectionDTO;
import org.apache.nifi.web.api.dto.ControllerServiceDTO;
import org.apache.nifi.web.api.dto.FlowSnippetDTO;
import org.apache.nifi.web.api.dto.FunnelDTO;
import org.apache.nifi.web.api.dto.LabelDTO;
import org.apache.nifi.web.api.dto.PortDTO;
import org.apache.nifi.web.api.dto.PositionDTO;
import org.apache.nifi.web.api.dto.ProcessGroupDTO;
import org.apache.nifi.web.api.dto.ProcessorConfigDTO;
import org.apache.nifi.web.api.dto.ProcessorDTO;
import org.apache.nifi.web.api.dto.RemoteProcessGroupDTO;
import org.apache.nifi.web.api.dto.ReportingTaskDTO;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.w3c.dom.Element;
import org.w3c.dom.NodeList;
/**
 * Static factory that rebuilds the NiFi flow DTO object graph from the DOM of
 * a serialized flow. Each {@code getXxx} method maps one XML element to the
 * corresponding DTO. Sensitive values (properties, proxy passwords) are
 * decrypted with the supplied {@link StringEncryptor} when one is provided.
 */
public class FlowFromDOMFactory {
    private static final Logger logger = LoggerFactory.getLogger(FlowFromDOMFactory.class);

    /**
     * Reads a {@code <bundle>} element into a {@link BundleDTO}.
     * Returns null when the element is absent.
     * NOTE(review): assumes group/artifact/version children all exist — a
     * malformed bundle element would NPE here; confirm the serializer always
     * writes all three.
     */
    public static BundleDTO getBundle(final Element bundleElement) {
        if (bundleElement == null) {
            return null;
        }
        final Element groupElement = DomUtils.getChild(bundleElement, "group");
        final Element artifactElement = DomUtils.getChild(bundleElement, "artifact");
        final Element versionElement = DomUtils.getChild(bundleElement, "version");
        return new BundleDTO(groupElement.getTextContent(), artifactElement.getTextContent(), versionElement.getTextContent());
    }

    /** Reads x/y attributes of a {@code <position>} element; the element is mandatory. */
    public static PositionDTO getPosition(final Element positionElement) {
        if (positionElement == null) {
            throw new IllegalArgumentException("Invalid Flow: Found no 'position' element");
        }
        return new PositionDTO(Double.parseDouble(positionElement.getAttribute("x")), Double.parseDouble(positionElement.getAttribute("y")));
    }

    /** Reads width/height attributes of a {@code <size>} element; the element is mandatory. */
    public static Size getSize(final Element sizeElement) {
        if (sizeElement == null) {
            throw new IllegalArgumentException("Invalid Flow: Found no 'size' element");
        }
        return new Size(Double.parseDouble(sizeElement.getAttribute("width")), Double.parseDouble(sizeElement.getAttribute("height")));
    }

    /**
     * Collects {@code <style name="...">value</style>} children into a map.
     * Returns an empty map when the styles element is absent.
     */
    public static Map<String, String> getStyle(final Element stylesElement) {
        final Map<String, String> styles = new HashMap<>();
        if (stylesElement == null) {
            return styles;
        }
        for (final Element styleElement : getChildrenByTagName(stylesElement, "style")) {
            final String styleName = styleElement.getAttribute("name");
            final String styleValue = styleElement.getTextContent();
            styles.put(styleName, styleValue);
        }
        return styles;
    }

    /**
     * Maps a {@code <controllerService>} element to a {@link ControllerServiceDTO}.
     * The serialized boolean "enabled" flag is translated into the
     * ENABLED/DISABLED state names expected by the DTO.
     */
    public static ControllerServiceDTO getControllerService(final Element element, final StringEncryptor encryptor) {
        final ControllerServiceDTO dto = new ControllerServiceDTO();
        dto.setId(getString(element, "id"));
        dto.setName(getString(element, "name"));
        dto.setComments(getString(element, "comment"));
        dto.setType(getString(element, "class"));
        dto.setBundle(getBundle(DomUtils.getChild(element, "bundle")));
        final boolean enabled = getBoolean(element, "enabled");
        dto.setState(enabled ? ControllerServiceState.ENABLED.name() : ControllerServiceState.DISABLED.name());
        dto.setProperties(getProperties(element, encryptor));
        dto.setAnnotationData(getString(element, "annotationData"));
        return dto;
    }

    /** Maps a {@code <reportingTask>} element to a {@link ReportingTaskDTO}. */
    public static ReportingTaskDTO getReportingTask(final Element element, final StringEncryptor encryptor) {
        final ReportingTaskDTO dto = new ReportingTaskDTO();
        dto.setId(getString(element, "id"));
        dto.setName(getString(element, "name"));
        dto.setComments(getString(element, "comment"));
        dto.setType(getString(element, "class"));
        dto.setBundle(getBundle(DomUtils.getChild(element, "bundle")));
        dto.setSchedulingPeriod(getString(element, "schedulingPeriod"));
        dto.setState(getString(element, "scheduledState"));
        dto.setSchedulingStrategy(getString(element, "schedulingStrategy"));
        dto.setProperties(getProperties(element, encryptor));
        dto.setAnnotationData(getString(element, "annotationData"));
        return dto;
    }

    /**
     * Recursively maps a {@code <processGroup>} element (its variables,
     * processors, ports, funnels, labels, connections, nested groups and
     * remote groups) into a {@link ProcessGroupDTO} whose contents are a
     * {@link FlowSnippetDTO}.
     */
    public static ProcessGroupDTO getProcessGroup(final String parentId, final Element element, final StringEncryptor encryptor, final FlowEncodingVersion encodingVersion) {
        final ProcessGroupDTO dto = new ProcessGroupDTO();
        final String groupId = getString(element, "id");
        dto.setId(groupId);
        dto.setParentGroupId(parentId);
        dto.setName(getString(element, "name"));
        dto.setPosition(getPosition(DomUtils.getChild(element, "position")));
        dto.setComments(getString(element, "comment"));
        // Group-scoped variable registry entries, stored as name/value attributes.
        final Map<String, String> variables = new HashMap<>();
        final NodeList variableList = DomUtils.getChildNodesByTagName(element, "variable");
        for (int i = 0; i < variableList.getLength(); i++) {
            final Element variableElement = (Element) variableList.item(i);
            final String name = variableElement.getAttribute("name");
            final String value = variableElement.getAttribute("value");
            variables.put(name, value);
        }
        dto.setVariables(variables);
        final Set<ProcessorDTO> processors = new HashSet<>();
        final Set<ConnectionDTO> connections = new HashSet<>();
        final Set<FunnelDTO> funnels = new HashSet<>();
        final Set<PortDTO> inputPorts = new HashSet<>();
        final Set<PortDTO> outputPorts = new HashSet<>();
        final Set<LabelDTO> labels = new HashSet<>();
        final Set<ProcessGroupDTO> processGroups = new HashSet<>();
        final Set<RemoteProcessGroupDTO> remoteProcessGroups = new HashSet<>();
        NodeList nodeList = DomUtils.getChildNodesByTagName(element, "processor");
        for (int i = 0; i < nodeList.getLength(); i++) {
            processors.add(getProcessor((Element) nodeList.item(i), encryptor));
        }
        nodeList = DomUtils.getChildNodesByTagName(element, "funnel");
        for (int i = 0; i < nodeList.getLength(); i++) {
            funnels.add(getFunnel((Element) nodeList.item(i)));
        }
        nodeList = DomUtils.getChildNodesByTagName(element, "inputPort");
        for (int i = 0; i < nodeList.getLength(); i++) {
            inputPorts.add(getPort((Element) nodeList.item(i)));
        }
        nodeList = DomUtils.getChildNodesByTagName(element, "outputPort");
        for (int i = 0; i < nodeList.getLength(); i++) {
            outputPorts.add(getPort((Element) nodeList.item(i)));
        }
        nodeList = DomUtils.getChildNodesByTagName(element, "label");
        for (int i = 0; i < nodeList.getLength(); i++) {
            labels.add(getLabel((Element) nodeList.item(i)));
        }
        // Nested groups are parsed recursively with this group as parent.
        nodeList = DomUtils.getChildNodesByTagName(element, "processGroup");
        for (int i = 0; i < nodeList.getLength(); i++) {
            processGroups.add(getProcessGroup(groupId, (Element) nodeList.item(i), encryptor, encodingVersion));
        }
        nodeList = DomUtils.getChildNodesByTagName(element, "remoteProcessGroup");
        for (int i = 0; i < nodeList.getLength(); i++) {
            remoteProcessGroups.add(getRemoteProcessGroup((Element) nodeList.item(i), encryptor));
        }
        nodeList = DomUtils.getChildNodesByTagName(element, "connection");
        for (int i = 0; i < nodeList.getLength(); i++) {
            connections.add(getConnection((Element) nodeList.item(i)));
        }
        final FlowSnippetDTO groupContents = new FlowSnippetDTO();
        groupContents.setConnections(connections);
        groupContents.setFunnels(funnels);
        groupContents.setInputPorts(inputPorts);
        groupContents.setLabels(labels);
        groupContents.setOutputPorts(outputPorts);
        groupContents.setProcessGroups(processGroups);
        groupContents.setProcessors(processors);
        groupContents.setRemoteProcessGroups(remoteProcessGroups);
        dto.setContents(groupContents);
        return dto;
    }

    /**
     * Maps a {@code <connection>} element (endpoints, bend points, selected
     * relationships, back-pressure settings, prioritizers) to a
     * {@link ConnectionDTO}.
     */
    public static ConnectionDTO getConnection(final Element element) {
        final ConnectionDTO dto = new ConnectionDTO();
        dto.setId(getString(element, "id"));
        dto.setName(getString(element, "name"));
        dto.setLabelIndex(getOptionalInt(element, "labelIndex"));
        dto.setzIndex(getOptionalLong(element, "zIndex"));
        final List<PositionDTO> bends = new ArrayList<>();
        final Element bendPointsElement = DomUtils.getChild(element, "bendPoints");
        if (bendPointsElement != null) {
            for (final Element bendPointElement : getChildrenByTagName(bendPointsElement, "bendPoint")) {
                final PositionDTO bend = getPosition(bendPointElement);
                bends.add(bend);
            }
        }
        dto.setBends(bends);
        final ConnectableDTO sourceConnectable = new ConnectableDTO();
        dto.setSource(sourceConnectable);
        sourceConnectable.setId(getString(element, "sourceId"));
        sourceConnectable.setGroupId(getString(element, "sourceGroupId"));
        sourceConnectable.setType(getString(element, "sourceType"));
        final ConnectableDTO destConnectable = new ConnectableDTO();
        dto.setDestination(destConnectable);
        destConnectable.setId(getString(element, "destinationId"));
        destConnectable.setGroupId(getString(element, "destinationGroupId"));
        destConnectable.setType(getString(element, "destinationType"));
        final Set<String> relationships = new HashSet<>();
        final List<Element> relationshipNodeList = getChildrenByTagName(element, "relationship");
        for (final Element relationshipElem : relationshipNodeList) {
            relationships.add(relationshipElem.getTextContent());
        }
        dto.setSelectedRelationships(relationships);
        dto.setBackPressureObjectThreshold(getLong(element, "maxWorkQueueSize"));
        final String maxDataSize = getString(element, "maxWorkQueueDataSize");
        if (maxDataSize != null && !maxDataSize.trim().isEmpty()) {
            dto.setBackPressureDataSizeThreshold(maxDataSize);
        }
        // Older flows may omit the expiration; default to no expiration ("0 sec").
        String expiration = getString(element, "flowFileExpiration");
        if (expiration == null) {
            expiration = "0 sec";
        }
        dto.setFlowFileExpiration(expiration);
        final List<String> prioritizerClasses = new ArrayList<>();
        final List<Element> prioritizerNodeList = getChildrenByTagName(element, "queuePrioritizerClass");
        for (final Element prioritizerElement : prioritizerNodeList) {
            prioritizerClasses.add(prioritizerElement.getTextContent().trim());
        }
        dto.setPrioritizers(prioritizerClasses);
        return dto;
    }

    /**
     * Maps a {@code <remoteProcessGroup>} element to a
     * {@link RemoteProcessGroupDTO}; the serialized proxy password is
     * decrypted when an encryptor is supplied.
     */
    public static RemoteProcessGroupDTO getRemoteProcessGroup(final Element element, final StringEncryptor encryptor) {
        final RemoteProcessGroupDTO dto = new RemoteProcessGroupDTO();
        dto.setId(getString(element, "id"));
        dto.setName(getString(element, "name"));
        dto.setTargetUri(getString(element, "url"));
        dto.setTargetUris(getString(element, "urls"));
        dto.setTransmitting(getBoolean(element, "transmitting"));
        dto.setPosition(getPosition(DomUtils.getChild(element, "position")));
        dto.setCommunicationsTimeout(getString(element, "timeout"));
        dto.setComments(getString(element, "comment"));
        dto.setYieldDuration(getString(element, "yieldPeriod"));
        dto.setTransportProtocol(getString(element, "transportProtocol"));
        dto.setProxyHost(getString(element, "proxyHost"));
        dto.setProxyPort(getOptionalInt(element, "proxyPort"));
        dto.setProxyUser(getString(element, "proxyUser"));
        dto.setLocalNetworkInterface(getString(element, "networkInterface"));
        final String rawPassword = getString(element, "proxyPassword");
        final String proxyPassword = encryptor == null ? rawPassword : decrypt(rawPassword, encryptor);
        dto.setProxyPassword(proxyPassword);
        return dto;
    }

    /** Maps a {@code <label>} element (text, position, size, styles) to a {@link LabelDTO}. */
    public static LabelDTO getLabel(final Element element) {
        final LabelDTO dto = new LabelDTO();
        dto.setId(getString(element, "id"));
        dto.setLabel(getString(element, "value"));
        dto.setPosition(getPosition(DomUtils.getChild(element, "position")));
        final Size size = getSize(DomUtils.getChild(element, "size"));
        dto.setWidth(size.getWidth());
        dto.setHeight(size.getHeight());
        dto.setStyle(getStyle(DomUtils.getChild(element, "styles")));
        return dto;
    }

    /** Maps a {@code <funnel>} element (id and position only) to a {@link FunnelDTO}. */
    public static FunnelDTO getFunnel(final Element element) {
        final FunnelDTO dto = new FunnelDTO();
        dto.setId(getString(element, "id"));
        dto.setPosition(getPosition(DomUtils.getChild(element, "position")));
        return dto;
    }

    /**
     * Maps an {@code <inputPort>}/{@code <outputPort>} element to a
     * {@link PortDTO}, including optional concurrency and user/group access
     * control lists.
     */
    public static PortDTO getPort(final Element element) {
        final PortDTO portDTO = new PortDTO();
        portDTO.setId(getString(element, "id"));
        portDTO.setPosition(getPosition(DomUtils.getChild(element, "position")));
        portDTO.setName(getString(element, "name"));
        portDTO.setComments(getString(element, "comments"));
        final ScheduledState scheduledState = getScheduledState(element);
        portDTO.setState(scheduledState.toString());
        final List<Element> maxTasksElements = getChildrenByTagName(element, "maxConcurrentTasks");
        if (!maxTasksElements.isEmpty()) {
            portDTO.setConcurrentlySchedulableTaskCount(Integer.parseInt(maxTasksElements.get(0).getTextContent()));
        }
        final List<Element> userAccessControls = getChildrenByTagName(element, "userAccessControl");
        if (userAccessControls != null && !userAccessControls.isEmpty()) {
            final Set<String> users = new HashSet<>();
            portDTO.setUserAccessControl(users);
            for (final Element userElement : userAccessControls) {
                users.add(userElement.getTextContent());
            }
        }
        final List<Element> groupAccessControls = getChildrenByTagName(element, "groupAccessControl");
        if (groupAccessControls != null && !groupAccessControls.isEmpty()) {
            final Set<String> groups = new HashSet<>();
            portDTO.setGroupAccessControl(groups);
            for (final Element groupElement : groupAccessControls) {
                groups.add(groupElement.getTextContent());
            }
        }
        return portDTO;
    }

    /**
     * Maps a remote group port element to a descriptor, stripping the
     * remote-process-group id prefix that was serialized in front of the
     * actual port id.
     */
    public static RemoteProcessGroupPortDescriptor getRemoteProcessGroupPort(final Element element) {
        final StandardRemoteProcessGroupPortDescriptor descriptor = new StandardRemoteProcessGroupPortDescriptor();
        // What we have serialized is the ID of the Remote Process Group, followed by a dash ('-'), followed by
        // the actual ID of the port; we want to get rid of the remote process group id.
        String id = getString(element, "id");
        if (id.length() > 37) {
            id = id.substring(37);
        }
        descriptor.setId(id);
        descriptor.setName(getString(element, "name"));
        descriptor.setComments(getString(element, "comments"));
        descriptor.setConcurrentlySchedulableTaskCount(getInt(element, "maxConcurrentTasks"));
        descriptor.setUseCompression(getBoolean(element, "useCompression"));
        descriptor.setBatchCount(getOptionalInt(element, "batchCount"));
        descriptor.setBatchSize(getString(element, "batchSize"));
        descriptor.setBatchDuration(getString(element, "batchDuration"));
        descriptor.setTransmitting("RUNNING".equalsIgnoreCase(getString(element, "scheduledState")));
        return descriptor;
    }

    /**
     * Maps a {@code <processor>} element to a {@link ProcessorDTO}, applying
     * defaults for scheduling strategy (TIMER_DRIVEN) and execution node (ALL)
     * when the serialized values are missing or blank.
     */
    public static ProcessorDTO getProcessor(final Element element, final StringEncryptor encryptor) {
        final ProcessorDTO dto = new ProcessorDTO();
        dto.setId(getString(element, "id"));
        dto.setName(getString(element, "name"));
        dto.setType(getString(element, "class"));
        dto.setBundle(getBundle(DomUtils.getChild(element, "bundle")));
        dto.setPosition(getPosition(DomUtils.getChild(element, "position")));
        dto.setStyle(getStyle(DomUtils.getChild(element, "styles")));
        final ProcessorConfigDTO configDto = new ProcessorConfigDTO();
        dto.setConfig(configDto);
        configDto.setComments(getString(element, "comment"));
        configDto.setConcurrentlySchedulableTaskCount(getInt(element, "maxConcurrentTasks"));
        final String schedulingPeriod = getString(element, "schedulingPeriod");
        configDto.setSchedulingPeriod(schedulingPeriod);
        configDto.setPenaltyDuration(getString(element, "penalizationPeriod"));
        configDto.setYieldDuration(getString(element, "yieldPeriod"));
        configDto.setBulletinLevel(getString(element, "bulletinLevel"));
        configDto.setLossTolerant(getBoolean(element, "lossTolerant"));
        final ScheduledState scheduledState = getScheduledState(element);
        dto.setState(scheduledState.toString());
        // handle scheduling strategy
        final String schedulingStrategyName = getString(element, "schedulingStrategy");
        if (schedulingStrategyName == null || schedulingStrategyName.trim().isEmpty()) {
            configDto.setSchedulingStrategy(SchedulingStrategy.TIMER_DRIVEN.name());
        } else {
            configDto.setSchedulingStrategy(schedulingStrategyName.trim());
        }
        // handle execution node
        final String executionNode = getString(element, "executionNode");
        if (executionNode == null || executionNode.trim().isEmpty()) {
            configDto.setExecutionNode(ExecutionNode.ALL.name());
        } else {
            configDto.setExecutionNode(executionNode.trim());
        }
        // Serialized in nanoseconds; the DTO stores milliseconds.
        final Long runDurationNanos = getOptionalLong(element, "runDurationNanos");
        if (runDurationNanos != null) {
            configDto.setRunDurationMillis(TimeUnit.NANOSECONDS.toMillis(runDurationNanos));
        }
        configDto.setProperties(getProperties(element, encryptor));
        configDto.setAnnotationData(getString(element, "annotationData"));
        final Set<String> autoTerminatedRelationships = new HashSet<>();
        final List<Element> autoTerminateList = getChildrenByTagName(element, "autoTerminatedRelationship");
        for (final Element autoTerminateElement : autoTerminateList) {
            autoTerminatedRelationships.add(autoTerminateElement.getTextContent());
        }
        configDto.setAutoTerminatedRelationships(autoTerminatedRelationships);
        return dto;
    }

    /**
     * Collects {@code <property>} children into an insertion-ordered map,
     * decrypting values when an encryptor is supplied.
     */
    private static LinkedHashMap<String, String> getProperties(final Element element, final StringEncryptor encryptor) {
        final LinkedHashMap<String, String> properties = new LinkedHashMap<>();
        final List<Element> propertyNodeList = getChildrenByTagName(element, "property");
        for (final Element propertyElement : propertyNodeList) {
            final String name = getString(propertyElement, "name");
            final String rawPropertyValue = getString(propertyElement, "value");
            final String value = encryptor == null ? rawPropertyValue : decrypt(rawPropertyValue, encryptor);
            properties.put(name, value);
        }
        return properties;
    }

    /** Returns the text content of the first child with the given name, or null if absent. */
    private static String getString(final Element element, final String childElementName) {
        final List<Element> nodeList = getChildrenByTagName(element, childElementName);
        if (nodeList == null || nodeList.isEmpty()) {
            return null;
        }
        final Element childElement = nodeList.get(0);
        return childElement.getTextContent();
    }

    /** Like {@link #getString} but parsed as an Integer; null when absent. */
    private static Integer getOptionalInt(final Element element, final String childElementName) {
        final List<Element> nodeList = getChildrenByTagName(element, childElementName);
        if (nodeList == null || nodeList.isEmpty()) {
            return null;
        }
        final Element childElement = nodeList.get(0);
        final String val = childElement.getTextContent();
        if (val == null) {
            return null;
        }
        return Integer.parseInt(val);
    }

    /** Like {@link #getString} but parsed as a Long; null when absent. */
    private static Long getOptionalLong(final Element element, final String childElementName) {
        final List<Element> nodeList = getChildrenByTagName(element, childElementName);
        if (nodeList == null || nodeList.isEmpty()) {
            return null;
        }
        final Element childElement = nodeList.get(0);
        final String val = childElement.getTextContent();
        if (val == null) {
            return null;
        }
        return Long.parseLong(val);
    }

    /** Mandatory int child; throws NumberFormatException when absent or malformed. */
    private static int getInt(final Element element, final String childElementName) {
        return Integer.parseInt(getString(element, childElementName));
    }

    /** Mandatory long child; throws NumberFormatException when absent or malformed. */
    private static long getLong(final Element element, final String childElementName) {
        return Long.parseLong(getString(element, childElementName));
    }

    /** Boolean child; Boolean.parseBoolean yields false when the child is absent. */
    private static boolean getBoolean(final Element element, final String childElementName) {
        return Boolean.parseBoolean(getString(element, childElementName));
    }

    /** Parses the mandatory {@code <scheduledState>} child into the enum. */
    private static ScheduledState getScheduledState(final Element element) {
        return ScheduledState.valueOf(getString(element, "scheduledState"));
    }

    private static List<Element> getChildrenByTagName(final Element element, final String childElementName) {
        return DomUtils.getChildElementsByTagName(element, childElementName);
    }

    /**
     * Decrypts a value only when it is wrapped in the enc{...} markers written
     * by the serializer; other values pass through unchanged. A decryption
     * failure is rethrown with a hint about a mismatched sensitive props key.
     */
    private static String decrypt(final String value, final StringEncryptor encryptor) {
        if (value != null && value.startsWith(FlowSerializer.ENC_PREFIX) && value.endsWith(FlowSerializer.ENC_SUFFIX)) {
            try {
                return encryptor.decrypt(value.substring(FlowSerializer.ENC_PREFIX.length(), value.length() - FlowSerializer.ENC_SUFFIX.length()));
            } catch (EncryptionException e) {
                final String moreDescriptiveMessage = "There was a problem decrypting a sensitive flow configuration value. " +
                        "Check that the nifi.sensitive.props.key value in nifi.properties matches the value used to encrypt the flow.xml.gz file";
                logger.error(moreDescriptiveMessage, e);
                throw new EncryptionException(moreDescriptiveMessage, e);
            }
        } else {
            return value;
        }
    }
}
| apache-2.0 |
FingolfinTEK/camel | components/camel-etcd/src/main/java/org/apache/camel/component/etcd/processor/remote/EtcdServiceCallServerListStrategies.java | 3345 | /**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.etcd.processor.remote;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import mousio.etcd4j.requests.EtcdKeyGetRequest;
import mousio.etcd4j.responses.EtcdKeysResponse;
import org.apache.camel.RuntimeCamelException;
import org.apache.camel.component.etcd.EtcdConfiguration;
import org.apache.camel.spi.ServiceCallServer;
import org.apache.camel.util.ObjectHelper;
/**
 * Holder for {@link EtcdServiceCallServerListStrategy} implementations used by
 * the Camel etcd service-call component. Currently provides only the
 * {@link OnDemand} strategy, which queries etcd on every lookup.
 */
public final class EtcdServiceCallServerListStrategies {
    /** Static-only holder; not instantiable. */
    private EtcdServiceCallServerListStrategies() {
    }

    /**
     * Strategy that fetches the server list from etcd each time it is asked,
     * rather than caching or watching for changes.
     */
    public static final class OnDemand extends EtcdServiceCallServerListStrategy {
        public OnDemand(EtcdConfiguration configuration) throws Exception {
            super(configuration);
        }

        /**
         * Recursively reads the configured service path from etcd and returns
         * the servers registered under the given service name, sorted by the
         * server comparator. Returns an empty list when the path has no nodes.
         * Any etcd/parse failure is wrapped in a RuntimeCamelException.
         */
        @Override
        public List<ServiceCallServer> getUpdatedListOfServers(String name) {
            List<ServiceCallServer> servers = Collections.emptyList();
            try {
                final EtcdConfiguration conf = getConfiguration();
                final EtcdKeyGetRequest request = getClient().get(conf.getServicePath()).recursive();
                if (conf.hasTimeout()) {
                    request.timeout(conf.getTimeout(), TimeUnit.SECONDS);
                }
                final EtcdKeysResponse response = request.send().get();
                if (Objects.nonNull(response.node) && !response.node.nodes.isEmpty()) {
                    // Each node value is a serialized server record; drop blanks,
                    // unparseable entries, and entries for other service names.
                    servers = response.node.nodes.stream()
                            .map(node -> node.value)
                            .filter(ObjectHelper::isNotEmpty)
                            .map(this::nodeFromString)
                            .filter(Objects::nonNull)
                            .filter(s -> name.equalsIgnoreCase(s.getName()))
                            .sorted(EtcdServiceCallServer.COMPARATOR)
                            .collect(Collectors.toList());
                }
            } catch (Exception e) {
                throw new RuntimeCamelException(e);
            }
            return servers;
        }

        @Override
        public String toString() {
            return "OnDemand";
        }
    }

    // *************************************************************************
    // Helpers
    // *************************************************************************

    /** Factory for the {@link OnDemand} strategy. */
    public static EtcdServiceCallServerListStrategy onDemand(EtcdConfiguration configuration) throws Exception {
        return new OnDemand(configuration);
    }
}
| apache-2.0 |
fogbeam/S4 | subprojects/s4-tools/src/main/java/org/apache/s4/tools/Status.java | 14381 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.s4.tools;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.I0Itec.zkclient.exception.ZkNoNodeException;
import org.apache.s4.comm.topology.ClusterNode;
import org.apache.s4.comm.topology.ZNRecord;
import org.apache.s4.comm.topology.ZNRecordSerializer;
import org.apache.s4.comm.topology.ZkClient;
import org.apache.s4.tools.S4ArgsBase.GradleOptsConverter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.beust.jcommander.Parameter;
import com.beust.jcommander.Parameters;
import com.google.common.collect.Maps;
/**
 * Command line tool that prints the status of an S4 deployment — deployed
 * applications, cluster nodes and published streams — by reading the metadata
 * that S4 keeps in ZooKeeper under {@code /s4}.
 */
public class Status extends S4ArgsBase {

    static Logger logger = LoggerFactory.getLogger(Status.class);

    /** Placeholder printed whenever a value (app name, URI, cluster list, ...) is unknown. */
    private static final String NONE = "--";

    public static void main(String[] args) {
        StatusArgs statusArgs = new StatusArgs();
        Tools.parseArgs(statusArgs, args);

        List<Cluster> clusterStatus = new ArrayList<Cluster>();
        List<Stream> streamStatus = new ArrayList<Stream>();
        try {
            ZkClient zkClient = new ZkClient(statusArgs.zkConnectionString, statusArgs.timeout);
            zkClient.setZkSerializer(new ZNRecordSerializer());

            List<String> clusters = statusArgs.clusters;
            if (clusters == null) {
                // No -cluster filter given: inspect every subcluster known to ZooKeeper.
                clusters = zkClient.getChildren("/s4/clusters");
            }

            // Optional filter on application names (-app); null means "show everything".
            Set<String> app = null;
            Set<String> requiredAppCluster = new HashSet<String>();
            if (statusArgs.apps != null) {
                app = new HashSet<String>(statusArgs.apps);
            }

            for (String clusterName : clusters) {
                try {
                    if (zkClient.exists("/s4/clusters/" + clusterName)) {
                        Cluster cluster = new Cluster(clusterName, zkClient);
                        if (app == null || app.contains(cluster.app.name)) {
                            clusterStatus.add(cluster);
                            // Remember the retained clusters so that only streams
                            // attached to them are displayed later on.
                            requiredAppCluster.add(cluster.clusterName);
                        }
                    } else {
                        logger.error("/s4/clusters/" + clusterName + " doesn't exist");
                    }
                } catch (Exception e) {
                    // A broken cluster entry must not prevent reporting the others.
                    logger.error("Cannot get the status of " + clusterName, e);
                }
            }

            List<String> streams = statusArgs.streams;
            if (streams == null) {
                // No -stream filter given: load all published streams.
                streams = zkClient.getChildren("/s4/streams");
            }

            for (String streamName : streams) {
                try {
                    if (zkClient.exists("/s4/streams/" + streamName)) {
                        Stream stream = new Stream(streamName, zkClient);
                        if (app == null) {
                            streamStatus.add(stream);
                        } else {
                            // Only show streams produced or consumed by a retained cluster.
                            for (String cluster : requiredAppCluster) {
                                if (stream.containsCluster(cluster)) {
                                    streamStatus.add(stream);
                                    break;
                                }
                            }
                        }
                    } else {
                        logger.error("/s4/streams/" + streamName + " doesn't exist");
                    }
                } catch (Exception e) {
                    logger.error("Cannot get the status of " + streamName, e);
                }
            }

            System.out.println();
            showAppsStatus(clusterStatus);
            System.out.println("\n\n");
            showClustersStatus(clusterStatus);
            System.out.println("\n\n");
            showStreamsStatus(streamStatus);
            System.out.println("\n\n");
        } catch (Exception e) {
            logger.error("Cannot get the status of S4", e);
        }
    }

    @Parameters(commandNames = "s4 status", commandDescription = "Show status of S4", separators = "=")
    static class StatusArgs extends S4ArgsBase {

        @Parameter(names = { "-app" }, description = "Only show status of specified S4 application(s)", required = false)
        List<String> apps;

        @Parameter(names = { "-c", "-cluster" }, description = "Only show status of specified S4 cluster(s)", required = false)
        List<String> clusters;

        @Parameter(names = { "-s", "-stream" }, description = "Only show status of specified published stream(s)", required = false)
        List<String> streams;

        @Parameter(names = "-zk", description = "ZooKeeper connection string")
        String zkConnectionString = "localhost:2181";

        @Parameter(names = "-timeout", description = "Connection timeout to Zookeeper, in ms")
        int timeout = 10000;
    }

    /** Prints one table row per deployed application (clusters with no app are skipped). */
    private static void showAppsStatus(List<Cluster> clusters) {
        System.out.println("App Status");
        System.out.println(generateEdge(130));
        System.out.format("%-20s%-20s%-90s%n", inMiddle("Name", 20), inMiddle("Cluster", 20), inMiddle("URI", 90));
        System.out.println(generateEdge(130));
        for (Cluster cluster : clusters) {
            if (!NONE.equals(cluster.app.name)) {
                System.out.format("%-20s%-20s%-90s%n", inMiddle(cluster.app.name, 20),
                        inMiddle(cluster.app.cluster, 20), cluster.app.uri);
            }
        }
        System.out.println(generateEdge(130));
    }

    /** Prints one table section per cluster, with one sub-row per active node. */
    private static void showClustersStatus(List<Cluster> clusters) {
        System.out.println("Cluster Status");
        System.out.println(generateEdge(130));
        System.out.format("%-50s%-80s%n", " ", inMiddle("Active nodes", 80));
        System.out.format("%-20s%-20s%-10s%s%n", inMiddle("Name", 20), inMiddle("App", 20), inMiddle("Tasks", 10),
                generateEdge(80));
        System.out.format("%-50s%-10s%-10s%-50s%-10s%n", " ", inMiddle("Number", 8), inMiddle("Task id", 10),
                inMiddle("Host", 50), inMiddle("Port", 8));
        System.out.println(generateEdge(130));
        for (Cluster cluster : clusters) {
            System.out.format("%-20s%-20s%-10s%-10s", inMiddle(cluster.clusterName, 20),
                    inMiddle(cluster.app.name, 20), inMiddle("" + cluster.taskNumber, 8),
                    inMiddle("" + cluster.nodes.size(), 8));
            // The first node shares the line with the cluster columns; the rest
            // are indented onto their own lines.
            boolean first = true;
            for (ClusterNode node : cluster.nodes) {
                if (first) {
                    first = false;
                } else {
                    System.out.format("%n%-60s", " ");
                }
                System.out.format("%-10s%-50s%-10s", inMiddle("" + node.getTaskId(), 10),
                        inMiddle(node.getMachineName(), 50), inMiddle(node.getPort() + "", 10));
            }
            System.out.println();
        }
        System.out.println(generateEdge(130));
    }

    /** Prints one table row per published stream, with its producer and consumer clusters. */
    private static void showStreamsStatus(List<Stream> streams) {
        System.out.println("Stream Status");
        System.out.println(generateEdge(130));
        System.out.format("%-20s%-55s%-55s%n", inMiddle("Name", 20), inMiddle("Producers", 55),
                inMiddle("Consumers", 55));
        System.out.println(generateEdge(130));
        for (Stream stream : streams) {
            System.out.format("%-20s%-55s%-55s%n", inMiddle(stream.streamName, 20),
                    inMiddle(getFormatString(stream.producers, stream.clusterAppMap), 55),
                    inMiddle(getFormatString(stream.consumers, stream.clusterAppMap), 55));
        }
        System.out.println(generateEdge(130));
    }

    /**
     * Centers {@code content} in a column of the given width by prepending spaces.
     * If the content is as wide as (or wider than) the column it is returned
     * unchanged. Fix: the previous implementation always built the format string
     * {@code "%<pad>s"}, which throws an {@link java.util.IllegalFormatException}
     * when the computed padding is zero (e.g. a 20-character name in a
     * 20-character column).
     */
    private static String inMiddle(String content, int width) {
        int padding = (width - content.length()) / 2;
        if (padding <= 0) {
            return content;
        }
        return String.format("%" + padding + "s%s", " ", content);
    }

    /** Returns a horizontal rule of {@code length} dashes. */
    private static String generateEdge(int length) {
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < length; i++) {
            sb.append("-");
        }
        return sb.toString();
    }

    /**
     * Renders a cluster collection as {@code cluster1(app1) cluster2(app2)};
     * clusters with no deployed app are shown without the parenthesized suffix.
     *
     * @param clusters
     *            cluster list (may be null or empty, rendered as {@code --})
     * @param clusterAppMap
     *            map from cluster name to app name
     * @return the formatted string
     */
    private static String getFormatString(Collection<String> clusters, Map<String, String> clusterAppMap) {
        if (clusters == null || clusters.size() == 0) {
            return NONE;
        } else {
            StringBuilder sb = new StringBuilder();
            for (String cluster : clusters) {
                String app = clusterAppMap.get(cluster);
                sb.append(cluster);
                if (!NONE.equals(app)) {
                    sb.append("(").append(app).append(")");
                }
                sb.append(" ");
            }
            return sb.toString();
        }
    }

    /** Snapshot of one published stream: its producer/consumer clusters and their apps. */
    static class Stream {

        private final ZkClient zkClient;
        private final String consumerPath;
        private final String producerPath;

        String streamName;
        Set<String> producers = new HashSet<String>();// cluster name
        Set<String> consumers = new HashSet<String>();// cluster name
        Map<String, String> clusterAppMap = Maps.newHashMap();

        public Stream(String streamName, ZkClient zkClient) throws Exception {
            this.streamName = streamName;
            this.zkClient = zkClient;
            this.consumerPath = "/s4/streams/" + streamName + "/consumers";
            this.producerPath = "/s4/streams/" + streamName + "/producers";
            readStreamFromZk();
        }

        /** Populates producers/consumers from the stream's ZooKeeper children. */
        private void readStreamFromZk() throws Exception {
            List<String> consumerNodes = zkClient.getChildren(consumerPath);
            for (String node : consumerNodes) {
                ZNRecord consumer = zkClient.readData(consumerPath + "/" + node, true);
                consumers.add(consumer.getSimpleField("clusterName"));
            }

            List<String> producerNodes = zkClient.getChildren(producerPath);
            for (String node : producerNodes) {
                ZNRecord consumer = zkClient.readData(producerPath + "/" + node, true);
                producers.add(consumer.getSimpleField("clusterName"));
            }

            getAppNames();
        }

        /** Resolves the app deployed on every cluster touching this stream. */
        private void getAppNames() {
            Set<String> clusters = new HashSet<String>(consumers);
            clusters.addAll(producers);
            for (String cluster : clusters) {
                clusterAppMap.put(cluster, getApp(cluster, zkClient));
            }
        }

        /** True when the cluster produces or consumes this stream. */
        public boolean containsCluster(String cluster) {
            if (producers.contains(cluster) || consumers.contains(cluster)) {
                return true;
            }
            return false;
        }

        /** Returns the app name deployed on {@code clusterName}, or {@code --} if none. */
        private static String getApp(String clusterName, ZkClient zkClient) {
            String appPath = "/s4/clusters/" + clusterName + "/app/s4App";
            if (zkClient.exists(appPath)) {
                ZNRecord appRecord = zkClient.readData("/s4/clusters/" + clusterName + "/app/s4App");
                return appRecord.getSimpleField("name");
            }
            return NONE;
        }
    }

    /** Minimal description of a deployed application. */
    static class App {
        private String name = NONE;
        private String cluster;
        private String uri = NONE;
    }

    /** Snapshot of one cluster: its task count, active nodes and deployed app. */
    static class Cluster {

        private final ZkClient zkClient;
        private final String taskPath;
        private final String processPath;
        private final String appPath;

        String clusterName;
        int taskNumber;
        App app;

        List<ClusterNode> nodes = new ArrayList<ClusterNode>();

        public Cluster(String clusterName, ZkClient zkClient) throws Exception {
            this.clusterName = clusterName;
            this.zkClient = zkClient;
            this.taskPath = "/s4/clusters/" + clusterName + "/tasks";
            this.processPath = "/s4/clusters/" + clusterName + "/process";
            this.appPath = "/s4/clusters/" + clusterName + "/app/s4App";
            readClusterFromZk();
        }

        /** Populates task count, node list and app info from ZooKeeper. */
        public void readClusterFromZk() throws Exception {
            List<String> processes;
            List<String> tasks;

            tasks = zkClient.getChildren(taskPath);
            processes = zkClient.getChildren(processPath);

            taskNumber = tasks.size();

            for (int i = 0; i < processes.size(); i++) {
                // readData with returnNullIfPathNotExists=true: the node may have
                // disappeared between getChildren and readData.
                ZNRecord process = zkClient.readData(processPath + "/" + processes.get(i), true);
                if (process != null) {
                    int partition = Integer.parseInt(process.getSimpleField("partition"));
                    String host = process.getSimpleField("host");
                    int port = Integer.parseInt(process.getSimpleField("port"));
                    String taskId = process.getSimpleField("taskId");
                    ClusterNode node = new ClusterNode(partition, port, host, taskId);
                    nodes.add(node);
                }
            }

            app = new App();
            app.cluster = clusterName;
            try {
                ZNRecord appRecord = zkClient.readData(appPath);
                app.name = appRecord.getSimpleField("name");
                app.uri = appRecord.getSimpleField("s4r_uri");
            } catch (ZkNoNodeException e) {
                // No app deployed on this cluster: keep the NONE defaults.
                logger.warn(appPath + " doesn't exist");
            }
        }
    }
}
| apache-2.0 |
LaraiFox/Foxtail-Game-Engine | src/laraifox/foxtail/core/math/Matrix4f.java | 21881 | package laraifox.foxtail.core.math;
import java.text.DecimalFormat;
/**
* An array of 16 floating point numbers representing a 4x4 matrix in column major order. Representation of how the matrix is numbered is below.<br>
* [ 0, 4, 8, 12 ]<br>
* [ 1, 5, 9, 13 ]<br>
* [ 2, 6, 10, 14 ]<br>
* [ 3, 7, 11, 15 ]<br>
*
* @author Larai Fox
*
*/
public class Matrix4f {
    public static final int WIDTH = 4;
    public static final int HEIGHT = 4;
    public static final int COMPONENT_COUNT = WIDTH * HEIGHT;
    public static final int BYTE_COUNT = COMPONENT_COUNT * Float.BYTES;

    // Components in column major order: element (row, column) lives at
    // data[row + column * HEIGHT]. Column 3 (indices 12-14) holds translation.
    private float[] data;

    /** Creates a zero matrix (all 16 components are 0). */
    public Matrix4f() {
        this.data = new float[COMPONENT_COUNT];
    }

    /** Creates a matrix from 16 values in column major order; the array is copied. */
    public Matrix4f(float[] values) {
        this.data = new float[COMPONENT_COUNT];
        for (int i = 0; i < COMPONENT_COUNT; i++) {
            this.data[i] = values[i];
        }
    }

    /** Copy constructor. */
    public Matrix4f(Matrix4f matrix) {
        this.data = new float[COMPONENT_COUNT];
        for (int i = 0; i < COMPONENT_COUNT; i++) {
            this.data[i] = matrix.data[i];
        }
    }

    /** Converts from an LWJGL matrix (LWJGL's mXY fields are column X, row Y). */
    public Matrix4f(org.lwjgl.util.vector.Matrix4f matrix) {
        this.set(new float[] {
            matrix.m00, matrix.m01, matrix.m02, matrix.m03, //
            matrix.m10, matrix.m11, matrix.m12, matrix.m13, //
            matrix.m20, matrix.m21, matrix.m22, matrix.m23, //
            matrix.m30, matrix.m31, matrix.m32, matrix.m33
        });
    }

    /** Returns {@code left * right} as a new matrix; both operands are unchanged. */
    public static Matrix4f multiply(Matrix4f left, Matrix4f right) {
        return new Matrix4f(left).multiply(right);
    }

    /** Returns the inverse of {@code matrix} as a new matrix; the operand is unchanged. */
    public static Matrix4f inverse(Matrix4f matrix) {
        return new Matrix4f(matrix).inverse();
    }

    /** Returns the transpose of {@code matrix} as a new matrix; the operand is unchanged. */
    public static Matrix4f transpose(Matrix4f matrix) {
        return new Matrix4f(matrix).transpose();
    }

    /** Transforms {@code vector} extended with the given w component (w=1 point, w=0 direction). */
    public Vector4f multiply(Vector3f vector, float w) {
        Vector4f result = new Vector4f();
        result.setX(data[0] * vector.getX() + data[4] * vector.getY() + data[8] * vector.getZ() + data[12] * w);
        result.setY(data[1] * vector.getX() + data[5] * vector.getY() + data[9] * vector.getZ() + data[13] * w);
        result.setZ(data[2] * vector.getX() + data[6] * vector.getY() + data[10] * vector.getZ() + data[14] * w);
        result.setW(data[3] * vector.getX() + data[7] * vector.getY() + data[11] * vector.getZ() + data[15] * w);
        return result;
    }

    /** Transforms a 4-component vector, returning a new vector. */
    public Vector4f multiply(Vector4f vector) {
        Vector4f result = new Vector4f();
        result.setX(data[0] * vector.getX() + data[4] * vector.getY() + data[8] * vector.getZ() + data[12] * vector.getW());
        result.setY(data[1] * vector.getX() + data[5] * vector.getY() + data[9] * vector.getZ() + data[13] * vector.getW());
        result.setZ(data[2] * vector.getX() + data[6] * vector.getY() + data[10] * vector.getZ() + data[14] * vector.getW());
        result.setW(data[3] * vector.getX() + data[7] * vector.getY() + data[11] * vector.getZ() + data[15] * vector.getW());
        return result;
    }

    /** Computes {@code this * matrix}, storing the result in {@code this}, and returns {@code this}. */
    public Matrix4f multiply(Matrix4f matrix) {
        float[] result = new float[COMPONENT_COUNT];
        for (int i = 0; i < WIDTH; i++) {
            for (int j = 0; j < HEIGHT; j++) {
                result[j + i * HEIGHT] = data[j + 0 * HEIGHT] * matrix.data[0 + i * HEIGHT] + data[j + 1 * HEIGHT] * matrix.data[1 + i * HEIGHT] //
                        + data[j + 2 * HEIGHT] * matrix.data[2 + i * HEIGHT] + data[j + 3 * HEIGHT] * matrix.data[3 + i * HEIGHT];
            }
        }
        return this.set(result);
    }

    // Adopts the given array as backing storage (no copy); internal use only.
    private Matrix4f set(float[] values) {
        this.data = values;
        return this;
    }

    /**
     * Inverts this matrix in place via the adjugate / cofactor method and
     * returns {@code this}. If the matrix is singular (determinant 0) it is
     * left unchanged.
     */
    public Matrix4f inverse() {
        float[] result = new float[COMPONENT_COUNT];

        result[0] = data[5] * data[10] * data[15] - data[5] * data[11] * data[14] - //
                data[9] * data[6] * data[15] + data[9] * data[7] * data[14] + //
                data[13] * data[6] * data[11] - data[13] * data[7] * data[10];
        result[1] = -data[1] * data[10] * data[15] + data[1] * data[11] * data[14] + //
                data[9] * data[2] * data[15] - data[9] * data[3] * data[14] - //
                data[13] * data[2] * data[11] + data[13] * data[3] * data[10];
        result[2] = data[1] * data[6] * data[15] - data[1] * data[7] * data[14] - //
                data[5] * data[2] * data[15] + data[5] * data[3] * data[14] + //
                data[13] * data[2] * data[7] - data[13] * data[3] * data[6];
        result[3] = -data[1] * data[6] * data[11] + data[1] * data[7] * data[10] + //
                data[5] * data[2] * data[11] - data[5] * data[3] * data[10] - //
                data[9] * data[2] * data[7] + data[9] * data[3] * data[6];
        result[4] = -data[4] * data[10] * data[15] + data[4] * data[11] * data[14] + //
                data[8] * data[6] * data[15] - data[8] * data[7] * data[14] - //
                data[12] * data[6] * data[11] + data[12] * data[7] * data[10];
        result[5] = data[0] * data[10] * data[15] - data[0] * data[11] * data[14] - //
                data[8] * data[2] * data[15] + data[8] * data[3] * data[14] + //
                data[12] * data[2] * data[11] - data[12] * data[3] * data[10];
        result[6] = -data[0] * data[6] * data[15] + data[0] * data[7] * data[14] + //
                data[4] * data[2] * data[15] - data[4] * data[3] * data[14] - //
                data[12] * data[2] * data[7] + data[12] * data[3] * data[6];
        result[7] = data[0] * data[6] * data[11] - data[0] * data[7] * data[10] - //
                data[4] * data[2] * data[11] + data[4] * data[3] * data[10] + //
                data[8] * data[2] * data[7] - data[8] * data[3] * data[6];
        result[8] = data[4] * data[9] * data[15] - data[4] * data[11] * data[13] - //
                data[8] * data[5] * data[15] + data[8] * data[7] * data[13] + //
                data[12] * data[5] * data[11] - data[12] * data[7] * data[9];
        result[9] = -data[0] * data[9] * data[15] + data[0] * data[11] * data[13] + //
                data[8] * data[1] * data[15] - data[8] * data[3] * data[13] - //
                data[12] * data[1] * data[11] + data[12] * data[3] * data[9];
        result[10] = data[0] * data[5] * data[15] - data[0] * data[7] * data[13] - //
                data[4] * data[1] * data[15] + data[4] * data[3] * data[13] + //
                data[12] * data[1] * data[7] - data[12] * data[3] * data[5];
        result[11] = -data[0] * data[5] * data[11] + data[0] * data[7] * data[9] + //
                data[4] * data[1] * data[11] - data[4] * data[3] * data[9] - //
                data[8] * data[1] * data[7] + data[8] * data[3] * data[5];
        result[12] = -data[4] * data[9] * data[14] + data[4] * data[10] * data[13] + //
                data[8] * data[5] * data[14] - data[8] * data[6] * data[13] - //
                data[12] * data[5] * data[10] + data[12] * data[6] * data[9];
        result[13] = data[0] * data[9] * data[14] - data[0] * data[10] * data[13] - //
                data[8] * data[1] * data[14] + data[8] * data[2] * data[13] + //
                data[12] * data[1] * data[10] - data[12] * data[2] * data[9];
        result[14] = -data[0] * data[5] * data[14] + data[0] * data[6] * data[13] + //
                data[4] * data[1] * data[14] - data[4] * data[2] * data[13] - //
                data[12] * data[1] * data[6] + data[12] * data[2] * data[5];
        result[15] = data[0] * data[5] * data[10] - data[0] * data[6] * data[9] - //
                data[4] * data[1] * data[10] + data[4] * data[2] * data[9] + //
                data[8] * data[1] * data[6] - data[8] * data[2] * data[5];

        // Determinant by expansion along the first column of cofactors.
        float determinant = data[0] * result[0] + data[1] * result[4] + data[2] * result[8] + data[3] * result[12];
        if (determinant == 0)
            return this;

        determinant = 1.0f / determinant;
        for (int i = 0; i < COMPONENT_COUNT; i++) {
            data[i] = result[i] * determinant;
        }
        return this;
    }

    /** Transposes this matrix in place (swaps the six off-diagonal pairs) and returns {@code this}. */
    public Matrix4f transpose() {
        float temp = data[1];
        data[1] = data[4];
        data[4] = temp;
        temp = data[2];
        data[2] = data[8];
        data[8] = temp;
        temp = data[3];
        data[3] = data[12];
        data[12] = temp;
        temp = data[6];
        data[6] = data[9];
        data[9] = temp;
        temp = data[7];
        data[7] = data[13];
        data[13] = temp;
        temp = data[11];
        data[11] = data[14];
        data[14] = temp;
        return this;
    }

    /** Post-multiplies this matrix by a translation (this = this * T) and returns {@code this}. */
    public Matrix4f translate(float x, float y, float z) {
        return this.multiply(Matrix4f.Translation(x, y, z));
    }

    /** Post-multiplies this matrix by a rotation (this = this * R) and returns {@code this}. */
    public Matrix4f rotate(Quaternion quaternion) {
        return this.multiply(Matrix4f.Rotation(quaternion));
    }

    /** Post-multiplies this matrix by a scale (this = this * S) and returns {@code this}. */
    public Matrix4f scale(float x, float y, float z) {
        return this.multiply(Matrix4f.Scale(x, y, z));
    }

    @Override
    public String toString() {
        return this.toString(false);
    }

    /** Renders the matrix row by row; rows are newline separated when requested. */
    public String toString(boolean newlineAfterEachRow) {
        StringBuilder builder = new StringBuilder();
        String rowSeparator = newlineAfterEachRow ? " ],\n" : " ], ";
        builder.append("[ ").append(data[0]).append(", ").append(data[4]).append(", ").append(data[8]).append(", ").append(data[12]).append(rowSeparator);
        builder.append("[ ").append(data[1]).append(", ").append(data[5]).append(", ").append(data[9]).append(", ").append(data[13]).append(rowSeparator);
        builder.append("[ ").append(data[2]).append(", ").append(data[6]).append(", ").append(data[10]).append(", ").append(data[14]).append(rowSeparator);
        builder.append("[ ").append(data[3]).append(", ").append(data[7]).append(", ").append(data[11]).append(", ").append(data[15]).append(" ];");
        return builder.toString();
    }

    /** Equivalent to {@code toFormattedString(4, true)}. */
    public String toFormattedString() {
        return this.toFormattedString(4, true);
    }

    /** Equivalent to {@code toFormattedString(precision, true)}. */
    public String toFormattedString(int precision) {
        return this.toFormattedString(precision, true);
    }

    /** Equivalent to {@code toFormattedString(4, newlineAfterEachRow)}. */
    public String toFormattedString(boolean newlineAfterEachRow) {
        return this.toFormattedString(4, newlineAfterEachRow);
    }

    /**
     * Renders the matrix with every component formatted to {@code precision}
     * decimal places; in multi-line mode the columns are padded so that they
     * line up vertically.
     */
    public String toFormattedString(int precision, boolean newlineAfterEachRow) {
        // Pattern like "0.0000" for the requested precision.
        DecimalFormat formatter = new DecimalFormat(String.format("%1$-" + (precision + 2) + "s", "0.").replace(' ', '0'));
        String[] strings = new String[] {
            formatter.format(data[0]), formatter.format(data[4]), formatter.format(data[8]), formatter.format(data[12]),//
            formatter.format(data[1]), formatter.format(data[5]), formatter.format(data[9]), formatter.format(data[13]),//
            formatter.format(data[2]), formatter.format(data[6]), formatter.format(data[10]), formatter.format(data[14]),//
            formatter.format(data[3]), formatter.format(data[7]), formatter.format(data[11]), formatter.format(data[15])
        };
        StringBuilder builder = new StringBuilder(46 + (3 + precision) * COMPONENT_COUNT);
        if (newlineAfterEachRow) {
            // Column width is driven by the longest formatted component.
            int longestStringLength = 3 + precision;
            for (String string : strings) {
                longestStringLength = Math.max(longestStringLength, string.length());
            }
            int lineWidth = 2 + longestStringLength;
            for (int row = 0; row < HEIGHT; row++) {
                // Left-pad each cell so the value ends at a fixed column.
                builder.append(String.format("%1$-" + (lineWidth - strings[row * WIDTH].length()) + "s", "[ "));
                builder.append(strings[row * WIDTH + 0]).append(String.format("%1$-" + (lineWidth - strings[row * WIDTH + 1].length()) + "s", ", "));
                builder.append(strings[row * WIDTH + 1]).append(String.format("%1$-" + (lineWidth - strings[row * WIDTH + 2].length()) + "s", ", "));
                builder.append(strings[row * WIDTH + 2]).append(String.format("%1$-" + (lineWidth - strings[row * WIDTH + 3].length()) + "s", ", "));
                builder.append(strings[row * WIDTH + 3]).append(row < HEIGHT - 1 ? " ],\n" : " ];\n");
            }
        } else {
            builder.append("[ ").append(strings[0]).append(", ").append(strings[1]).append(", ").append(strings[2]).append(", ").append(strings[3]).append(" ], ");
            builder.append("[ ").append(strings[4]).append(", ").append(strings[5]).append(", ").append(strings[6]).append(", ").append(strings[7]).append(" ], ");
            builder.append("[ ").append(strings[8]).append(", ").append(strings[9]).append(", ").append(strings[10]).append(", ").append(strings[11]).append(" ], ");
            builder.append("[ ").append(strings[12]).append(", ").append(strings[13]).append(", ").append(strings[14]).append(", ").append(strings[15]).append(" ]; ");
        }
        return builder.toString();
    }

    /** Returns the backing array (column major, not a copy). */
    public float[] getData() {
        return this.data;
    }

    /** Returns the component at flat index {@code i} (column major). */
    public float getData(int i) {
        return this.data[i];
    }

    /** Returns the component at the given row and column. */
    public float getData(int row, int column) {
        return this.data[row + column * HEIGHT];
    }

    /** Returns the given row as a vector. */
    public Vector4f getRow(int row) {
        return new Vector4f(data[row + 0 * HEIGHT], data[row + 1 * HEIGHT], data[row + 2 * HEIGHT], data[row + 3 * HEIGHT]);
    }

    /** Returns the given column as a vector. */
    public Vector4f getColumn(int column) {
        return new Vector4f(data[0 + column * HEIGHT], data[1 + column * HEIGHT], data[2 + column * HEIGHT], data[3 + column * HEIGHT]);
    }

    /** Adopts {@code values} as backing storage (no copy) and returns {@code this}. */
    public Matrix4f setData(float[] values) {
        this.data = values;
        return this;
    }

    /** Sets the component at flat index {@code i} and returns {@code this}. */
    public Matrix4f setData(int i, float value) {
        this.data[i] = value;
        return this;
    }

    /** Sets the component at the given row and column and returns {@code this}. */
    public Matrix4f setData(int row, int column, float value) {
        this.data[row + column * HEIGHT] = value;
        return this;
    }

    /** Replaces row {@code i} with the given vector and returns {@code this}. */
    public Matrix4f setRow(int i, Vector4f value) {
        this.data[i + 0 * HEIGHT] = value.getX();
        this.data[i + 1 * HEIGHT] = value.getY();
        this.data[i + 2 * HEIGHT] = value.getZ();
        this.data[i + 3 * HEIGHT] = value.getW();
        return this;
    }

    /** Replaces column {@code i} with the given vector and returns {@code this}. */
    public Matrix4f setColumn(int i, Vector4f value) {
        this.data[0 + i * HEIGHT] = value.getX();
        this.data[1 + i * HEIGHT] = value.getY();
        this.data[2 + i * HEIGHT] = value.getZ();
        this.data[3 + i * HEIGHT] = value.getW();
        return this;
    }

    /** Returns the 4x4 identity matrix. */
    public static Matrix4f Identity() {
        return new Matrix4f(new float[] {
                1, 0, 0, 0, //
                0, 1, 0, 0, //
                0, 0, 1, 0, //
                0, 0, 0, 1 });
    }

    /** Returns a translation matrix for the given vector. */
    public static Matrix4f Translation(Vector3f translation) {
        return Matrix4f.Translation(translation.getX(), translation.getY(), translation.getZ());
    }

    /** Returns a translation matrix (translation occupies column 3, indices 12-14). */
    public static Matrix4f Translation(float x, float y, float z) {
        return new Matrix4f(new float[] {
                1, 0, 0, 0, //
                0, 1, 0, 0, //
                0, 0, 1, 0, //
                x, y, z, 1 });
    }

    /** Builds a rotation matrix from a quaternion via its rotated basis vectors. */
    public static Matrix4f Rotation(Quaternion quaternion) {
        Vector3f forward = new Vector3f(2.0f * (quaternion.getX() * quaternion.getZ() - quaternion.getW() * quaternion.getY()),
                2.0f * (quaternion.getY() * quaternion.getZ() + quaternion.getW() * quaternion.getX()), 1.0f - 2.0f * (quaternion.getX() * quaternion.getX() + quaternion.getY()
                        * quaternion.getY()));
        Vector3f up = new Vector3f(2.0f * (quaternion.getX() * quaternion.getY() + quaternion.getW() * quaternion.getZ()),
                1.0f - 2.0f * (quaternion.getX() * quaternion.getX() + quaternion.getZ() * quaternion.getZ()), 2.0f * (quaternion.getY() * quaternion.getZ() - quaternion.getW()
                        * quaternion.getX()));
        Vector3f right = new Vector3f(1.0f - 2.0f * (quaternion.getY() * quaternion.getY() + quaternion.getZ() * quaternion.getZ()),
                2.0f * (quaternion.getX() * quaternion.getY() - quaternion.getW() * quaternion.getZ()), 2.0f * (quaternion.getX() * quaternion.getZ() + quaternion.getW()
                        * quaternion.getY()));

        return Matrix4f.Rotation(forward, up, right);
    }

    /**
     * Builds a rotation matrix from Euler angles in degrees, combined as
     * Rz * Ry * Rx.
     */
    public static Matrix4f Rotation(float x, float y, float z) {
        x = (float) Math.toRadians(x);
        y = (float) Math.toRadians(y);
        z = (float) Math.toRadians(z);

        final float sinX = (float) Math.sin(x);
        final float cosX = (float) Math.cos(x);
        final float sinY = (float) Math.sin(y);
        final float cosY = (float) Math.cos(y);
        final float sinZ = (float) Math.sin(z);
        final float cosZ = (float) Math.cos(z);

        // Rotation around the X axis
        Matrix4f rx = new Matrix4f(new float[] {
                1, 0, 0, 0, //
                0, cosX, sinX, 0, //
                0, -sinX, cosX, 0, //
                0, 0, 0, 1 });
        // Rotation around the Y axis
        Matrix4f ry = new Matrix4f(new float[] {
                cosY, 0, sinY, 0, //
                0, 1, 0, 0, //
                -sinY, 0, cosY, 0, //
                0, 0, 0, 1 });
        // Rotation around the Z axis
        Matrix4f rz = new Matrix4f(new float[] {
                cosZ, sinZ, 0, 0, //
                -sinZ, cosZ, 0, 0, //
                0, 0, 1, 0, //
                0, 0, 0, 1 });

        return rz.multiply(ry.multiply(rx));
    }

    /** Builds a rotation matrix from a forward and up direction (right is derived). */
    public static Matrix4f Rotation(Vector3f forward, Vector3f upward) {
        Vector3f zAxis = Vector3f.normalize(forward);
        Vector3f xAxis = Vector3f.normalize(upward).cross(zAxis);
        Vector3f yAxis = Vector3f.cross(zAxis, xAxis).normalize();

        return Matrix4f.Rotation(zAxis, yAxis, xAxis);
    }

    /** Builds a rotation matrix whose rows are the given (normalized) basis vectors. */
    public static Matrix4f Rotation(Vector3f forward, Vector3f upward, Vector3f right) {
        Vector3f xAxis = Vector3f.normalize(right);
        Vector3f yAxis = Vector3f.normalize(upward);
        Vector3f zAxis = Vector3f.normalize(forward);

        return new Matrix4f(new float[] {
                xAxis.getX(), yAxis.getX(), zAxis.getX(), 0, //
                xAxis.getY(), yAxis.getY(), zAxis.getY(), 0, //
                xAxis.getZ(), yAxis.getZ(), zAxis.getZ(), 0, //
                0, 0, 0, 1 });
    }

    /**
     * Builds a view rotation matrix whose columns are the given basis vectors
     * (the transpose of {@link #Rotation(Vector3f, Vector3f, Vector3f)}).
     */
    public static Matrix4f ViewRotation(Vector3f forward, Vector3f upward, Vector3f right) {
        Vector3f xAxis = Vector3f.normalize(right);
        Vector3f yAxis = Vector3f.normalize(upward);
        Vector3f zAxis = Vector3f.normalize(forward);

        return new Matrix4f(new float[] {
                xAxis.getX(), xAxis.getY(), xAxis.getZ(), 0, //
                yAxis.getX(), yAxis.getY(), yAxis.getZ(), 0, //
                zAxis.getX(), zAxis.getY(), zAxis.getZ(), 0, //
                0, 0, 0, 1 });
    }

    /** Returns a non-uniform scale matrix. */
    public static Matrix4f Scale(float x, float y, float z) {
        return new Matrix4f(new float[] {
                x, 0, 0, 0, //
                0, y, 0, 0, //
                0, 0, z, 0, //
                0, 0, 0, 1 });
    }

    /**
     * Builds an orthographic projection matrix.
     * <p>
     * Fix: the translation terms were previously written into indices 3, 7 and
     * 11 — the bottom row of this column major layout, which feeds the W output
     * of {@link #multiply(Vector4f)} — instead of the last column (indices
     * 12-14) used by {@link #Translation(float, float, float)} and by the
     * perspective overload. They now go into the last column, matching the
     * standard glOrtho layout.
     */
    public static Matrix4f Projection(float left, float right, float bottom, float top, float near, float far) {
        float xRange = right - left;
        float yRange = top - bottom;
        float zRange = far - near;

        return new Matrix4f(new float[] {
                2.0f / xRange, 0, 0, 0, //
                0, 2.0f / yRange, 0, 0, //
                0, 0, -2.0f / zRange, 0, //
                -((right + left) / xRange), -((top + bottom) / yRange), -((far + near) / zRange), 1 });
    }

    /** Builds a perspective projection matrix from a vertical FOV (degrees) and viewport size. */
    public static Matrix4f Projection(float fov, float width, float height, float zNear, float zFar) {
        float aspect = width / height;
        float tanHalfFOV = (float) Math.tan(Math.toRadians(fov / 2.0f));
        float zRange = zNear - zFar;

        // Column 2 couples Z into W (left-handed, w = +z); column 3 carries the
        // perspective depth offset.
        return new Matrix4f(new float[] {
                1.0f / (tanHalfFOV * aspect), 0, 0, 0, //
                0, 1.0f / tanHalfFOV, 0, 0, //
                0, 0, (-zNear - zFar) / zRange, 1, //
                0, 0, (2.0f * zFar * zNear) / zRange, 0 });
    }
}
| apache-2.0 |
fhueske/flink | flink-table/flink-table-common/src/main/java/org/apache/flink/table/types/logical/TypeInformationAnyType.java | 4843 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.types.logical;
import org.apache.flink.annotation.Internal;
import org.apache.flink.annotation.PublicEvolving;
import org.apache.flink.api.common.ExecutionConfig;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.common.typeutils.TypeSerializer;
import org.apache.flink.table.api.TableException;
import org.apache.flink.util.Preconditions;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.Set;
/**
* Placeholder type of an arbitrary serialized type backed by {@link TypeInformation}. This type is
* a black box within the table ecosystem and is only deserialized at the edges. The any type is an
* extension to the SQL standard.
*
* <p>Compared to an {@link AnyType}, this type does not contain a {@link TypeSerializer} yet. The
* serializer will be generated from the enclosed {@link TypeInformation} but needs access to the
* {@link ExecutionConfig} of the current execution environment. Thus, this type is just a placeholder
* for the fully resolved {@link AnyType} returned by {@link #resolve(ExecutionConfig)}.
*
* <p>This type has no serializable string representation.
*
* <p>If no type information is supplied, generic type serialization for {@link Object} is used.
*/
@PublicEvolving
public final class TypeInformationAnyType<T> extends LogicalType {

    private static final String FORMAT = "ANY('%s', ?)";

    // Class names accepted for input/output conversion in addition to the
    // enclosed type information's own class.
    private static final Set<String> INPUT_OUTPUT_CONVERSION = conversionSet(
        byte[].class.getName(),
        "org.apache.flink.table.dataformat.BinaryGeneric");

    // Fallback when no type information is supplied: generic serialization of Object.
    private static final TypeInformation<?> DEFAULT_TYPE_INFO = Types.GENERIC(Object.class);

    private final TypeInformation<T> typeInfo;

    /**
     * Creates a placeholder any type backed by the given type information.
     *
     * @param isNullable whether the type accepts null values
     * @param typeInfo   backing type information, must not be null
     */
    public TypeInformationAnyType(boolean isNullable, TypeInformation<T> typeInfo) {
        super(isNullable, LogicalTypeRoot.ANY);
        this.typeInfo = Preconditions.checkNotNull(typeInfo, "Type information must not be null.");
    }

    /** Creates a nullable placeholder any type for the given type information. */
    public TypeInformationAnyType(TypeInformation<T> typeInfo) {
        this(true, typeInfo);
    }

    /** Creates a nullable placeholder any type using generic Object serialization. */
    @SuppressWarnings("unchecked")
    public TypeInformationAnyType() {
        this(true, (TypeInformation<T>) DEFAULT_TYPE_INFO);
    }

    public TypeInformation<T> getTypeInformation() {
        return typeInfo;
    }

    /**
     * Resolves this placeholder into a fully specified {@link AnyType} by
     * generating a serializer from the enclosed type information and the
     * given execution config.
     */
    @Internal
    public AnyType<T> resolve(ExecutionConfig config) {
        return new AnyType<>(isNullable(), typeInfo.getTypeClass(), typeInfo.createSerializer(config));
    }

    @Override
    public LogicalType copy(boolean isNullable) {
        return new TypeInformationAnyType<>(isNullable, typeInfo); // we must assume immutability here
    }

    @Override
    public String asSummaryString() {
        return withNullability(FORMAT, typeInfo.getTypeClass().getName());
    }

    /** Always throws: this placeholder type has no serializable string form. */
    @Override
    public String asSerializableString() {
        throw new TableException(
            "An any type backed by type information has no serializable string representation. It " +
                "needs to be resolved into a proper any type.");
    }

    @Override
    public boolean supportsInputConversion(Class<?> clazz) {
        return typeInfo.getTypeClass().isAssignableFrom(clazz) ||
            INPUT_OUTPUT_CONVERSION.contains(clazz.getName());
    }

    @Override
    public boolean supportsOutputConversion(Class<?> clazz) {
        return clazz.isAssignableFrom(typeInfo.getTypeClass()) ||
            INPUT_OUTPUT_CONVERSION.contains(clazz.getName());
    }

    @Override
    public Class<?> getDefaultConversion() {
        return typeInfo.getTypeClass();
    }

    @Override
    public List<LogicalType> getChildren() {
        return Collections.emptyList();
    }

    @Override
    public <R> R accept(LogicalTypeVisitor<R> visitor) {
        return visitor.visit(this);
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        if (!super.equals(o)) {
            return false;
        }
        TypeInformationAnyType<?> that = (TypeInformationAnyType<?>) o;
        return typeInfo.equals(that.typeInfo);
    }

    @Override
    public int hashCode() {
        return Objects.hash(super.hashCode(), typeInfo);
    }
}
| apache-2.0 |
DozerMapper/dozer | core/src/test/java/com/github/dozermapper/core/vo/LoopObjectChild.java | 1234 | /*
* Copyright 2005-2019 Dozer Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.dozermapper.core.vo;
/**
 * Mapping test fixture: the child side of a parent/child reference loop.
 * Uses identity-based hashCode (and an equals built on it) so that the
 * parent/child cycle cannot cause infinite recursion during comparison.
 */
public class LoopObjectChild extends BaseTestObject {

    private LoopObjectParent parent;

    public LoopObjectParent getParent() {
        return parent;
    }

    public void setParent(LoopObjectParent parent) {
        this.parent = parent;
    }

    @Override
    public int hashCode() {
        // Identity hash: deliberately ignores field values to break the cycle.
        return System.identityHashCode(this);
    }

    @Override
    public String toString() {
        return this.getClass().getName() + "@" + Integer.toHexString(hashCode());
    }

    @Override
    public boolean equals(Object obj) {
        // Fix: equals(null) must return false per the Object contract; the
        // previous implementation called obj.hashCode() unconditionally and
        // threw a NullPointerException for a null argument.
        return obj != null && hashCode() == obj.hashCode();
    }
}
| apache-2.0 |
gocd/gocd | api/api-internal-pipeline-groups-v1/src/main/java/com/thoughtworks/go/apiv1/internalpipelinegroups/InternalPipelineGroupsControllerV1.java | 4589 | /*
* Copyright 2022 ThoughtWorks, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.thoughtworks.go.apiv1.internalpipelinegroups;
import com.google.common.collect.ImmutableMap;
import com.thoughtworks.go.api.ApiController;
import com.thoughtworks.go.api.ApiVersion;
import com.thoughtworks.go.api.spring.ApiAuthenticationHelper;
import com.thoughtworks.go.api.util.HaltApiResponses;
import com.thoughtworks.go.apiv1.internalpipelinegroups.models.PipelineGroupsViewModel;
import com.thoughtworks.go.apiv1.internalpipelinegroups.representers.InternalPipelineGroupsRepresenter;
import com.thoughtworks.go.config.EnvironmentsConfig;
import com.thoughtworks.go.domain.PipelineGroups;
import com.thoughtworks.go.server.service.EnvironmentConfigService;
import com.thoughtworks.go.server.service.PipelineConfigService;
import com.thoughtworks.go.spark.Routes;
import com.thoughtworks.go.spark.spring.SparkSpringController;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import spark.Request;
import spark.Response;
import java.io.IOException;
import java.util.Map;
import java.util.function.Supplier;
import static spark.Spark.*;
/**
 * Internal PipelineGroups API, to be used on the environments SPA while editing
 * pipelines. Exposes a single authenticated GET endpoint returning pipeline
 * groups filtered by the caller's permission level.
 */
@Component
public class InternalPipelineGroupsControllerV1 extends ApiController implements SparkSpringController {

    private final ApiAuthenticationHelper apiAuthenticationHelper;
    // Maps the "pipeline_group_authorization" query parameter value to the
    // lookup that yields pipeline groups at that permission level.
    private final Map<String, Supplier<PipelineGroups>> pipelineGroupAuthorizationRegistry;
    private final EnvironmentConfigService environmentConfigService;

    @Autowired
    public InternalPipelineGroupsControllerV1(ApiAuthenticationHelper apiAuthenticationHelper,
                                              PipelineConfigService pipelineConfigService,
                                              EnvironmentConfigService environmentConfigService) {
        super(ApiVersion.v1);
        this.apiAuthenticationHelper = apiAuthenticationHelper;
        this.pipelineGroupAuthorizationRegistry = ImmutableMap.<String, Supplier<PipelineGroups>>builder()
                .put("view", () -> pipelineConfigService.viewableGroupsForUserIncludingConfigRepos(currentUsername()))
                .put("operate", () -> pipelineConfigService.viewableOrOperatableGroupsForIncludingConfigRepos(currentUsername()))
                .put("administer", () -> pipelineConfigService.adminGroupsForIncludingConfigRepos(currentUsername()))
                .build();
        this.environmentConfigService = environmentConfigService;
    }

    @Override
    public String controllerBasePath() {
        return Routes.InternalPipelineGroups.BASE;
    }

    /** Registers routes; every request requires an authenticated user (403 otherwise). */
    @Override
    public void setupRoutes() {
        path(controllerBasePath(), () -> {
            before("", mimeType, this::setContentType);
            before("/*", mimeType, this::setContentType);

            before("", this.mimeType, this.apiAuthenticationHelper::checkUserAnd403);
            before("/*", this.mimeType, this.apiAuthenticationHelper::checkUserAnd403);

            get("", mimeType, this::index);
        });
    }

    /**
     * GET handler: renders pipeline groups for the permission level named by
     * the optional {@code pipeline_group_authorization} query parameter
     * ("view" (default), "operate" or "administer"); halts with an API error
     * for any other value.
     */
    public String index(Request request, Response response) throws IOException {
        String pipelineGroupAuthorizationType = request.queryParamOrDefault("pipeline_group_authorization", "view");
        Supplier<PipelineGroups> pipelineGroupsSupplier = pipelineGroupAuthorizationRegistry.get(pipelineGroupAuthorizationType);
        if (pipelineGroupsSupplier == null) {
            HaltApiResponses.haltBecauseOfReason("Bad query parameter.");
        }

        EnvironmentsConfig environments = new EnvironmentsConfig();
        environments.addAll(environmentConfigService.getEnvironments());
        PipelineGroupsViewModel pipelineGroupsViewModel = new PipelineGroupsViewModel(pipelineGroupsSupplier.get(), environments);
        return writerForTopLevelObject(request, response, outputWriter -> InternalPipelineGroupsRepresenter.toJSON(outputWriter, pipelineGroupsViewModel));
    }
}
| apache-2.0 |
apache/incubator-shardingsphere | shardingsphere-proxy/shardingsphere-proxy-bootstrap/src/test/java/org/apache/shardingsphere/proxy/fixture/YamlRuleConfigurationSwapperFixture.java | 1853 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.shardingsphere.proxy.fixture;
import org.apache.shardingsphere.infra.yaml.config.swapper.YamlRuleConfigurationSwapper;
/**
 * Test fixture implementation of {@link YamlRuleConfigurationSwapper} that
 * copies only the rule name between the YAML form and the plain
 * configuration form.
 */
public final class YamlRuleConfigurationSwapperFixture implements YamlRuleConfigurationSwapper<YamlRuleConfigurationFixture, RuleConfigurationFixture> {

    @Override
    public Class<RuleConfigurationFixture> getTypeClass() {
        return RuleConfigurationFixture.class;
    }

    @Override
    public YamlRuleConfigurationFixture swapToYamlConfiguration(final RuleConfigurationFixture data) {
        // Only the name is carried over; the fixture has no other state.
        YamlRuleConfigurationFixture yamlRuleConfig = new YamlRuleConfigurationFixture();
        yamlRuleConfig.setName(data.getName());
        return yamlRuleConfig;
    }

    @Override
    public RuleConfigurationFixture swapToObject(final YamlRuleConfigurationFixture yamlConfig) {
        return new RuleConfigurationFixture(yamlConfig.getName());
    }

    @Override
    public String getRuleTagName() {
        return "FIXTURE";
    }

    @Override
    public int getOrder() {
        return -1;
    }
}
| apache-2.0 |
nuan-nuan/sync-android | cloudant-sync-datastore-core/src/main/java/com/cloudant/sync/datastore/DocumentRevision.java | 2199 | /**
* Original iOS version by Jens Alfke, ported to Android by Marty Schoch
* Copyright (c) 2012 Couchbase, Inc. All rights reserved.
*
* Modifications for this distribution by Cloudant, Inc., Copyright (c) 2013 Cloudant, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.cloudant.sync.datastore;
import java.util.Map;
/**
* <p>A single revision of a document within a datastore.</p>
*
* <p>Documents within the datastore are in fact trees of document revisions,
* with one document marked as the current winner at any point. Branches in
* the tree are caused when a document is edited in more than one place before
* being replicated between datastores. The consuming application is responsible
* for finding active branches (also called conflicts), and marking the leaf
* nodes of all branches but one deleted (thereby resolving the conflict).</p>
*
* <p>A {@code DocumentRevision} contains all the information for a single document
* revision, including its ID and revision ID, along with the document's
* content for this revision as a {@link DocumentBody} object. Clients will
* typically set only the revision content rather than the metadata
* explicitly.</p>
*/
public interface DocumentRevision {

    /**
     * @return the unique identifier of the document
     */
    String getId();

    /**
     * @return the revision ID of this document revision
     */
    String getRevision();

    /**
     * @return the {@code DocumentBody} of the document
     */
    DocumentBody getBody();

    /**
     * Attachments associated with this revision, keyed by a caller-chosen
     * name.
     *
     * <p>NB the key is purely for the user's convenience and doesn't have to
     * be the same as the attachment name.</p>
     *
     * @return map of attachments keyed by name
     */
    Map<String, Attachment> getAttachments();
}
| apache-2.0 |
PathVisio/biopax-plugin | test/org/pathvisio/biopax3/bots/WikipathwaysBiopaxValidator.java | 5691 | package org.pathvisio.biopax3.bots;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.net.MalformedURLException;
import java.net.URL;
import java.rmi.RemoteException;
import java.util.Date;
import java.util.HashSet;
import java.util.Set;
import javax.xml.bind.JAXBException;
import javax.xml.rpc.ServiceException;
import org.biopax.paxtools.client.BiopaxValidatorClient;
import org.biopax.paxtools.client.BiopaxValidatorClient.RetFormat;
import org.biopax.validator.jaxb.Behavior;
import org.biopax.validator.jaxb.ErrorCaseType;
import org.biopax.validator.jaxb.ErrorType;
import org.biopax.validator.jaxb.Validation;
import org.biopax.validator.jaxb.ValidatorResponse;
import org.bridgedb.bio.Organism;
import org.pathvisio.biopax3.BiopaxFormat;
import org.pathvisio.core.model.ConverterException;
import org.pathvisio.core.model.Pathway;
import org.pathvisio.wikipathways.webservice.WSCurationTag;
import org.pathvisio.wikipathways.webservice.WSPathway;
import org.pathvisio.wikipathways.webservice.WSPathwayInfo;
import org.wikipathways.client.WikiPathwaysClient;
/**
 * This script reads pathways from WikiPathways, converts each to BioPAX
 * (locally), and submits the result to the BioPAX validator webservice.
 * Reported error cases are collected into a {@link ValidationResultSet},
 * persisted between runs, and rendered into an HTML report after each
 * pathway.
 */
public class WikipathwaysBiopaxValidator
{
    /** WikiPathways web service client used to list and fetch pathways. */
    final WikiPathwaysClient client;
    /** GPML-to-BioPAX exporter. */
    final BiopaxFormat format;
    /** Client for the remote BioPAX validator service. */
    final BiopaxValidatorClient bpValidator;
    /** Accumulated validation results; persisted between runs in run(). */
    ValidationResultSet results;

    public WikipathwaysBiopaxValidator() throws ServiceException, MalformedURLException
    {
        client = new WikiPathwaysClient(new URL("http://webservice.wikipathways.org"));
        format = new BiopaxFormat();
        bpValidator = new BiopaxValidatorClient();
    }

    /**
     * Entry point: with exactly one argument, validates just that pathway id;
     * otherwise validates the curated human collection incrementally.
     */
    public static void main(String[] args) throws ConverterException, ServiceException, IOException, JAXBException
    {
        if (args.length == 1)
        {
            new WikipathwaysBiopaxValidator().runOne(args[0]);
        }
        else
        {
            new WikipathwaysBiopaxValidator().run();
        }
    }

    /** Validates a single pathway into a fresh, non-persisted result set. */
    private void runOne(String id) throws ConverterException, IOException, JAXBException
    {
        results = new ValidationResultSet();
        checkPathway(id);
    }

    /**
     * @return ids of all Homo sapiens pathways carrying the
     *         Curation:AnalysisCollection tag.
     */
    private Set<String> getAllIds() throws RemoteException
    {
        Set<String> result = new HashSet<String>();
        for (WSPathwayInfo info : client.listPathways(Organism.HomoSapiens))
        {
            // Double-check the species; only human pathways are validated for now.
            if ("Homo sapiens".equals(info.getSpecies()))
            {
                WSCurationTag[] tags = client.getCurationTags(info.getId());
                for (WSCurationTag tag : tags)
                {
                    if (tag.getName().equals("Curation:AnalysisCollection"))
                    {
                        System.out.println(info.getId());
                        result.add(info.getId());
                    }
                }
            }
        }
        System.out.println(result.size());
        return result;
    }

    /** @return ids of pathways changed since the given cut-off date. */
    private Set<String> needsUpdate(Date cutoff) throws RemoteException
    {
        Set<String> result = new HashSet<String>();
        for (WSPathwayInfo info : client.getRecentChanges(cutoff))
        {
            result.add(info.getId());
        }
        return result;
    }

    /** Writes the HTML overview report, named after the run's start time. */
    private void writeReport(long date) throws IOException
    {
        FileOutputStream str = new FileOutputStream(new File("biopaxreport_" + date + ".html"));
        try
        {
            results.printHtmlOverview(new PrintStream(str));
        }
        finally
        {
            // Fix: close the stream even when report generation throws.
            str.close();
        }
    }

    /**
     * Validates every pathway that is new or changed since the previous run,
     * persisting intermediate results so an interrupted run can resume, and
     * writing the HTML report after each pathway and once more on exit.
     */
    private void run() throws ConverterException, ServiceException, IOException, JAXBException
    {
        Date now = new Date(); // store date before start of run, to account for changes during the run.
        File resultsStoreFile = new File("validator.objectstore_" + now.getTime());
        results = ValidationResultSet.readOrCreate(resultsStoreFile);
        try
        {
            Date lastChangeDate = results.previousRunDate;
            Set<String> alreadyDone = results.getIds();
            if (lastChangeDate != null)
            {
                // BUG FIX: this was alreadyDone.remove(Set), which removes
                // nothing because Set.remove takes a single element, so
                // recently changed pathways were never re-validated.
                // removeAll removes every changed id from the done set.
                alreadyDone.removeAll(needsUpdate(lastChangeDate));
            }

            // create list of pathways to update
            Set<String> todo = getAllIds();
            todo.removeAll(alreadyDone);

            System.out.println("Already done: " + alreadyDone.size());
            System.out.println("TODO: " + todo.size() + " pathways\n" + todo);

            for (String id : todo)
            {
                checkPathway(id);
                // Persist and report after every pathway so progress survives a crash.
                results.store(resultsStoreFile);
                writeReport(now.getTime());
            }
            results.previousRunDate = now;
            results.store(resultsStoreFile);
        }
        finally
        {
            writeReport(now.getTime());
        }
    }

    /**
     * Fetches one pathway, exports it to a BioPAX OWL file, submits that file
     * to the validator webservice and records every reported error case,
     * replacing any previous results for the same pathway.
     */
    private void checkPathway(String id) throws ConverterException, IOException, JAXBException
    {
        System.out.println("CHECKING " + id);
        WSPathway wpwy = client.getPathway(id);
        Pathway pwy = WikiPathwaysClient.toPathway(wpwy);

        File tempFile = new File("output/biopaxText_" + id + ".owl");
        format.doExport(tempFile, pwy);
        System.out.println("Writing to temp file: " + tempFile);

        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        bpValidator.validate(false, "strict",
                RetFormat.XML, Behavior.ERROR,
                null, null,
                new File[] { tempFile }, baos);
        String data = baos.toString("UTF-8"); // TODO: not sure about encoding
        ValidatorResponse resp = BiopaxValidatorClient.unmarshal(data);

        // Drop any previous results for this pathway before recording new ones.
        results.evict(id);
        for (Validation v : resp.getValidation())
        {
            System.out.println(v.getDescription() + " " + v.getMaxErrors());
            for (ErrorType e : v.getError())
            {
                for (ErrorCaseType c : e.getErrorCase())
                {
                    results.record(id, wpwy.getName(), e.getCode(), c.getObject(), e.getMessage(), c.getMessage());
                }
            }
        }
        System.out.println("DONE with " + id);
    }
}
| apache-2.0 |
sleuthkit/autopsy | Core/src/org/sleuthkit/autopsy/corecomponents/DataContentPanel.java | 12536 | /*
* Autopsy Forensic Browser
*
* Copyright 2011-2018 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.sleuthkit.autopsy.corecomponents;
import java.awt.Cursor;
import java.beans.PropertyChangeEvent;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.logging.Level;
import javax.swing.JTabbedPane;
import javax.swing.SwingWorker;
import javax.swing.event.ChangeEvent;
import javax.swing.event.ChangeListener;
import org.openide.nodes.Node;
import org.openide.util.Lookup;
import org.openide.util.NbBundle;
import org.sleuthkit.autopsy.core.UserPreferences;
import org.sleuthkit.autopsy.corecomponentinterfaces.DataContent;
import org.sleuthkit.autopsy.corecomponentinterfaces.DataContentViewer;
import org.sleuthkit.autopsy.coreutils.Logger;
import org.sleuthkit.datamodel.Content;
import org.sleuthkit.datamodel.TskCoreException;
/**
 * Data content panel. Hosts one tab per registered {@link DataContentViewer}
 * implementation and keeps the enabled/selected tabs in sync with the node
 * currently selected in the UI.
 */
@SuppressWarnings("PMD.SingularField") // UI widgets cause lots of false positives
public class DataContentPanel extends javax.swing.JPanel implements DataContent, ChangeListener {

    private static Logger logger = Logger.getLogger(DataContentPanel.class.getName());

    // One wrapper per discovered viewer, in the same order as the tabs.
    private final List<UpdateWrapper> viewers = new ArrayList<>();
    // Node whose content is currently shown; null while a worker is deciding
    // which tabs apply to the latest selection.
    private Node currentNode;
    private final boolean isMain;
    private boolean listeningToTabbedPane = false;
    // Background worker computing supported/preferred tabs for the selected
    // node; cancelled and replaced whenever a new node is selected.
    private DataContentPanelWorker workerThread;

    /**
     * Creates new DataContentPanel panel The main data content panel can only
     * be created by the data content top component, thus this constructor is
     * not public.
     *
     * Use the createInstance factory method to create an external viewer data
     * content panel.
     *
     */
    DataContentPanel(boolean isMain) {
        this.isMain = isMain;
        initComponents();

        // add all implementors of DataContentViewer and put them in the tabbed pane
        Collection<? extends DataContentViewer> dcvs = Lookup.getDefault().lookupAll(DataContentViewer.class);
        for (DataContentViewer factory : dcvs) {
            DataContentViewer dcv;
            if (isMain) {
                //use the instance from Lookup for the main viewer
                dcv = factory;
            } else {
                dcv = factory.createInstance();
            }
            viewers.add(new UpdateWrapper(dcv));
            javax.swing.JScrollPane scrollTab = new javax.swing.JScrollPane(dcv.getComponent());
            scrollTab.setVerticalScrollBarPolicy(javax.swing.JScrollPane.VERTICAL_SCROLLBAR_NEVER);
            jTabbedPane1.addTab(dcv.getTitle(), null,
                    scrollTab, dcv.getToolTip());
        }

        // disable the tabs
        int numTabs = jTabbedPane1.getTabCount();
        for (int tab = 0; tab < numTabs; ++tab) {
            jTabbedPane1.setEnabledAt(tab, false);
        }
    }

    /**
     * Factory method to create an external (not main window) data content panel
     * to be used in an external window
     *
     * @return a new instance of a data content panel
     */
    public static DataContentPanel createInstance() {
        return new DataContentPanel(false);
    }

    /** @return the tabbed pane hosting one tab per content viewer */
    public JTabbedPane getTabPanels() {
        return jTabbedPane1;
    }

    /**
     * This method is called from within the constructor to initialize the form.
     * WARNING: Do NOT modify this code. The content of this method is always
     * regenerated by the Form Editor.
     */
    @SuppressWarnings("unchecked")
    // <editor-fold defaultstate="collapsed" desc="Generated Code">//GEN-BEGIN:initComponents
    private void initComponents() {

        jTabbedPane1 = new javax.swing.JTabbedPane();

        setMinimumSize(new java.awt.Dimension(5, 5));

        javax.swing.GroupLayout layout = new javax.swing.GroupLayout(this);
        this.setLayout(layout);
        layout.setHorizontalGroup(
            layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
            .addComponent(jTabbedPane1)
        );
        layout.setVerticalGroup(
            layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
            .addComponent(jTabbedPane1)
        );
    }// </editor-fold>//GEN-END:initComponents
    // Variables declaration - do not modify//GEN-BEGIN:variables
    private javax.swing.JTabbedPane jTabbedPane1;
    // End of variables declaration//GEN-END:variables

    /**
     * Selects a new node: cancels any in-flight worker, disables and resets
     * every tab, then (if the node is non-null) starts a background worker to
     * determine which viewers support it.
     */
    @Override
    public void setNode(Node selectedNode) {

        // Cancel the worker from the previous selection, if still running.
        if (workerThread != null) {
            workerThread.cancel(true);
            workerThread = null;
        }

        currentNode = null;

        // Reset everything
        for (int index = 0; index < jTabbedPane1.getTabCount(); index++) {
            jTabbedPane1.setEnabledAt(index, false);
            String tabTitle = viewers.get(index).getTitle(selectedNode);
            tabTitle = tabTitle == null ? "" : tabTitle;
            if (!tabTitle.equals(jTabbedPane1.getTitleAt(index))) {
                jTabbedPane1.setTitleAt(index, tabTitle);
            }
            viewers.get(index).resetComponent();
        }

        if (selectedNode != null) {
            workerThread = new DataContentPanelWorker(selectedNode);
            workerThread.execute();
        }
    }

    /**
     * Update the state of the tabs based on the given data.
     *
     * @param selectedNode     The currently selected node.
     * @param supportedIndices The indices of the tabs that are supported by
     *                         this node type.
     * @param preferredIndex   The index of the tab which is preferred.
     */
    private void updateTabs(Node selectedNode, List<Integer> supportedIndices, int preferredIndex) {

        // Deferring becoming a listener to the tabbed pane until this point
        // eliminates handling a superfluous stateChanged event during construction.
        if (listeningToTabbedPane == false) {
            jTabbedPane1.addChangeListener(this);
            listeningToTabbedPane = true;
        }

        for (Integer index : supportedIndices) {
            jTabbedPane1.setEnabledAt(index, true);
        }

        // let the user decide if we should stay with the current viewer
        int tabIndex = UserPreferences.keepPreferredContentViewer() ? jTabbedPane1.getSelectedIndex() : preferredIndex;

        UpdateWrapper dcv = viewers.get(tabIndex);

        // this is really only needed if no tabs were enabled
        if (jTabbedPane1.isEnabledAt(tabIndex) == false) {
            dcv.resetComponent();
        } else {
            dcv.setNode(selectedNode);
        }

        // set the tab to the one the user wants, then set that viewer's node.
        jTabbedPane1.setSelectedIndex(tabIndex);
        jTabbedPane1.getSelectedComponent().repaint();
    }

    @Override
    public void propertyChange(PropertyChangeEvent evt) {
        // No-op: this panel does not react to property change events.
    }

    /**
     * Called when the user switches tabs: lazily pushes the current node into
     * the newly selected viewer if that viewer has not seen it yet.
     */
    @Override
    public void stateChanged(ChangeEvent evt) {
        JTabbedPane pane = (JTabbedPane) evt.getSource();

        // Get and set current selected tab
        int currentTab = pane.getSelectedIndex();
        if (currentTab != -1) {
            UpdateWrapper dcv = viewers.get(currentTab);
            if (dcv.isOutdated() || dcv.getViewer() instanceof DataArtifactContentViewer) {
                // change the cursor to "waiting cursor" for this operation
                this.setCursor(Cursor.getPredefinedCursor(Cursor.WAIT_CURSOR));
                try {
                    dcv.setNode(currentNode);
                } finally {
                    this.setCursor(null);
                }
            }
        }
    }

    /**
     * Pairs a viewer with a flag recording whether its displayed content is
     * stale, i.e. whether setNode must be called before it is shown again.
     */
    private static class UpdateWrapper {

        private final DataContentViewer wrapped;
        private boolean outdated;

        UpdateWrapper(DataContentViewer wrapped) {
            this.wrapped = wrapped;
            this.outdated = true;
        }

        void setNode(Node selectedNode) {
            this.wrapped.setNode(selectedNode);
            this.outdated = false;
        }

        void resetComponent() {
            this.wrapped.resetComponent();
            this.outdated = true;
        }

        boolean isOutdated() {
            return this.outdated;
        }

        boolean isSupported(Node node) {
            return this.wrapped.isSupported(node);
        }

        int isPreferred(Node node) {
            return this.wrapped.isPreferred(node);
        }

        String getTitle(Node node) {
            return this.wrapped.getTitle(node);
        }

        DataContentViewer getViewer() {
            return wrapped;
        }
    }

    /**
     * SwingWorker class to determine which tabs should be enabled for the given
     * node.
     */
    private class DataContentPanelWorker extends SwingWorker<WorkerResults, Void> {

        private final Node node;

        /**
         * Worker constructor.
         *
         * @param node The node whose supported viewers are to be determined.
         */
        DataContentPanelWorker(Node node) {
            this.node = node;
        }

        @Override
        protected WorkerResults doInBackground() throws Exception {
            List<Integer> supportedViewers = new ArrayList<>();
            int preferredViewerIndex = 0;
            int maxPreferred = 0;

            for (int index = 0; index < viewers.size(); index++) {
                UpdateWrapper dcv = viewers.get(index);
                if (dcv.isSupported(node)) {
                    supportedViewers.add(index);

                    // Track the viewer reporting the highest preference score.
                    int currentPreferred = dcv.isPreferred(node);
                    if (currentPreferred > maxPreferred) {
                        preferredViewerIndex = index;
                        maxPreferred = currentPreferred;
                    }
                }

                // Bail out early if a newer selection cancelled this worker.
                if (this.isCancelled()) {
                    return null;
                }
            }

            return new WorkerResults(node, supportedViewers, preferredViewerIndex);
        }

        @Override
        protected void done() {
            // Do nothing if the thread was cancelled.
            if (isCancelled()) {
                return;
            }

            try {
                WorkerResults results = get();

                currentNode = node;

                if (results != null) {
                    updateTabs(results.getNode(), results.getSupportedIndices(), results.getPreferredViewerIndex());
                }

            } catch (InterruptedException | ExecutionException ex) {
                logger.log(Level.SEVERE, "Failed to updated data content panel for node " + node.getName(), ex);
            } finally {
                setCursor(Cursor.getPredefinedCursor(Cursor.DEFAULT_CURSOR));
            }
        }
    }

    /**
     * Utility class to store all of the data the SwingWorker collected.
     */
    private class WorkerResults {

        private final Node node;
        private final List<Integer> supportedViewerIndices;
        private final int preferredViewerIndex;

        WorkerResults(Node node, List<Integer> supportedViewerIndices, int preferredViewerIndex) {
            this.node = node;
            this.supportedViewerIndices = supportedViewerIndices;
            this.preferredViewerIndex = preferredViewerIndex;
        }

        /**
         * Returns the selected node.
         *
         * @return The node the worker examined.
         */
        Node getNode() {
            return node;
        }

        /**
         * A list of tab indices that are supported by this node type.
         *
         * @return A list of indices.
         */
        List<Integer> getSupportedIndices() {
            return supportedViewerIndices;
        }

        /**
         * Returns the preferred tab index for the given node type.
         *
         * @return A valid tab index.
         */
        int getPreferredViewerIndex() {
            return preferredViewerIndex;
        }
    }
}
| apache-2.0 |
xorware/android_frameworks_base | packages/SystemUI/src/com/android/systemui/tuner/KeycodeSelectionHelper.java | 3074 | /*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.android.systemui.tuner;
import android.app.AlertDialog;
import android.content.Context;
import android.content.DialogInterface;
import android.content.Intent;
import android.view.KeyEvent;
import com.android.systemui.R;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.util.ArrayList;
/**
 * Helper that reflects over {@link KeyEvent}'s KEYCODE_* constants and lets
 * the user pick one from a dialog, or pick an image via a document intent.
 */
public class KeycodeSelectionHelper {

    private static final ArrayList<String> mKeycodeStrings = new ArrayList<>();
    private static final ArrayList<Integer> mKeycodes = new ArrayList<>();

    private static final String KEYCODE_STRING = "KEYCODE_";

    static {
        // Collect every static int KEYCODE_* constant from KeyEvent, keeping
        // display strings and raw codes at matching list indices.
        for (Field field : KeyEvent.class.getDeclaredFields()) {
            boolean isKeycodeConstant = Modifier.isStatic(field.getModifiers())
                    && field.getName().startsWith(KEYCODE_STRING)
                    && field.getType().equals(int.class);
            if (!isKeycodeConstant) {
                continue;
            }
            try {
                mKeycodeStrings.add(formatString(field.getName()));
                mKeycodes.add((Integer) field.get(null));
            } catch (IllegalAccessException ignored) {
                // Inaccessible constants are simply left out of the list.
            }
        }
    }

    // Turn e.g. "KEYCODE_DPAD_UP" into the friendlier "Dpad Up".
    private static String formatString(String name) {
        char[] chars = name.replace(KEYCODE_STRING, "").replace("_", " ")
                .toLowerCase().toCharArray();
        boolean capitalizeNext = true;
        for (int i = 0; i < chars.length; i++) {
            if (capitalizeNext) {
                chars[i] = Character.toUpperCase(chars[i]);
            }
            capitalizeNext = chars[i] == ' ';
        }
        return new String(chars);
    }

    /** Shows a list dialog of key names and reports the chosen raw keycode. */
    public static void showKeycodeSelect(Context context, final OnSelectionComplete listener) {
        new AlertDialog.Builder(context)
                .setTitle(R.string.select_keycode)
                .setItems(mKeycodeStrings.toArray(new String[0]),
                        new DialogInterface.OnClickListener() {
                            @Override
                            public void onClick(DialogInterface dialog, int which) {
                                listener.onSelectionComplete(mKeycodes.get(which));
                            }
                        }).show();
    }

    /** @return an intent for picking an openable image document */
    public static Intent getSelectImageIntent() {
        return new Intent(Intent.ACTION_OPEN_DOCUMENT).addCategory(Intent.CATEGORY_OPENABLE)
                .setType("image/*");
    }

    public interface OnSelectionComplete {
        void onSelectionComplete(int code);
    }
}
| apache-2.0 |
kromulan/ignite | modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManager.java | 112287 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.internal.managers.discovery;
import java.io.Externalizable;
import java.io.Serializable;
import java.lang.management.GarbageCollectorMXBean;
import java.lang.management.ManagementFactory;
import java.lang.management.MemoryMXBean;
import java.lang.management.MemoryUsage;
import java.lang.management.OperatingSystemMXBean;
import java.lang.management.RuntimeMXBean;
import java.lang.management.ThreadMXBean;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import java.util.Set;
import java.util.TreeMap;
import java.util.UUID;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ConcurrentNavigableMap;
import java.util.concurrent.ConcurrentSkipListSet;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.zip.CRC32;
import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.IgniteClientDisconnectedException;
import org.apache.ignite.IgniteException;
import org.apache.ignite.IgniteInterruptedException;
import org.apache.ignite.cache.CacheMetrics;
import org.apache.ignite.cache.CacheMode;
import org.apache.ignite.cluster.ClusterMetrics;
import org.apache.ignite.cluster.ClusterNode;
import org.apache.ignite.events.DiscoveryEvent;
import org.apache.ignite.events.Event;
import org.apache.ignite.internal.ClusterMetricsSnapshot;
import org.apache.ignite.internal.GridComponent;
import org.apache.ignite.internal.GridKernalContext;
import org.apache.ignite.internal.GridNodeOrderComparator;
import org.apache.ignite.internal.IgniteClientDisconnectedCheckedException;
import org.apache.ignite.internal.IgniteInternalFuture;
import org.apache.ignite.internal.IgniteKernal;
import org.apache.ignite.internal.IgniteNodeAttributes;
import org.apache.ignite.internal.events.DiscoveryCustomEvent;
import org.apache.ignite.internal.managers.GridManagerAdapter;
import org.apache.ignite.internal.managers.communication.GridIoManager;
import org.apache.ignite.internal.managers.eventstorage.GridLocalEventListener;
import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
import org.apache.ignite.internal.processors.cache.CacheAffinitySharedManager;
import org.apache.ignite.internal.processors.cache.GridCacheAdapter;
import org.apache.ignite.internal.processors.jobmetrics.GridJobMetrics;
import org.apache.ignite.internal.processors.security.SecurityContext;
import org.apache.ignite.internal.processors.service.GridServiceProcessor;
import org.apache.ignite.internal.processors.timeout.GridTimeoutProcessor;
import org.apache.ignite.internal.util.F0;
import org.apache.ignite.internal.util.GridBoundedConcurrentOrderedMap;
import org.apache.ignite.internal.util.GridSpinBusyLock;
import org.apache.ignite.internal.util.future.GridFinishedFuture;
import org.apache.ignite.internal.util.future.GridFutureAdapter;
import org.apache.ignite.internal.util.lang.GridTuple5;
import org.apache.ignite.internal.util.tostring.GridToStringExclude;
import org.apache.ignite.internal.util.tostring.GridToStringInclude;
import org.apache.ignite.internal.util.typedef.C1;
import org.apache.ignite.internal.util.typedef.CI1;
import org.apache.ignite.internal.util.typedef.F;
import org.apache.ignite.internal.util.typedef.G;
import org.apache.ignite.internal.util.typedef.P1;
import org.apache.ignite.internal.util.typedef.internal.CU;
import org.apache.ignite.internal.util.typedef.internal.LT;
import org.apache.ignite.internal.util.typedef.internal.S;
import org.apache.ignite.internal.util.typedef.internal.U;
import org.apache.ignite.internal.util.worker.GridWorker;
import org.apache.ignite.lang.IgniteFuture;
import org.apache.ignite.lang.IgnitePredicate;
import org.apache.ignite.lang.IgniteProductVersion;
import org.apache.ignite.lang.IgniteUuid;
import org.apache.ignite.plugin.security.SecurityCredentials;
import org.apache.ignite.plugin.segmentation.SegmentationPolicy;
import org.apache.ignite.spi.IgniteSpiException;
import org.apache.ignite.spi.discovery.DiscoveryMetricsProvider;
import org.apache.ignite.spi.discovery.DiscoverySpi;
import org.apache.ignite.spi.discovery.DiscoverySpiCustomMessage;
import org.apache.ignite.spi.discovery.DiscoverySpiDataExchange;
import org.apache.ignite.spi.discovery.DiscoverySpiHistorySupport;
import org.apache.ignite.spi.discovery.DiscoverySpiListener;
import org.apache.ignite.spi.discovery.DiscoverySpiNodeAuthenticator;
import org.apache.ignite.spi.discovery.DiscoverySpiOrderSupport;
import org.apache.ignite.spi.discovery.tcp.internal.TcpDiscoveryNode;
import org.apache.ignite.thread.IgniteThread;
import org.jetbrains.annotations.Nullable;
import org.jsr166.ConcurrentHashMap8;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static org.apache.ignite.IgniteSystemProperties.IGNITE_OPTIMIZED_MARSHALLER_USE_DEFAULT_SUID;
import static org.apache.ignite.IgniteSystemProperties.IGNITE_SERVICES_COMPATIBILITY_MODE;
import static org.apache.ignite.IgniteSystemProperties.IGNITE_BINARY_MARSHALLER_USE_STRING_SERIALIZATION_VER_2;
import static org.apache.ignite.events.EventType.EVT_CLIENT_NODE_DISCONNECTED;
import static org.apache.ignite.events.EventType.EVT_CLIENT_NODE_RECONNECTED;
import static org.apache.ignite.events.EventType.EVT_NODE_FAILED;
import static org.apache.ignite.events.EventType.EVT_NODE_JOINED;
import static org.apache.ignite.events.EventType.EVT_NODE_LEFT;
import static org.apache.ignite.events.EventType.EVT_NODE_METRICS_UPDATED;
import static org.apache.ignite.events.EventType.EVT_NODE_SEGMENTED;
import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_DEPLOYMENT_MODE;
import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_LATE_AFFINITY_ASSIGNMENT;
import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_MACS;
import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_MARSHALLER_USE_BINARY_STRING_SER_VER_2;
import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_MARSHALLER_USE_DFLT_SUID;
import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_PEER_CLASSLOADING;
import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_SERVICES_COMPATIBILITY_MODE;
import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_USER_NAME;
import static org.apache.ignite.internal.IgniteVersionUtils.VER;
import static org.apache.ignite.plugin.segmentation.SegmentationPolicy.NOOP;
/**
* Discovery SPI manager.
*/
public class GridDiscoveryManager extends GridManagerAdapter<DiscoverySpi> {
/** Fake key for {@code null}-named caches. Used inside {@link DiscoCache}. */
private static final String NULL_CACHE_NAME = UUID.randomUUID().toString();
/** Metrics update frequency. */
private static final long METRICS_UPDATE_FREQ = 3000;
/** */
private static final MemoryMXBean mem = ManagementFactory.getMemoryMXBean();
/** */
private static final OperatingSystemMXBean os = ManagementFactory.getOperatingSystemMXBean();
/** */
private static final RuntimeMXBean rt = ManagementFactory.getRuntimeMXBean();
/** */
private static final ThreadMXBean threads = ManagementFactory.getThreadMXBean();
/** */
private static final Collection<GarbageCollectorMXBean> gc = ManagementFactory.getGarbageCollectorMXBeans();
/** */
private static final String PREFIX = "Topology snapshot";
/** Discovery cached history size. */
protected static final int DISCOVERY_HISTORY_SIZE = 100;
/** Predicate filtering out daemon nodes. */
private static final IgnitePredicate<ClusterNode> FILTER_DAEMON = new P1<ClusterNode>() {
@Override public boolean apply(ClusterNode n) {
return !n.isDaemon();
}
};
/** Predicate filtering client nodes. */
private static final IgnitePredicate<ClusterNode> FILTER_CLI = new P1<ClusterNode>() {
@Override public boolean apply(ClusterNode n) {
return CU.clientNode(n);
}
};
/** Discovery event worker. */
private final DiscoveryWorker discoWrk = new DiscoveryWorker();
/** Network segment check worker. */
private SegmentCheckWorker segChkWrk;
/** Network segment check thread. */
private IgniteThread segChkThread;
/** Last logged topology. */
private final AtomicLong lastLoggedTop = new AtomicLong();
/** Local node. */
private ClusterNode locNode;
/** Local node daemon flag. */
private boolean isLocDaemon;
/** {@code True} if resolvers were configured and network segment check is enabled. */
private boolean hasRslvrs;
/** Last segment check result. */
private final AtomicBoolean lastSegChkRes = new AtomicBoolean(true);
/** Topology cache history. */
private final ConcurrentNavigableMap<AffinityTopologyVersion, DiscoCache> discoCacheHist =
new GridBoundedConcurrentOrderedMap<>(DISCOVERY_HISTORY_SIZE);
/** Topology snapshots history. */
private volatile Map<Long, Collection<ClusterNode>> topHist = new HashMap<>();
/** Topology version. */
private final AtomicReference<Snapshot> topSnap =
new AtomicReference<>(new Snapshot(AffinityTopologyVersion.ZERO, null));
/** Minor topology version. */
private int minorTopVer;
/** Order supported flag. */
private boolean discoOrdered;
/** Topology snapshots history supported flag. */
private boolean histSupported;
/** Configured network segment check frequency. */
private long segChkFreq;
/** Local node join to topology event. */
private GridFutureAdapter<DiscoveryEvent> locJoinEvt = new GridFutureAdapter<>();
/** GC CPU load. */
private volatile double gcCpuLoad;
/** CPU load. */
private volatile double cpuLoad;
/** Metrics. */
private final GridLocalMetrics metrics = createMetrics();
/** Metrics update worker. */
private GridTimeoutProcessor.CancelableTask metricsUpdateTask;
/** Custom event listener. */
private ConcurrentMap<Class<?>, List<CustomEventListener<DiscoveryCustomMessage>>> customEvtLsnrs =
new ConcurrentHashMap8<>();
/** Map of dynamic cache filters. */
private Map<String, CachePredicate> registeredCaches = new HashMap<>();
/** */
private final GridSpinBusyLock busyLock = new GridSpinBusyLock();
/** Received custom messages history. */
private final ArrayDeque<IgniteUuid> rcvdCustomMsgs = new ArrayDeque<>();
/** */
private final CountDownLatch startLatch = new CountDownLatch(1);
/**
 * Creates the discovery manager backed by the discovery SPI from the grid configuration.
 *
 * @param ctx Kernal context.
 */
public GridDiscoveryManager(GridKernalContext ctx) {
    super(ctx, ctx.config().getDiscoverySpi());
}
/**
 * @return Memory usage of non-heap memory, or a zeroed {@link MemoryUsage}
 *      if the platform MX bean throws (see WebSphere workaround below).
 */
private MemoryUsage nonHeapMemoryUsage() {
    // Workaround of exception in WebSphere.
    // We received the following exception:
    // java.lang.IllegalArgumentException: used value cannot be larger than the committed value
    // at java.lang.management.MemoryUsage.<init>(MemoryUsage.java:105)
    // at com.ibm.lang.management.MemoryMXBeanImpl.getNonHeapMemoryUsageImpl(Native Method)
    // at com.ibm.lang.management.MemoryMXBeanImpl.getNonHeapMemoryUsage(MemoryMXBeanImpl.java:143)
    // at org.apache.ignite.spi.metrics.jdk.GridJdkLocalMetricsSpi.getMetrics(GridJdkLocalMetricsSpi.java:242)
    //
    // We so had to workaround this with exception handling, because we can not control classes from WebSphere.
    try {
        return mem.getNonHeapMemoryUsage();
    }
    catch (IllegalArgumentException ignored) {
        return new MemoryUsage(0, 0, 0, 0);
    }
}
/** {@inheritDoc} */
@Override public void onBeforeSpiStart() {
    DiscoverySpi spi = getSpi();

    // Attributes must be set on the SPI before it starts so they are carried
    // to remote nodes during topology join.
    spi.setNodeAttributes(ctx.nodeAttributes(), VER);
}
/**
 * Adds dynamic cache filter.
 *
 * @param cacheName Cache name.
 * @param filter Cache node filter.
 * @param nearEnabled Near enabled flag.
 * @param cacheMode Cache mode.
 */
public void setCacheFilter(
    String cacheName,
    IgnitePredicate<ClusterNode> filter,
    boolean nearEnabled,
    CacheMode cacheMode
) {
    // First registration wins: a cache that is already registered keeps its predicate.
    if (!registeredCaches.containsKey(cacheName))
        registeredCaches.put(cacheName, new CachePredicate(filter, nearEnabled, cacheMode));
}
/**
 * Removes dynamic cache filter. The cache must have been registered
 * via {@link #setCacheFilter} before.
 *
 * @param cacheName Cache name.
 */
public void removeCacheFilter(String cacheName) {
    CachePredicate p = registeredCaches.remove(cacheName);

    assert p != null : cacheName;
}
/**
 * Adds near node ID to cache filter.
 *
 * @param cacheName Cache name (must already be registered).
 * @param clientNodeId Near node ID.
 * @param nearEnabled Near enabled flag.
 * @return {@code True} if new node ID was added.
 */
public boolean addClientNode(String cacheName, UUID clientNodeId, boolean nearEnabled) {
    CachePredicate p = registeredCaches.get(cacheName);

    assert p != null : cacheName;

    return p.addClientNode(clientNodeId, nearEnabled);
}
/**
 * Removes near node ID from cache filter when a client closes the cache.
 *
 * @param cacheName Cache name (must already be registered).
 * @param clientNodeId Near node ID.
 * @return {@code True} if existing node ID was removed.
 */
public boolean onClientCacheClose(String cacheName, UUID clientNodeId) {
    CachePredicate p = registeredCaches.get(cacheName);

    assert p != null : cacheName;

    return p.onNodeLeft(clientNodeId);
}
/**
 * @return Snapshot of client nodes per registered cache, or {@code null}
 *      if no cache currently tracks any client node.
 */
public Map<String, Map<UUID, Boolean>> clientNodesMap() {
    Map<String, Map<UUID, Boolean>> map = null;

    for (Map.Entry<String, CachePredicate> e : registeredCaches.entrySet()) {
        CachePredicate cachePred = e.getValue();

        if (F.isEmpty(cachePred.clientNodes))
            continue;

        if (map == null)
            map = U.newHashMap(registeredCaches.size());

        map.put(e.getKey(), new HashMap<>(cachePred.clientNodes));
    }

    return map;
}
/**
 * Drops the given node from the client tracking of every registered cache.
 *
 * @param leftNodeId ID of the node that left topology.
 */
private void updateClientNodes(UUID leftNodeId) {
    for (CachePredicate cachePred : registeredCaches.values())
        cachePred.onNodeLeft(leftNodeId);
}
/** {@inheritDoc} */
@Override protected void onKernalStart0() throws IgniteCheckedException {
    // Suggest pure client mode when this node is configured as a client
    // but the discovery SPI was forced into server mode.
    if (Boolean.TRUE.equals(ctx.config().isClientMode()) && !getSpi().isClientMode())
        ctx.performance().add("Enable client mode for TcpDiscoverySpi " +
            "(set TcpDiscoverySpi.forceServerMode to false)");
}
/**
 * {@inheritDoc}
 * <p>
 * Starts the discovery subsystem: validates segmentation settings, installs the
 * metrics provider, the node authenticator (if security is enabled), the main
 * discovery listener and the per-component data exchange handler on the SPI,
 * starts the SPI, then launches the segment-check and discovery worker threads.
 */
@Override public void start() throws IgniteCheckedException {
    // Record the total physical memory size as a node attribute (best effort).
    long totSysMemory = -1;

    try {
        totSysMemory = U.<Long>property(os, "totalPhysicalMemorySize");
    }
    catch (RuntimeException ignored) {
        // No-op.
    }

    ctx.addNodeAttribute(IgniteNodeAttributes.ATTR_PHY_RAM, totSysMemory);

    DiscoverySpi spi = getSpi();

    discoOrdered = discoOrdered();

    histSupported = historySupported();

    isLocDaemon = ctx.isDaemon();

    hasRslvrs = !ctx.config().isClientMode() && !F.isEmpty(ctx.config().getSegmentationResolvers());

    segChkFreq = ctx.config().getSegmentCheckFrequency();

    // Validate segmentation configuration and perform the initial segment check.
    if (hasRslvrs) {
        if (segChkFreq < 0)
            throw new IgniteCheckedException("Segment check frequency cannot be negative: " + segChkFreq);

        if (segChkFreq > 0 && segChkFreq < 2000)
            U.warn(log, "Configuration parameter 'segmentCheckFrequency' is too low " +
                "(at least 2000 ms recommended): " + segChkFreq);

        int segResAttemp = ctx.config().getSegmentationResolveAttempts();

        if (segResAttemp < 1)
            throw new IgniteCheckedException(
                "Segment resolve attempts cannot be negative or zero: " + segResAttemp);

        checkSegmentOnStart();
    }

    metricsUpdateTask = ctx.timeout().schedule(new MetricsUpdater(), METRICS_UPDATE_FREQ, METRICS_UPDATE_FREQ);

    spi.setMetricsProvider(createMetricsProvider());

    // Delegate node authentication to the security processor when enabled.
    if (ctx.security().enabled()) {
        spi.setAuthenticator(new DiscoverySpiNodeAuthenticator() {
            @Override public SecurityContext authenticateNode(ClusterNode node, SecurityCredentials cred) {
                try {
                    return ctx.security().authenticateNode(node, cred);
                }
                catch (IgniteCheckedException e) {
                    throw U.convertException(e);
                }
            }

            @Override public boolean isGlobalNodeAuthentication() {
                return ctx.security().isGlobalNodeAuthentication();
            }
        });
    }

    // Main discovery listener: maintains topology version/history and forwards
    // events to the discovery worker. The SPI invokes it from a single thread,
    // so history maintenance below is free of races with event notification.
    spi.setListener(new DiscoverySpiListener() {
        // Grid start time reported by the SPI at local join; used later to
        // detect a full cluster restart on client reconnect.
        private long gridStartTime;

        @Override public void onDiscovery(
            final int type,
            final long topVer,
            final ClusterNode node,
            final Collection<ClusterNode> topSnapshot,
            final Map<Long, Collection<ClusterNode>> snapshots,
            @Nullable DiscoverySpiCustomMessage spiCustomMsg
        ) {
            DiscoveryCustomMessage customMsg = spiCustomMsg == null ? null
                : ((CustomMessageWrapper)spiCustomMsg).delegate();

            // Drop duplicated custom messages (bounded history in skipMessage).
            if (skipMessage(type, customMsg))
                return;

            final ClusterNode locNode = localNode();

            if (snapshots != null)
                topHist = snapshots;

            boolean verChanged;

            if (type == EVT_NODE_METRICS_UPDATED)
                verChanged = false;
            else {
                if (type != EVT_NODE_SEGMENTED &&
                    type != EVT_CLIENT_NODE_DISCONNECTED &&
                    type != EVT_CLIENT_NODE_RECONNECTED &&
                    type != DiscoveryCustomEvent.EVT_DISCOVERY_CUSTOM_EVT) {
                    minorTopVer = 0;

                    verChanged = true;
                }
                else
                    verChanged = false;
            }

            if (type == EVT_NODE_FAILED || type == EVT_NODE_LEFT) {
                for (DiscoCache c : discoCacheHist.values())
                    c.updateAlives(node);

                updateClientNodes(node.id());
            }

            final AffinityTopologyVersion nextTopVer;

            if (type == DiscoveryCustomEvent.EVT_DISCOVERY_CUSTOM_EVT) {
                assert customMsg != null;

                // A custom event may bump the minor topology version only.
                boolean incMinorTopVer = ctx.cache().onCustomEvent(customMsg,
                    new AffinityTopologyVersion(topVer, minorTopVer));

                if (incMinorTopVer) {
                    minorTopVer++;

                    verChanged = true;
                }

                nextTopVer = new AffinityTopologyVersion(topVer, minorTopVer);
            }
            else {
                nextTopVer = new AffinityTopologyVersion(topVer, minorTopVer);

                ctx.cache().onDiscoveryEvent(type, node, nextTopVer);
            }

            // Notify direct custom event listeners registered for the message
            // class or any of its superclasses.
            if (type == DiscoveryCustomEvent.EVT_DISCOVERY_CUSTOM_EVT) {
                for (Class cls = customMsg.getClass(); cls != null; cls = cls.getSuperclass()) {
                    List<CustomEventListener<DiscoveryCustomMessage>> list = customEvtLsnrs.get(cls);

                    if (list != null) {
                        for (CustomEventListener<DiscoveryCustomMessage> lsnr : list) {
                            try {
                                lsnr.onCustomEvent(nextTopVer, node, customMsg);
                            }
                            catch (Exception e) {
                                U.error(log, "Failed to notify direct custom event listener: " + customMsg, e);
                            }
                        }
                    }
                }
            }

            // Put topology snapshot into discovery history.
            // There is no race possible between history maintenance and concurrent discovery
            // event notifications, since SPI notifies manager about all events from this listener.
            if (verChanged) {
                DiscoCache cache = new DiscoCache(locNode, F.view(topSnapshot, F.remoteNodes(locNode.id())));

                discoCacheHist.put(nextTopVer, cache);

                boolean set = updateTopologyVersionIfGreater(nextTopVer, cache);

                assert set || topVer == 0 : "Topology version has not been updated [this.topVer=" +
                    topSnap + ", topVer=" + topVer + ", node=" + node +
                    ", evt=" + U.gridEventName(type) + ']';
            }

            // If this is a local join event, just save it and do not notify listeners.
            if (type == EVT_NODE_JOINED && node.id().equals(locNode.id())) {
                if (gridStartTime == 0)
                    gridStartTime = getSpi().getGridStartTime();

                updateTopologyVersionIfGreater(new AffinityTopologyVersion(locNode.order()),
                    new DiscoCache(localNode(), F.view(topSnapshot, F.remoteNodes(locNode.id()))));

                // Unblock start(): the local node is now part of topology.
                startLatch.countDown();

                DiscoveryEvent discoEvt = new DiscoveryEvent();

                discoEvt.node(ctx.discovery().localNode());
                discoEvt.eventNode(node);
                discoEvt.type(EVT_NODE_JOINED);

                discoEvt.topologySnapshot(topVer, new ArrayList<>(
                    F.viewReadOnly(topSnapshot, new C1<ClusterNode, ClusterNode>() {
                        @Override public ClusterNode apply(ClusterNode e) {
                            return e;
                        }
                    }, FILTER_DAEMON)));

                locJoinEvt.onDone(discoEvt);

                return;
            }
            else if (type == EVT_CLIENT_NODE_DISCONNECTED) {
                /*
                 * Notify all components from discovery thread to avoid concurrent
                 * reconnect while disconnect handling is in progress.
                 */
                assert locNode.isClient() : locNode;
                assert node.isClient() : node;

                ((IgniteKernal)ctx.grid()).onDisconnected();

                locJoinEvt = new GridFutureAdapter<>();

                // Reset all topology state accumulated before the disconnect.
                registeredCaches.clear();

                discoCacheHist.clear();

                topHist.clear();

                topSnap.set(new Snapshot(AffinityTopologyVersion.ZERO,
                    new DiscoCache(locNode, Collections.<ClusterNode>emptySet())));
            }
            else if (type == EVT_CLIENT_NODE_RECONNECTED) {
                assert locNode.isClient() : locNode;
                assert node.isClient() : node;

                // A different grid start time means the whole cluster was restarted
                // while this client was away.
                boolean clusterRestarted = gridStartTime != getSpi().getGridStartTime();

                gridStartTime = getSpi().getGridStartTime();

                ((IgniteKernal)ctx.grid()).onReconnected(clusterRestarted);

                // Deliver the reconnect event only after the reconnect future completes.
                ctx.cluster().clientReconnectFuture().listen(new CI1<IgniteFuture<?>>() {
                    @Override public void apply(IgniteFuture<?> fut) {
                        try {
                            fut.get();

                            discoWrk.addEvent(type, nextTopVer, node, topSnapshot, null);
                        }
                        catch (IgniteException ignore) {
                            // No-op.
                        }
                    }
                });

                return;
            }

            if (type == EVT_CLIENT_NODE_DISCONNECTED || type == EVT_NODE_SEGMENTED || !ctx.clientDisconnected())
                discoWrk.addEvent(type, nextTopVer, node, topSnapshot, customMsg);
        }
    });

    // Exchange of per-component discovery data with joining nodes; components are
    // matched by the ordinal of their discovery data type.
    spi.setDataExchange(new DiscoverySpiDataExchange() {
        @Override public Map<Integer, Serializable> collect(UUID nodeId) {
            assert nodeId != null;

            Map<Integer, Serializable> data = new HashMap<>();

            for (GridComponent comp : ctx.components()) {
                Serializable compData = comp.collectDiscoveryData(nodeId);

                if (compData != null) {
                    assert comp.discoveryDataType() != null;

                    data.put(comp.discoveryDataType().ordinal(), compData);
                }
            }

            return data;
        }

        @Override public void onExchange(UUID joiningNodeId, UUID nodeId, Map<Integer, Serializable> data) {
            for (Map.Entry<Integer, Serializable> e : data.entrySet()) {
                GridComponent comp = null;

                for (GridComponent c : ctx.components()) {
                    if (c.discoveryDataType() != null && c.discoveryDataType().ordinal() == e.getKey()) {
                        comp = c;

                        break;
                    }
                }

                if (comp != null)
                    comp.onDiscoveryDataReceived(joiningNodeId, nodeId, e.getValue());
                else {
                    if (log.isDebugEnabled())
                        log.debug("Received discovery data for unknown component: " + e.getKey());
                }
            }
        }
    });

    startSpi();

    // Block until the local join event arrives (startLatch is counted down
    // by the discovery listener above).
    try {
        U.await(startLatch);
    }
    catch (IgniteInterruptedException e) {
        throw new IgniteCheckedException("Failed to start discovery manager (thread has been interrupted).", e);
    }

    // Start segment check worker only if frequency is greater than 0.
    if (hasRslvrs && segChkFreq > 0) {
        segChkWrk = new SegmentCheckWorker();

        segChkThread = new IgniteThread(segChkWrk);

        segChkThread.start();
    }

    locNode = spi.getLocalNode();

    checkAttributes(discoCache().remoteNodes());

    ctx.service().initCompatibilityMode(discoCache().remoteNodes());

    // Start discovery worker.
    new IgniteThread(discoWrk).start();

    if (log.isDebugEnabled())
        log.debug(startInfo());
}
/**
 * Filters out custom discovery messages that were already processed, keeping a
 * bounded history of recently seen message IDs.
 *
 * @param type Message type.
 * @param customMsg Custom message, if any.
 * @return {@code True} if should not process message.
 */
private boolean skipMessage(int type, @Nullable DiscoveryCustomMessage customMsg) {
    if (type != DiscoveryCustomEvent.EVT_DISCOVERY_CUSTOM_EVT)
        return false;

    assert customMsg != null && customMsg.id() != null : customMsg;

    IgniteUuid msgId = customMsg.id();

    if (rcvdCustomMsgs.contains(msgId)) {
        if (log.isDebugEnabled())
            log.debug("Received duplicated custom message, will ignore [msg=" + customMsg + "]");

        return true;
    }

    rcvdCustomMsgs.addLast(msgId);

    // Evict oldest IDs to keep the history bounded.
    while (rcvdCustomMsgs.size() > DISCOVERY_HISTORY_SIZE)
        rcvdCustomMsgs.pollFirst();

    return false;
}
/**
 * Registers a listener for custom discovery events carrying messages of the
 * given class (listeners are also matched by superclass, see the dispatch in
 * the discovery listener).
 *
 * @param msgCls Custom message class.
 * @param lsnr Custom event listener.
 */
public <T extends DiscoveryCustomMessage> void setCustomEventListener(Class<T> msgCls, CustomEventListener<T> lsnr) {
    List<CustomEventListener<DiscoveryCustomMessage>> list = customEvtLsnrs.get(msgCls);

    if (list == null) {
        // Concurrent registration is possible: add the list atomically.
        list = F.addIfAbsent(customEvtLsnrs, msgCls,
            new CopyOnWriteArrayList<CustomEventListener<DiscoveryCustomMessage>>());
    }

    list.add((CustomEventListener<DiscoveryCustomMessage>)lsnr);
}
/**
 * Creates the local VM metrics facade backed by the JVM management beans
 * ({@code OperatingSystemMXBean}, {@code MemoryMXBean}, {@code RuntimeMXBean},
 * {@code ThreadMXBean}); CPU/GC load values come from the volatile fields that
 * are refreshed periodically (see {@code MetricsUpdater}).
 *
 * @return Metrics.
 */
private GridLocalMetrics createMetrics() {
    return new GridLocalMetrics() {
        @Override public int getAvailableProcessors() {
            return os.getAvailableProcessors();
        }

        @Override public double getCurrentCpuLoad() {
            return cpuLoad;
        }

        @Override public double getCurrentGcCpuLoad() {
            return gcCpuLoad;
        }

        @Override public long getHeapMemoryInitialized() {
            return mem.getHeapMemoryUsage().getInit();
        }

        @Override public long getHeapMemoryUsed() {
            return mem.getHeapMemoryUsage().getUsed();
        }

        @Override public long getHeapMemoryCommitted() {
            return mem.getHeapMemoryUsage().getCommitted();
        }

        @Override public long getHeapMemoryMaximum() {
            return mem.getHeapMemoryUsage().getMax();
        }

        @Override public long getNonHeapMemoryInitialized() {
            return nonHeapMemoryUsage().getInit();
        }

        @Override public long getNonHeapMemoryUsed() {
            return nonHeapMemoryUsage().getUsed();
        }

        @Override public long getNonHeapMemoryCommitted() {
            return nonHeapMemoryUsage().getCommitted();
        }

        @Override public long getNonHeapMemoryMaximum() {
            return nonHeapMemoryUsage().getMax();
        }

        @Override public long getUptime() {
            return rt.getUptime();
        }

        @Override public long getStartTime() {
            return rt.getStartTime();
        }

        @Override public int getThreadCount() {
            return threads.getThreadCount();
        }

        @Override public int getPeakThreadCount() {
            return threads.getPeakThreadCount();
        }

        @Override public long getTotalStartedThreadCount() {
            return threads.getTotalStartedThreadCount();
        }

        @Override public int getDaemonThreadCount() {
            return threads.getDaemonThreadCount();
        }
    };
}
/**
 * Creates the metrics provider that is installed into the discovery SPI; on each
 * request it assembles a full {@link ClusterMetricsSnapshot} out of job, task,
 * VM, data and I/O counters.
 *
 * @return Metrics provider.
 */
private DiscoveryMetricsProvider createMetricsProvider() {
    return new DiscoveryMetricsProvider() {
        /** Time this provider (i.e. the local node) was created. */
        private final long startTime = U.currentTimeMillis();

        /** {@inheritDoc} */
        @Override public ClusterMetrics metrics() {
            GridJobMetrics jm = ctx.jobMetric().getJobMetrics();

            ClusterMetricsSnapshot nm = new ClusterMetricsSnapshot();

            nm.setLastUpdateTime(U.currentTimeMillis());

            // Job metrics.
            nm.setMaximumActiveJobs(jm.getMaximumActiveJobs());
            nm.setCurrentActiveJobs(jm.getCurrentActiveJobs());
            nm.setAverageActiveJobs(jm.getAverageActiveJobs());
            nm.setMaximumWaitingJobs(jm.getMaximumWaitingJobs());
            nm.setCurrentWaitingJobs(jm.getCurrentWaitingJobs());
            nm.setAverageWaitingJobs(jm.getAverageWaitingJobs());
            nm.setMaximumRejectedJobs(jm.getMaximumRejectedJobs());
            nm.setCurrentRejectedJobs(jm.getCurrentRejectedJobs());
            nm.setAverageRejectedJobs(jm.getAverageRejectedJobs());
            nm.setMaximumCancelledJobs(jm.getMaximumCancelledJobs());
            nm.setCurrentCancelledJobs(jm.getCurrentCancelledJobs());
            nm.setAverageCancelledJobs(jm.getAverageCancelledJobs());
            nm.setTotalRejectedJobs(jm.getTotalRejectedJobs());
            nm.setTotalCancelledJobs(jm.getTotalCancelledJobs());
            nm.setTotalExecutedJobs(jm.getTotalExecutedJobs());
            nm.setMaximumJobWaitTime(jm.getMaximumJobWaitTime());
            nm.setCurrentJobWaitTime(jm.getCurrentJobWaitTime());
            nm.setAverageJobWaitTime(jm.getAverageJobWaitTime());
            nm.setMaximumJobExecuteTime(jm.getMaximumJobExecuteTime());
            nm.setCurrentJobExecuteTime(jm.getCurrentJobExecuteTime());
            nm.setAverageJobExecuteTime(jm.getAverageJobExecuteTime());
            nm.setCurrentIdleTime(jm.getCurrentIdleTime());
            nm.setTotalIdleTime(jm.getTotalIdleTime());
            nm.setAverageCpuLoad(jm.getAverageCpuLoad());

            // Task metrics.
            nm.setTotalExecutedTasks(ctx.task().getTotalExecutedTasks());

            // VM metrics.
            nm.setAvailableProcessors(metrics.getAvailableProcessors());
            nm.setCurrentCpuLoad(metrics.getCurrentCpuLoad());
            nm.setCurrentGcCpuLoad(metrics.getCurrentGcCpuLoad());
            nm.setHeapMemoryInitialized(metrics.getHeapMemoryInitialized());
            nm.setHeapMemoryUsed(metrics.getHeapMemoryUsed());
            nm.setHeapMemoryCommitted(metrics.getHeapMemoryCommitted());
            nm.setHeapMemoryMaximum(metrics.getHeapMemoryMaximum());
            nm.setHeapMemoryTotal(metrics.getHeapMemoryMaximum());
            nm.setNonHeapMemoryInitialized(metrics.getNonHeapMemoryInitialized());
            nonHeapMemoryUsed(nm);
            nm.setNonHeapMemoryCommitted(metrics.getNonHeapMemoryCommitted());
            nm.setNonHeapMemoryMaximum(metrics.getNonHeapMemoryMaximum());
            nm.setNonHeapMemoryTotal(metrics.getNonHeapMemoryMaximum());
            nm.setUpTime(metrics.getUptime());
            nm.setStartTime(metrics.getStartTime());
            nm.setNodeStartTime(startTime);
            nm.setCurrentThreadCount(metrics.getThreadCount());
            nm.setMaximumThreadCount(metrics.getPeakThreadCount());
            nm.setTotalStartedThreadCount(metrics.getTotalStartedThreadCount());
            nm.setCurrentDaemonThreadCount(metrics.getDaemonThreadCount());
            nm.setTotalNodes(1);

            // Data metrics.
            nm.setLastDataVersion(ctx.cache().lastDataVersion());

            GridIoManager io = ctx.io();

            // IO metrics.
            nm.setSentMessagesCount(io.getSentMessagesCount());
            nm.setSentBytesCount(io.getSentBytesCount());
            nm.setReceivedMessagesCount(io.getReceivedMessagesCount());
            nm.setReceivedBytesCount(io.getReceivedBytesCount());
            nm.setOutboundMessagesQueueSize(io.getOutboundMessagesQueueSize());

            return nm;
        }

        /**
         * Sets the non-heap usage on the snapshot: JVM non-heap plus the
         * off-heap memory allocated by local caches.
         *
         * @param nm Initializing metrics snapshot.
         */
        private void nonHeapMemoryUsed(ClusterMetricsSnapshot nm) {
            long nonHeapUsed = metrics.getNonHeapMemoryUsed();

            Map<Integer, CacheMetrics> nodeCacheMetrics = cacheMetrics();

            if (nodeCacheMetrics != null) {
                for (Map.Entry<Integer, CacheMetrics> entry : nodeCacheMetrics.entrySet()) {
                    CacheMetrics e = entry.getValue();

                    if (e != null)
                        nonHeapUsed += e.getOffHeapAllocatedSize();
                }
            }

            nm.setNonHeapMemoryUsed(nonHeapUsed);
        }

        /** {@inheritDoc} */
        @Override public Map<Integer, CacheMetrics> cacheMetrics() {
            Collection<GridCacheAdapter<?, ?>> caches = ctx.cache().internalCaches();

            if (F.isEmpty(caches))
                return Collections.emptyMap();

            Map<Integer, CacheMetrics> metrics = null;

            for (GridCacheAdapter<?, ?> cache : caches) {
                // Only report caches with statistics enabled that are fully started.
                if (cache.configuration().isStatisticsEnabled() &&
                    cache.context().started() &&
                    cache.context().affinity().affinityTopologyVersion().topologyVersion() > 0) {
                    if (metrics == null)
                        metrics = U.newHashMap(caches.size());

                    metrics.put(cache.context().cacheId(), cache.localMetrics());
                }
            }

            return metrics == null ? Collections.<Integer, CacheMetrics>emptyMap() : metrics;
        }
    };
}
/**
 * @return Local VM metrics facade (CPU/GC load, memory and thread counters).
 */
public GridLocalMetrics metrics() {
    return metrics;
}
/**
 * Checks whether the configured discovery SPI class is annotated with
 * {@link DiscoverySpiOrderSupport}.
 *
 * @return {@code True} if ordering is supported.
 */
private boolean discoOrdered() {
    Class<?> spiCls = ctx.config().getDiscoverySpi().getClass();

    DiscoverySpiOrderSupport ann = U.getAnnotation(spiCls, DiscoverySpiOrderSupport.class);

    return ann != null && ann.value();
}
/**
 * Checks whether the configured discovery SPI class is annotated with
 * {@link DiscoverySpiHistorySupport}.
 *
 * @return {@code True} if topology snapshots history is supported.
 */
private boolean historySupported() {
    Class<?> spiCls = ctx.config().getDiscoverySpi().getClass();

    DiscoverySpiHistorySupport ann = U.getAnnotation(spiCls, DiscoverySpiHistorySupport.class);

    return ann != null && ann.value();
}
/**
 * Checks segment on start waiting for correct segment if necessary. Blocks the
 * starting thread, retrying every 2 seconds, when the node is configured to
 * wait for a valid segment; fails fast otherwise.
 *
 * @throws IgniteCheckedException If check failed.
 */
private void checkSegmentOnStart() throws IgniteCheckedException {
    assert hasRslvrs;

    if (log.isDebugEnabled())
        log.debug("Starting network segment check.");

    while (true) {
        if (ctx.segmentation().isValidSegment())
            break;

        if (ctx.config().isWaitForSegmentOnStart()) {
            LT.warn(log, null, "Failed to check network segment (retrying every 2000 ms).");

            // Wait and check again.
            U.sleep(2000);
        }
        else
            throw new IgniteCheckedException("Failed to check network segment.");
    }

    if (log.isDebugEnabled())
        log.debug("Finished network segment check successfully.");
}
/**
 * Checks whether attributes of the local node are consistent with remote nodes.
 * <p>
 * Mismatches in informational settings (IPv4 stack preference, Java major version)
 * only produce warnings; mismatches in settings that affect correctness (deployment
 * mode, peer class loading, marshaller flags, affinity assignment mode, services
 * compatibility mode) fail the check with an exception.
 *
 * @param nodes List of remote nodes to check attributes on.
 * @throws IgniteCheckedException If a critical attribute mismatch is detected.
 */
private void checkAttributes(Iterable<ClusterNode> nodes) throws IgniteCheckedException {
    ClusterNode locNode = getSpi().getLocalNode();

    assert locNode != null;

    // Fetch local node attributes once.
    String locPreferIpV4 = locNode.attribute("java.net.preferIPv4Stack");

    Object locMode = locNode.attribute(ATTR_DEPLOYMENT_MODE);

    int locJvmMajVer = nodeJavaMajorVersion(locNode);

    boolean locP2pEnabled = locNode.attribute(ATTR_PEER_CLASSLOADING);

    // Each warning below is reported at most once per invocation.
    boolean ipV4Warned = false;

    boolean jvmMajVerWarned = false;

    // Missing marshaller attribute is treated as 'true' (historical default).
    Boolean locMarshUseDfltSuid = locNode.attribute(ATTR_MARSHALLER_USE_DFLT_SUID);
    boolean locMarshUseDfltSuidBool = locMarshUseDfltSuid == null ? true : locMarshUseDfltSuid;

    Boolean locMarshStrSerVer2 = locNode.attribute(ATTR_MARSHALLER_USE_BINARY_STRING_SER_VER_2);
    boolean locMarshStrSerVer2Bool = locMarshStrSerVer2 == null ?
        false /* turned on and added to the attributes list by default only when BinaryMarshaller is used. */:
        locMarshStrSerVer2;

    boolean locDelayAssign = locNode.attribute(ATTR_LATE_AFFINITY_ASSIGNMENT);

    Boolean locSrvcCompatibilityEnabled = locNode.attribute(ATTR_SERVICES_COMPATIBILITY_MODE);

    for (ClusterNode n : nodes) {
        // Warn (once) when the remote JVM major version differs from the local one.
        int rmtJvmMajVer = nodeJavaMajorVersion(n);

        if (locJvmMajVer != rmtJvmMajVer && !jvmMajVerWarned) {
            U.warn(log, "Local java version is different from remote [loc=" +
                locJvmMajVer + ", rmt=" + rmtJvmMajVer + "]");

            jvmMajVerWarned = true;
        }

        // Warn (once) when the IPv4 stack preference differs between nodes.
        String rmtPreferIpV4 = n.attribute("java.net.preferIPv4Stack");

        if (!F.eq(rmtPreferIpV4, locPreferIpV4)) {
            if (!ipV4Warned)
                U.warn(log, "Local node's value of 'java.net.preferIPv4Stack' " +
                    "system property differs from remote node's " +
                    "(all nodes in topology should have identical value) " +
                    "[locPreferIpV4=" + locPreferIpV4 + ", rmtPreferIpV4=" + rmtPreferIpV4 +
                    ", locId8=" + U.id8(locNode.id()) + ", rmtId8=" + U.id8(n.id()) +
                    ", rmtAddrs=" + U.addressesAsString(n) + ']',
                    "Local and remote 'java.net.preferIPv4Stack' system properties do not match.");

            ipV4Warned = true;
        }

        // Daemon nodes are allowed to have any deployment they need.
        // Skip data center ID check for daemon nodes.
        if (!isLocDaemon && !n.isDaemon()) {
            Object rmtMode = n.attribute(ATTR_DEPLOYMENT_MODE);

            if (!locMode.equals(rmtMode))
                throw new IgniteCheckedException("Remote node has deployment mode different from local " +
                    "[locId8=" + U.id8(locNode.id()) + ", locMode=" + locMode +
                    ", rmtId8=" + U.id8(n.id()) + ", rmtMode=" + rmtMode +
                    ", rmtAddrs=" + U.addressesAsString(n) + ']');

            boolean rmtP2pEnabled = n.attribute(ATTR_PEER_CLASSLOADING);

            if (locP2pEnabled != rmtP2pEnabled)
                throw new IgniteCheckedException("Remote node has peer class loading enabled flag different from" +
                    " local [locId8=" + U.id8(locNode.id()) + ", locPeerClassLoading=" + locP2pEnabled +
                    ", rmtId8=" + U.id8(n.id()) + ", rmtPeerClassLoading=" + rmtP2pEnabled +
                    ", rmtAddrs=" + U.addressesAsString(n) + ']');
        }

        // Marshaller 'use default serialVersionUID' flag must match cluster-wide.
        Boolean rmtMarshUseDfltSuid = n.attribute(ATTR_MARSHALLER_USE_DFLT_SUID);
        boolean rmtMarshUseDfltSuidBool = rmtMarshUseDfltSuid == null ? true : rmtMarshUseDfltSuid;

        if (locMarshUseDfltSuidBool != rmtMarshUseDfltSuidBool) {
            throw new IgniteCheckedException("Local node's " + IGNITE_OPTIMIZED_MARSHALLER_USE_DEFAULT_SUID +
                " property value differs from remote node's value " +
                "(to make sure all nodes in topology have identical marshaller settings, " +
                "configure system property explicitly) " +
                "[locMarshUseDfltSuid=" + locMarshUseDfltSuid + ", rmtMarshUseDfltSuid=" + rmtMarshUseDfltSuid +
                ", locNodeAddrs=" + U.addressesAsString(locNode) +
                ", rmtNodeAddrs=" + U.addressesAsString(n) +
                ", locNodeId=" + locNode.id() + ", rmtNodeId=" + n.id() + ']');
        }

        // Binary string serialization version must match cluster-wide.
        Boolean rmtMarshStrSerVer2 = n.attribute(ATTR_MARSHALLER_USE_BINARY_STRING_SER_VER_2);
        boolean rmtMarshStrSerVer2Bool = rmtMarshStrSerVer2 == null ? false : rmtMarshStrSerVer2;

        if (locMarshStrSerVer2Bool != rmtMarshStrSerVer2Bool) {
            throw new IgniteCheckedException("Local node's " + IGNITE_BINARY_MARSHALLER_USE_STRING_SERIALIZATION_VER_2 +
                " property value differs from remote node's value " +
                "(to make sure all nodes in topology have identical marshaller settings, " +
                "configure system property explicitly) " +
                "[locMarshStrSerVer2=" + locMarshStrSerVer2 + ", rmtMarshStrSerVer2=" + rmtMarshStrSerVer2 +
                ", locNodeAddrs=" + U.addressesAsString(locNode) +
                ", rmtNodeAddrs=" + U.addressesAsString(n) +
                ", locNodeId=" + locNode.id() + ", rmtNodeId=" + n.id() + ']');
        }

        // Late affinity assignment: nodes older than the feature are treated as 'false'.
        boolean rmtLateAssign;

        if (n.version().compareToIgnoreTimestamp(CacheAffinitySharedManager.LATE_AFF_ASSIGN_SINCE) >= 0)
            rmtLateAssign = n.attribute(ATTR_LATE_AFFINITY_ASSIGNMENT);
        else
            rmtLateAssign = false;

        if (locDelayAssign != rmtLateAssign) {
            throw new IgniteCheckedException("Remote node has cache affinity assignment mode different from local " +
                "[locId8=" + U.id8(locNode.id()) +
                ", locDelayAssign=" + locDelayAssign +
                ", rmtId8=" + U.id8(n.id()) +
                ", rmtLateAssign=" + rmtLateAssign +
                ", rmtAddrs=" + U.addressesAsString(n) + ']');
        }

        // Services compatibility mode is only comparable on nodes that support lazy
        // services configuration; older remotes are rejected when the local node
        // explicitly disabled the compatibility mode.
        if (n.version().compareToIgnoreTimestamp(GridServiceProcessor.LAZY_SERVICES_CFG_SINCE) >= 0) {
            Boolean rmtSrvcCompatibilityEnabled = n.attribute(ATTR_SERVICES_COMPATIBILITY_MODE);

            if (!F.eq(locSrvcCompatibilityEnabled, rmtSrvcCompatibilityEnabled)) {
                throw new IgniteCheckedException("Local node's " + IGNITE_SERVICES_COMPATIBILITY_MODE +
                    " property value differs from remote node's value " +
                    "(to make sure all nodes in topology have identical IgniteServices compatibility mode enabled, " +
                    "configure system property explicitly) " +
                    "[locSrvcCompatibilityEnabled=" + locSrvcCompatibilityEnabled +
                    ", rmtSrvcCompatibilityEnabled=" + rmtSrvcCompatibilityEnabled +
                    ", locNodeAddrs=" + U.addressesAsString(locNode) +
                    ", rmtNodeAddrs=" + U.addressesAsString(n) +
                    ", locNodeId=" + locNode.id() + ", rmtNodeId=" + n.id() + ']');
            }
        }
        else if (Boolean.FALSE.equals(locSrvcCompatibilityEnabled)) {
            throw new IgniteCheckedException("Remote node doesn't support lazy services configuration and " +
                "local node cannot join node because local node's "
                + IGNITE_SERVICES_COMPATIBILITY_MODE + " property value explicitly set to 'false'" +
                "[locNodeAddrs=" + U.addressesAsString(locNode) +
                ", rmtNodeAddrs=" + U.addressesAsString(n) +
                ", locNodeId=" + locNode.id() + ", rmtNodeId=" + n.id() + ']');
        }
    }

    if (log.isDebugEnabled())
        log.debug("Finished node attributes consistency check.");
}
/**
 * Gets Java major version running on the node.
 * <p>
 * Handles both the legacy version scheme ({@code "1.8.0_131"} is Java 8) and the
 * JEP 223 scheme used since Java 9 ({@code "9.0.1"} or {@code "11"} is Java 9 / 11).
 * The previous implementation always parsed the second dot-separated component,
 * which yields the minor version (usually {@code 0}) on Java 9+ and throws for
 * single-component strings like {@code "9"}.
 *
 * @param node Cluster node.
 * @return Java major version, or {@code 0} if the version string cannot be parsed.
 * @throws IgniteCheckedException If failed to get the version.
 */
private int nodeJavaMajorVersion(ClusterNode node) throws IgniteCheckedException {
    try {
        String[] comps = node.<String>attribute("java.version").split("\\.");

        int major = Integer.parseInt(comps[0]);

        // Legacy scheme: "1.x..." means Java x (e.g. "1.8.0_131" is Java 8).
        if (major == 1)
            major = Integer.parseInt(comps[1]);

        return major;
    }
    catch (Exception e) {
        // Preserve the original best-effort behavior: log and report "unknown".
        U.error(log, "Failed to get java major version (unknown 'java.version' format) [ver=" +
            node.<String>attribute("java.version") + "]", e);

        return 0;
    }
}
/**
 * Counts the total number of CPUs across distinct physical machines. Nodes that
 * report the same MAC address set are assumed to share a machine, so their CPUs
 * are counted only once.
 *
 * @param nodes Nodes.
 * @return Total CPUs.
 */
private static int cpus(Collection<ClusterNode> nodes) {
    Collection<String> seenMacs = new HashSet<>(nodes.size(), 1.0f);

    int total = 0;

    for (ClusterNode node : nodes) {
        String macs = node.attribute(ATTR_MACS);

        // Count each physical host's CPUs only once.
        if (seenMacs.add(macs))
            total += node.metrics().getTotalCpus();
    }

    return total;
}
/**
 * Prints the latest topology info into log taking into account logging/verbosity settings.
 */
public void ackTopology() {
    long topVer = topSnap.get().topVer.topologyVersion();

    ackTopology(topVer, false);
}
/**
 * Logs grid size for license compliance.
 *
 * @param topVer Topology version.
 * @param throttle Suppress printing if this topology was already printed.
 */
private void ackTopology(long topVer, boolean throttle) {
    assert !isLocDaemon;

    DiscoCache discoCache = discoCache();

    Collection<ClusterNode> rmtNodes = discoCache.remoteNodes();

    Collection<ClusterNode> srvNodes = F.view(discoCache.allNodes(), F.not(FILTER_CLI));

    Collection<ClusterNode> clientNodes = F.view(discoCache.allNodes(), FILTER_CLI);

    ClusterNode locNode = discoCache.localNode();

    Collection<ClusterNode> allNodes = discoCache.allNodes();

    long hash = topologyHash(allNodes);

    // Prevent ack-ing topology for the same topology.
    // Can happen only during node startup.
    if (throttle && lastLoggedTop.getAndSet(hash) == hash)
        return;

    int totalCpus = cpus(allNodes);

    double heap = U.heapSize(allNodes, 2);

    if (log.isQuiet())
        U.quiet(false, topologySnapshotMessage(srvNodes.size(), clientNodes.size(), totalCpus, heap));

    if (log.isDebugEnabled()) {
        // Use StringBuilder instead of repeated String '+=' to avoid quadratic
        // copying when the topology contains many remote nodes.
        StringBuilder dbg = new StringBuilder();

        dbg.append(U.nl()).append(U.nl())
            .append(">>> +----------------+").append(U.nl())
            .append(">>> " + PREFIX + ".").append(U.nl())
            .append(">>> +----------------+").append(U.nl())
            .append(">>> Grid name: " + (ctx.gridName() == null ? "default" : ctx.gridName())).append(U.nl())
            .append(">>> Number of server nodes: " + srvNodes.size()).append(U.nl())
            .append(">>> Number of client nodes: " + clientNodes.size()).append(U.nl())
            .append(discoOrdered ? ">>> Topology version: " + topVer + U.nl() : "")
            .append(">>> Topology hash: 0x" + Long.toHexString(hash).toUpperCase()).append(U.nl());

        dbg.append(">>> Local: " +
            locNode.id().toString().toUpperCase() + ", " +
            U.addressesAsString(locNode) + ", " +
            locNode.order() + ", " +
            locNode.attribute("os.name") + ' ' +
            locNode.attribute("os.arch") + ' ' +
            locNode.attribute("os.version") + ", " +
            System.getProperty("user.name") + ", " +
            locNode.attribute("java.runtime.name") + ' ' +
            locNode.attribute("java.runtime.version")).append(U.nl());

        for (ClusterNode node : rmtNodes)
            dbg.append(">>> Remote: " +
                node.id().toString().toUpperCase() + ", " +
                U.addressesAsString(node) + ", " +
                node.order() + ", " +
                node.attribute("os.name") + ' ' +
                node.attribute("os.arch") + ' ' +
                node.attribute("os.version") + ", " +
                node.attribute(ATTR_USER_NAME) + ", " +
                node.attribute("java.runtime.name") + ' ' +
                node.attribute("java.runtime.version")).append(U.nl());

        dbg.append(">>> Total number of CPUs: " + totalCpus).append(U.nl());
        dbg.append(">>> Total heap size: " + heap + "GB").append(U.nl());

        log.debug(dbg.toString());
    }
    else if (log.isInfoEnabled())
        log.info(topologySnapshotMessage(srvNodes.size(), clientNodes.size(), totalCpus, heap));
}
/**
 * Builds the one-line topology snapshot message used in 'quiet' and 'info' logging.
 *
 * @param srvNodesNum Server nodes number.
 * @param clientNodesNum Client nodes number.
 * @param totalCpus Total cpu number.
 * @param heap Heap size.
 * @return Topology snapshot message.
 */
private String topologySnapshotMessage(int srvNodesNum, int clientNodesNum, int totalCpus, double heap) {
    // Version is printed only when the discovery SPI supports total ordering.
    String verPart = discoOrdered ? "ver=" + topSnap.get().topVer.topologyVersion() + ", " : "";

    return PREFIX + " [" + verPart +
        "servers=" + srvNodesNum +
        ", clients=" + clientNodesNum +
        ", CPUs=" + totalCpus +
        ", heap=" + heap + "GB]";
}
/** {@inheritDoc} */
@Override public void onKernalStop0(boolean cancel) {
    // Release any thread still waiting on the discovery start latch.
    startLatch.countDown();

    // Stop segment check worker.
    if (segChkWrk != null) {
        segChkWrk.cancel();

        U.join(segChkThread, log);
    }

    // Complete the local-join future exceptionally so waiters do not hang during shutdown.
    if (!locJoinEvt.isDone())
        locJoinEvt.onDone(
            new IgniteCheckedException("Failed to wait for local node joined event (grid is stopping)."));
}
/** {@inheritDoc} */
@Override public void stop(boolean cancel) throws IgniteCheckedException {
    // Block the busy lock: no new discovery operations may start past this point.
    busyLock.block();

    // Stop receiving notifications.
    getSpi().setListener(null);

    // Stop discovery worker and metrics updater.
    U.closeQuiet(metricsUpdateTask);

    U.cancel(discoWrk);

    U.join(discoWrk, log);

    // Stop SPI itself.
    stopSpi();

    if (log.isDebugEnabled())
        log.debug(stopInfo());
}
/**
 * Checks whether ALL of the given node IDs belong to alive nodes.
 * (The previous javadoc incorrectly said "at least one".)
 *
 * @param nodeIds Node IDs to check.
 * @return {@code True} if the collection is non-empty and every ID belongs to an
 *      alive node; {@code false} if the collection is {@code null}, empty, or
 *      contains at least one ID of a dead node.
 */
public boolean aliveAll(@Nullable Collection<UUID> nodeIds) {
    if (nodeIds == null || nodeIds.isEmpty())
        return false;

    for (UUID id : nodeIds)
        if (!alive(id))
            return false;

    return true;
}
/**
 * @param nodeId Node ID.
 * @return {@code True} if node for given ID is alive.
 */
public boolean alive(UUID nodeId) {
    ClusterNode node = getAlive(nodeId);

    return node != null;
}
/**
 * Looks the node up directly in the discovery SPI, bypassing the discovery cache,
 * so the result reflects the SPI's current liveness view.
 *
 * @param nodeId Node ID.
 * @return Node if node is alive, {@code null} otherwise.
 */
@Nullable public ClusterNode getAlive(UUID nodeId) {
    assert nodeId != null;

    // Go directly to SPI without checking disco cache.
    return getSpi().getNode(nodeId);
}
/**
 * @param node Node to check.
 * @return {@code True} if node is alive.
 */
public boolean alive(ClusterNode node) {
    assert node != null;

    UUID id = node.id();

    return alive(id);
}
/**
 * Pings the node with the given ID, translating a client-disconnect failure into a
 * checked exception that carries the reconnect future.
 *
 * @param nodeId ID of the node.
 * @return {@code True} if ping succeeded, {@code false} if the manager is stopping.
 * @throws IgniteClientDisconnectedCheckedException If the local client node got disconnected.
 */
public boolean pingNode(UUID nodeId) throws IgniteClientDisconnectedCheckedException {
    assert nodeId != null;

    if (!busyLock.enterBusy())
        return false;

    try {
        return getSpi().pingNode(nodeId);
    }
    catch (IgniteException e) {
        // Rethrow anything that is not a client disconnect as-is.
        if (!e.hasCause(IgniteClientDisconnectedCheckedException.class))
            throw e;

        IgniteFuture<?> reconnectFut = ctx.cluster().clientReconnectFuture();

        throw new IgniteClientDisconnectedCheckedException(reconnectFut, e.getMessage());
    }
    finally {
        busyLock.leaveBusy();
    }
}
/**
 * Pings the node with the given ID, treating every failure as "not reachable"
 * instead of propagating an exception.
 *
 * @param nodeId ID of the node.
 * @return {@code True} if ping succeeded.
 */
public boolean pingNodeNoError(UUID nodeId) {
    assert nodeId != null;

    if (!busyLock.enterBusy())
        return false;

    try {
        return getSpi().pingNode(nodeId);
    }
    catch (IgniteException ignored) {
        // Any ping failure is reported as an unsuccessful ping.
        return false;
    }
    finally {
        busyLock.leaveBusy();
    }
}
/**
 * @param nodeId ID of the node.
 * @return Node for ID, or {@code null} if not found in the discovery cache.
 */
@Nullable public ClusterNode node(UUID nodeId) {
    assert nodeId != null;

    DiscoCache cache = discoCache();

    return cache.node(nodeId);
}
/**
 * Gets collection of nodes for given node IDs and predicates.
 *
 * @param ids Ids to include.
 * @param p Filter for IDs.
 * @return Collection with all alive nodes for given IDs.
 */
public Collection<ClusterNode> nodes(@Nullable Collection<UUID> ids, IgnitePredicate<UUID>... p) {
    if (F.isEmpty(ids))
        return Collections.<ClusterNode>emptyList();

    // Map the IDs to nodes, then drop IDs that no longer resolve to a node.
    return F.view(F.viewReadOnly(ids, U.id2Node(ctx), p), F.notNull());
}
/**
 * Gets topology hash for given set of nodes. The hash is a CRC32 over the sorted
 * node ID strings, so it does not depend on the iteration order of the input.
 *
 * @param nodes Subset of grid nodes for hashing.
 * @return Hash for given topology.
 */
public long topologyHash(Iterable<? extends ClusterNode> nodes) {
    assert nodes != null;

    List<String> uids = new ArrayList<>();

    for (ClusterNode node : nodes)
        uids.add(node.id().toString());

    if (uids.isEmpty())
        return 0; // Special case.

    Collections.sort(uids);

    CRC32 crc = new CRC32();

    for (String uid : uids)
        crc.update(uid.getBytes());

    return crc.getValue();
}
/**
 * Gets future that will be completed when current topology version becomes greater or equal to argument passed.
 *
 * @param awaitVer Topology version to await.
 * @return Future.
 */
public IgniteInternalFuture<Long> topologyFuture(final long awaitVer) {
    long curVer = topologyVersion();

    // Already at or past the requested version - complete immediately.
    if (curVer >= awaitVer)
        return new GridFinishedFuture<>(curVer);

    DiscoTopologyFuture fut = new DiscoTopologyFuture(ctx, awaitVer);

    fut.init();

    return fut;
}
/**
 * Gets the discovery cache of the current topology snapshot, guarding against
 * "floating" collections returned by the SPI.
 *
 * @return Discovery collection cache.
 */
public DiscoCache discoCache() {
    Snapshot snap = topSnap.get();

    assert snap != null;

    return snap.discoCache;
}
/**
 * Gets the discovery cache recorded for the given topology version, if it is still
 * retained in the history.
 *
 * @param topVer Topology version.
 * @return Discovery collection cache, or {@code null} if the version is not in history.
 */
public DiscoCache discoCache(AffinityTopologyVersion topVer) {
    DiscoCache cache = discoCacheHist.get(topVer);

    return cache;
}
/** @return All non-daemon remote nodes in topology. */
public Collection<ClusterNode> remoteNodes() {
    DiscoCache cache = discoCache();

    return cache.remoteNodes();
}
/** @return All non-daemon nodes in topology. */
public Collection<ClusterNode> allNodes() {
    DiscoCache cache = discoCache();

    return cache.allNodes();
}
/**
 * Gets topology grouped by node versions.
 *
 * @return Version to collection of nodes map.
 */
public NavigableMap<IgniteProductVersion, Collection<ClusterNode>> topologyVersionMap() {
    DiscoCache cache = discoCache();

    return cache.versionsMap();
}
/** @return Full topology size (all non-daemon nodes). */
public int size() {
    Collection<ClusterNode> nodes = discoCache().allNodes();

    return nodes.size();
}
/**
 * Gets all nodes for given topology version.
 *
 * @param topVer Topology version.
 * @return Collection of cache nodes.
 */
public Collection<ClusterNode> nodes(long topVer) {
    AffinityTopologyVersion ver = new AffinityTopologyVersion(topVer);

    return nodes(ver);
}
/**
 * Gets all nodes for given topology version.
 *
 * @param topVer Topology version.
 * @return Collection of cache nodes.
 */
public Collection<ClusterNode> nodes(AffinityTopologyVersion topVer) {
    DiscoCache cache = resolveDiscoCache(null, topVer);

    return cache.allNodes();
}
/**
 * @param topVer Topology version.
 * @return All server nodes for given topology version.
 */
public List<ClusterNode> serverNodes(AffinityTopologyVersion topVer) {
    DiscoCache cache = resolveDiscoCache(null, topVer);

    return cache.srvNodes;
}
/**
 * Gets node from history for given topology version.
 *
 * @param topVer Topology version.
 * @param id Node ID.
 * @return Node.
 */
public ClusterNode node(AffinityTopologyVersion topVer, UUID id) {
    DiscoCache cache = resolveDiscoCache(null, topVer);

    return cache.node(id);
}
/**
 * Gets cache nodes for cache with given name.
 *
 * @param cacheName Cache name.
 * @param topVer Topology version.
 * @return Collection of cache nodes.
 */
public Collection<ClusterNode> cacheNodes(@Nullable String cacheName, AffinityTopologyVersion topVer) {
    DiscoCache cache = resolveDiscoCache(cacheName, topVer);

    return cache.cacheNodes(cacheName, topVer.topologyVersion());
}
/**
 * Gets all nodes with at least one cache configured.
 *
 * @param topVer Topology version.
 * @return Collection of cache nodes.
 */
public Collection<ClusterNode> cacheNodes(AffinityTopologyVersion topVer) {
    DiscoCache cache = resolveDiscoCache(null, topVer);

    return cache.allNodesWithCaches(topVer.topologyVersion());
}
/**
 * Gets cache remote nodes for cache with given name.
 *
 * @param cacheName Cache name.
 * @param topVer Topology version.
 * @return Collection of cache nodes.
 */
public Collection<ClusterNode> remoteCacheNodes(@Nullable String cacheName, AffinityTopologyVersion topVer) {
    DiscoCache cache = resolveDiscoCache(cacheName, topVer);

    return cache.remoteCacheNodes(cacheName, topVer.topologyVersion());
}
/**
 * Gets remote nodes that have at least one cache configured.
 *
 * @param topVer Topology version.
 * @return Collection of cache nodes.
 */
public Collection<ClusterNode> remoteCacheNodes(AffinityTopologyVersion topVer) {
    DiscoCache cache = resolveDiscoCache(null, topVer);

    return cache.remoteCacheNodes(topVer.topologyVersion());
}
/**
 * Gets alive cache nodes for cache with given name.
 *
 * @param cacheName Cache name.
 * @param topVer Topology version.
 * @return Collection of cache nodes.
 */
public Collection<ClusterNode> aliveCacheNodes(@Nullable String cacheName, AffinityTopologyVersion topVer) {
    DiscoCache cache = resolveDiscoCache(cacheName, topVer);

    return cache.aliveCacheNodes(cacheName, topVer.topologyVersion());
}
/**
 * Gets alive remote cache nodes for cache with given name.
 *
 * @param cacheName Cache name.
 * @param topVer Topology version.
 * @return Collection of cache nodes.
 */
public Collection<ClusterNode> aliveRemoteCacheNodes(@Nullable String cacheName, AffinityTopologyVersion topVer) {
    DiscoCache cache = resolveDiscoCache(cacheName, topVer);

    return cache.aliveRemoteCacheNodes(cacheName, topVer.topologyVersion());
}
/**
 * Gets alive remote server nodes with at least one cache configured.
 *
 * @param topVer Topology version (maximum allowed node order).
 * @return Collection of alive cache nodes.
 */
public Collection<ClusterNode> aliveRemoteServerNodesWithCaches(AffinityTopologyVersion topVer) {
    DiscoCache cache = resolveDiscoCache(null, topVer);

    return cache.aliveRemoteServerNodesWithCaches(topVer.topologyVersion());
}
/**
 * Gets alive server nodes with at least one cache configured.
 *
 * @param topVer Topology version (maximum allowed node order).
 * @return Collection of alive cache nodes.
 */
public Collection<ClusterNode> aliveServerNodesWithCaches(AffinityTopologyVersion topVer) {
    DiscoCache cache = resolveDiscoCache(null, topVer);

    return cache.aliveServerNodesWithCaches(topVer.topologyVersion());
}
/**
 * Gets alive nodes with at least one cache configured.
 *
 * @param topVer Topology version (maximum allowed node order).
 * @return Collection of alive cache nodes.
 */
public Collection<ClusterNode> aliveNodesWithCaches(AffinityTopologyVersion topVer) {
    DiscoCache cache = resolveDiscoCache(null, topVer);

    return cache.aliveNodesWithCaches(topVer.topologyVersion());
}
/**
 * Gets cache nodes for cache with given name that participate in affinity calculation.
 *
 * @param cacheName Cache name.
 * @param topVer Topology version.
 * @return Collection of cache affinity nodes.
 */
public Collection<ClusterNode> cacheAffinityNodes(@Nullable String cacheName, AffinityTopologyVersion topVer) {
    DiscoCache cache = resolveDiscoCache(cacheName, topVer);

    return cache.cacheAffinityNodes(cacheName, topVer.topologyVersion());
}
/**
 * Checks if node is a data node for the given cache.
 *
 * @param node Node to check.
 * @param cacheName Cache name.
 * @return {@code True} if node is a cache data node.
 */
public boolean cacheAffinityNode(ClusterNode node, String cacheName) {
    CachePredicate pred = registeredCaches.get(cacheName);

    if (pred == null)
        return false;

    return pred.dataNode(node);
}
/**
 * @param node Node to check.
 * @param cacheName Cache name.
 * @return {@code True} if node has near cache enabled.
 */
public boolean cacheNearNode(ClusterNode node, String cacheName) {
    CachePredicate pred = registeredCaches.get(cacheName);

    if (pred == null)
        return false;

    return pred.nearNode(node);
}
/**
 * @param node Node to check.
 * @param cacheName Cache name.
 * @return {@code True} if node has client cache (without near cache).
 */
public boolean cacheClientNode(ClusterNode node, String cacheName) {
    CachePredicate pred = registeredCaches.get(cacheName);

    if (pred == null)
        return false;

    return pred.clientNode(node);
}
/**
 * @param node Node to check.
 * @param cacheName Cache name.
 * @return If cache with the given name is accessible on the given node.
 */
public boolean cacheNode(ClusterNode node, String cacheName) {
    CachePredicate pred = registeredCaches.get(cacheName);

    if (pred == null)
        return false;

    return pred.cacheNode(node);
}
/**
 * Collects public cache names accessible on the given node, excluding system and
 * IGFS caches, mapped to their cache mode.
 *
 * @param node Node to check.
 * @return Public cache names accessible on the given node.
 */
public Map<String, CacheMode> nodeCaches(ClusterNode node) {
    Map<String, CacheMode> res = U.newHashMap(registeredCaches.size());

    for (Map.Entry<String, CachePredicate> e : registeredCaches.entrySet()) {
        String name = e.getKey();

        // Skip internal (system and IGFS) caches.
        if (CU.isSystemCache(name) || CU.isIgfsCache(ctx.config(), name))
            continue;

        CachePredicate pred = e.getValue();

        if (pred != null && pred.cacheNode(node))
            res.put(name, pred.cacheMode);
    }

    return res;
}
/**
 * Checks if cache with given name has at least one node with near cache enabled.
 *
 * @param cacheName Cache name.
 * @param topVer Topology version.
 * @return {@code True} if cache with given name has at least one node with near cache enabled.
 */
public boolean hasNearCache(@Nullable String cacheName, AffinityTopologyVersion topVer) {
    DiscoCache cache = resolveDiscoCache(cacheName, topVer);

    return cache.hasNearCache(cacheName);
}
/**
 * Gets discovery cache for given topology version.
 *
 * @param cacheName Cache name (participates in exception message).
 * @param topVer Topology version.
 * @return Discovery cache.
 * @throws IgniteException If no discovery cache can be resolved for the version.
 */
private DiscoCache resolveDiscoCache(@Nullable String cacheName, AffinityTopologyVersion topVer) {
    Snapshot snap = topSnap.get();

    // Use the current snapshot for NONE or an exact match; otherwise consult history.
    DiscoCache cache = AffinityTopologyVersion.NONE.equals(topVer) || topVer.equals(snap.topVer) ?
        snap.discoCache : discoCacheHist.get(topVer);

    if (cache == null) {
        // Find the eldest acceptable discovery cache.
        // NOTE(review): for versions older than the retained history the eldest retained
        // cache is used as a best-effort approximation - confirm callers tolerate this.
        Map.Entry<AffinityTopologyVersion, DiscoCache> eldest = discoCacheHist.firstEntry();

        if (eldest != null) {
            if (topVer.compareTo(eldest.getKey()) < 0)
                cache = eldest.getValue();
        }
    }

    if (cache == null) {
        throw new IgniteException("Failed to resolve nodes topology [cacheName=" + cacheName +
            ", topVer=" + topVer +
            ", history=" + discoCacheHist.keySet() +
            ", snap=" + snap +
            ", locNode=" + ctx.discovery().localNode() + ']');
    }

    return cache;
}
/**
 * Gets topology by specified version from history storage.
 *
 * @param topVer Topology version.
 * @return Topology nodes or {@code null} if there are no nodes for passed in version.
 * @throws UnsupportedOperationException If the discovery SPI does not keep history.
 */
@Nullable public Collection<ClusterNode> topology(long topVer) {
    if (!histSupported)
        throw new UnsupportedOperationException("Current discovery SPI does not support " +
            "topology snapshots history (consider using TCP discovery SPI).");

    // Capture the field once so the lookup runs against a single map instance.
    Map<Long, Collection<ClusterNode>> hist = topHist;

    return hist.get(topVer);
}
/** @return All daemon nodes in topology. */
public Collection<ClusterNode> daemonNodes() {
    DiscoCache cache = discoCache();

    return cache.daemonNodes();
}
/** @return Local node (falls back to the SPI while the cached reference is not yet set). */
public ClusterNode localNode() {
    ClusterNode node = locNode;

    return node == null ? getSpi().getLocalNode() : node;
}
/** @return Current topology version. */
public long topologyVersion() {
    Snapshot snap = topSnap.get();

    return snap.topVer.topologyVersion();
}
/**
 * @return Current topology version as an {@link AffinityTopologyVersion}.
 */
public AffinityTopologyVersion topologyVersionEx() {
    Snapshot snap = topSnap.get();

    return snap.topVer;
}
/** @return Event that represents a local node joined to topology. */
public DiscoveryEvent localJoinEvent() {
    try {
        // Waits on the local-join future if the node has not joined yet.
        return locJoinEvt.get();
    }
    catch (IgniteCheckedException e) {
        // Callers do not expect a checked exception here; rethrow unchecked, preserving cause.
        throw new IgniteException(e);
    }
}
/**
 * Sends a custom discovery message through the SPI, translating unchecked SPI
 * failures into checked exceptions.
 *
 * @param msg Custom message.
 * @throws IgniteCheckedException If failed.
 */
public void sendCustomEvent(DiscoveryCustomMessage msg) throws IgniteCheckedException {
    try {
        getSpi().sendCustomEvent(new CustomMessageWrapper(msg));
    }
    catch (IgniteClientDisconnectedException e) {
        // Attach the reconnect future so the caller can wait for reconnection.
        IgniteFuture<?> reconnectFut = ctx.cluster().clientReconnectFuture();

        throw new IgniteClientDisconnectedCheckedException(reconnectFut, e.getMessage());
    }
    catch (IgniteException e) {
        throw new IgniteCheckedException(e);
    }
}
/**
 * Gets first grid node start time, see {@link DiscoverySpi#getGridStartTime()}.
 *
 * @return Start time of the first grid node.
 */
public long gridStartTime() {
    long startTime = getSpi().getGridStartTime();

    return startTime;
}
/**
 * Fails the node with the given ID, but only if it does not respond to ping.
 *
 * @param nodeId Node ID.
 * @param warning Warning message to be shown on all nodes.
 * @return Whether node was failed.
 */
public boolean tryFailNode(UUID nodeId, @Nullable String warning) {
    if (!busyLock.enterBusy())
        return false;

    try {
        // A node that answers ping is considered healthy and is left alone.
        if (getSpi().pingNode(nodeId))
            return false;

        getSpi().failNode(nodeId, warning);

        return true;
    }
    finally {
        busyLock.leaveBusy();
    }
}
/**
 * Unconditionally fails the node with the given ID via the discovery SPI.
 *
 * @param nodeId Node ID to fail.
 * @param warning Warning message to be shown on all nodes.
 */
public void failNode(UUID nodeId, @Nullable String warning) {
    // No-op if the manager is already stopping.
    if (!busyLock.enterBusy())
        return;

    try {
        getSpi().failNode(nodeId, warning);
    }
    finally {
        busyLock.leaveBusy();
    }
}
/**
 * Updates topology version if current version is smaller than updated.
 *
 * @param updated Updated topology version.
 * @param discoCache Discovery cache.
 * @return {@code True} if topology was updated.
 */
private boolean updateTopologyVersionIfGreater(AffinityTopologyVersion updated, DiscoCache discoCache) {
    while (true) {
        Snapshot cur = topSnap.get();

        // Never move the version backwards.
        if (updated.compareTo(cur.topVer) < 0)
            return false;

        // On CAS failure another thread raced us: re-read and re-compare.
        if (topSnap.compareAndSet(cur, new Snapshot(updated, discoCache)))
            return true;
    }
}
/** Stops local node asynchronously after marking the kernal context as segmented. */
private void stopNode() {
    Runnable stopper = new Runnable() {
        @Override public void run() {
            ctx.markSegmented();

            G.stop(ctx.gridName(), true);
        }
    };

    new Thread(stopper).start();
}
/** Restarts JVM asynchronously after marking the kernal context as segmented. */
private void restartJvm() {
    Runnable restarter = new Runnable() {
        @Override public void run() {
            ctx.markSegmented();

            G.restart(true);
        }
    };

    new Thread(restarter).start();
}
/** Worker for network segment checks. */
private class SegmentCheckWorker extends GridWorker {
    /** Queue of explicit check requests; only presence of elements matters, not their identity. */
    private final BlockingQueue<Object> queue = new LinkedBlockingQueue<>();

    /**
     * Creates the worker. Only constructed when segmentation resolvers are configured.
     */
    private SegmentCheckWorker() {
        super(ctx.gridName(), "disco-net-seg-chk-worker", GridDiscoveryManager.this.log);

        assert hasRslvrs;
        assert segChkFreq > 0;
    }

    /**
     * Requests an immediate segment check (e.g. on any node failure or leave).
     */
    public void scheduleSegmentCheck() {
        queue.add(new Object());
    }

    /** {@inheritDoc} */
    @SuppressWarnings("StatementWithEmptyBody")
    @Override protected void body() throws InterruptedException {
        long lastChk = 0;

        while (!isCancelled()) {
            // Wake up on an explicit request or every 2 seconds to re-evaluate the timer.
            Object req = queue.poll(2000, MILLISECONDS);

            long now = U.currentTimeMillis();

            // Check frequency if segment check has not been requested.
            // NOTE(review): the 'segChkFreq == 0' disjunct appears unreachable given the
            // constructor's 'segChkFreq > 0' assertion - confirm.
            if (req == null && (segChkFreq == 0 || lastChk + segChkFreq >= now)) {
                if (log.isDebugEnabled())
                    log.debug("Skipping segment check as it has not been requested and it is not time to check.");

                continue;
            }

            // We should always check segment if it has been explicitly
            // requested (on any node failure or leave).
            assert req != null || lastChk + segChkFreq < now;

            // Drain queue so coalesced requests trigger only a single check.
            while (queue.poll() != null) {
                // No-op.
            }

            // Once the segment was found invalid, further checks are skipped.
            if (lastSegChkRes.get()) {
                boolean segValid = ctx.segmentation().isValidSegment();

                lastChk = now;

                if (!segValid) {
                    // Hand the segmentation event off to the discovery worker.
                    discoWrk.addEvent(EVT_NODE_SEGMENTED, AffinityTopologyVersion.NONE, getSpi().getLocalNode(),
                        Collections.<ClusterNode>emptyList(), null);

                    lastSegChkRes.set(false);
                }

                if (log.isDebugEnabled())
                    log.debug("Segment has been checked [requested=" + (req != null) + ", valid=" + segValid + ']');
            }
        }
    }

    /** {@inheritDoc} */
    @Override public String toString() {
        return S.toString(SegmentCheckWorker.class, this);
    }
}
/** Worker for discovery events. */
private class DiscoveryWorker extends GridWorker {
/**
 * Event queue. Each element carries (event type, topology version, event node,
 * topology snapshot, optional custom message).
 */
private final BlockingQueue<GridTuple5<Integer, AffinityTopologyVersion, ClusterNode, Collection<ClusterNode>,
    DiscoveryCustomMessage>> evts = new LinkedBlockingQueue<>();

/** Node segmented event fired flag. */
// NOTE(review): flag is read/written outside this chunk - presumably ensures the
// segmented event is fired at most once; confirm against the rest of the class.
private boolean nodeSegFired;
/**
 * Creates the discovery event worker bound to this manager's grid name and logger.
 */
private DiscoveryWorker() {
    super(ctx.gridName(), "disco-event-worker", GridDiscoveryManager.this.log);
}
/**
 * Method is called when any discovery event occurs. Records the event through the
 * event storage subsystem when this event type is recordable.
 *
 * @param type Discovery event type. See {@link DiscoveryEvent} for more details.
 * @param topVer Topology version.
 * @param node Remote node this event is connected with.
 * @param topSnapshot Topology snapshot.
 */
@SuppressWarnings("RedundantTypeArguments")
private void recordEvent(int type, long topVer, ClusterNode node, Collection<ClusterNode> topSnapshot) {
    assert node != null;

    if (!ctx.event().isRecordable(type))
        return;

    DiscoveryEvent evt = new DiscoveryEvent();

    evt.node(ctx.discovery().localNode());
    evt.eventNode(node);
    evt.type(type);

    // Daemon nodes are filtered out of the recorded snapshot.
    evt.topologySnapshot(topVer, U.<ClusterNode, ClusterNode>arrayList(topSnapshot, FILTER_DAEMON));

    switch (type) {
        case EVT_NODE_METRICS_UPDATED:
            evt.message("Metrics were updated: " + node);

            break;

        case EVT_NODE_JOINED:
            evt.message("Node joined: " + node);

            break;

        case EVT_NODE_LEFT:
            evt.message("Node left: " + node);

            break;

        case EVT_NODE_FAILED:
            evt.message("Node failed: " + node);

            break;

        case EVT_NODE_SEGMENTED:
            evt.message("Node segmented: " + node);

            break;

        case EVT_CLIENT_NODE_DISCONNECTED:
            evt.message("Client node disconnected: " + node);

            break;

        case EVT_CLIENT_NODE_RECONNECTED:
            evt.message("Client node reconnected: " + node);

            break;

        default:
            assert false;
    }

    ctx.event().record(evt);
}
/**
 * Enqueues a discovery event for asynchronous processing by this worker.
 *
 * @param type Event type.
 * @param topVer Topology version.
 * @param node Node.
 * @param topSnapshot Topology snapshot.
 * @param data Custom message.
 */
void addEvent(
    int type,
    AffinityTopologyVersion topVer,
    ClusterNode node,
    Collection<ClusterNode> topSnapshot,
    @Nullable DiscoveryCustomMessage data
) {
    assert node != null : data;

    GridTuple5<Integer, AffinityTopologyVersion, ClusterNode, Collection<ClusterNode>, DiscoveryCustomMessage> evt =
        new GridTuple5<>(type, topVer, node, topSnapshot, data);

    evts.add(evt);
}
/**
 * @param node Node to get a short description for.
 * @return Short description for the node to be used in 'quiet' mode.
 */
private String quietNode(ClusterNode node) {
    assert node != null;

    StringBuilder sb = new StringBuilder();

    sb.append("nodeId8=").append(node.id().toString().substring(0, 8)).append(", ");
    sb.append("addrs=").append(U.addressesAsString(node)).append(", ");
    sb.append("order=").append(node.order()).append(", ");
    sb.append("CPUs=").append(node.metrics().getTotalCpus());

    return sb.toString();
}
/** {@inheritDoc} */
@Override protected void body() throws InterruptedException {
    while (!isCancelled()) {
        try {
            body0();
        }
        catch (InterruptedException e) {
            // Interruption is the normal cancellation path - propagate it.
            throw e;
        }
        catch (Throwable t) {
            U.error(log, "Unexpected exception in discovery worker thread (ignored).", t);

            // Errors (e.g. OutOfMemoryError) are fatal and must not be swallowed.
            if (t instanceof Error)
                throw (Error)t;
        }
    }
}
        /**
         * Takes one queued discovery notification, logs/acknowledges it according to its type,
         * records the corresponding event and, if the local node got segmented, triggers the
         * configured segmentation handling.
         *
         * @throws InterruptedException If interrupted.
         */
        @SuppressWarnings("DuplicateCondition")
        private void body0() throws InterruptedException {
            // Blocks until the next notification is enqueued via addEvent().
            GridTuple5<Integer, AffinityTopologyVersion, ClusterNode, Collection<ClusterNode>,
                DiscoveryCustomMessage> evt = evts.take();
            int type = evt.get1();
            AffinityTopologyVersion topVer = evt.get2();
            ClusterNode node = evt.get3();
            boolean isDaemon = node.isDaemon();
            // Set only by the EVT_NODE_SEGMENTED branch below.
            boolean segmented = false;
            switch (type) {
                case EVT_NODE_JOINED: {
                    assert !discoOrdered || topVer.topologyVersion() == node.order() : "Invalid topology version [topVer=" + topVer +
                        ", node=" + node + ']';
                    try {
                        checkAttributes(F.asList(node));
                    }
                    catch (IgniteCheckedException e) {
                        U.warn(log, e.getMessage()); // We have a well-formed attribute warning here.
                    }
                    if (!isDaemon) {
                        if (!isLocDaemon) {
                            if (log.isInfoEnabled())
                                log.info("Added new node to topology: " + node);
                            ackTopology(topVer.topologyVersion(), true);
                        }
                        else if (log.isDebugEnabled())
                            log.debug("Added new node to topology: " + node);
                    }
                    else if (log.isDebugEnabled())
                        log.debug("Added new daemon node to topology: " + node);
                    break;
                }
                case EVT_NODE_LEFT: {
                    // Check only if resolvers were configured.
                    if (hasRslvrs)
                        segChkWrk.scheduleSegmentCheck();
                    if (!isDaemon) {
                        if (!isLocDaemon) {
                            if (log.isInfoEnabled())
                                log.info("Node left topology: " + node);
                            ackTopology(topVer.topologyVersion(), true);
                        }
                        else if (log.isDebugEnabled())
                            log.debug("Node left topology: " + node);
                    }
                    else if (log.isDebugEnabled())
                        log.debug("Daemon node left topology: " + node);
                    break;
                }
                case EVT_CLIENT_NODE_DISCONNECTED: {
                    // No-op.
                    break;
                }
                case EVT_CLIENT_NODE_RECONNECTED: {
                    if (log.isInfoEnabled())
                        log.info("Client node reconnected to topology: " + node);
                    ackTopology(topVer.topologyVersion(), true);
                    break;
                }
                case EVT_NODE_FAILED: {
                    // Check only if resolvers were configured.
                    if (hasRslvrs)
                        segChkWrk.scheduleSegmentCheck();
                    if (!isDaemon) {
                        if (!isLocDaemon) {
                            U.warn(log, "Node FAILED: " + node);
                            ackTopology(topVer.topologyVersion(), true);
                        }
                        else if (log.isDebugEnabled())
                            log.debug("Node FAILED: " + node);
                    }
                    else if (log.isDebugEnabled())
                        log.debug("Daemon node FAILED: " + node);
                    break;
                }
                case EVT_NODE_SEGMENTED: {
                    // Segmentation events are only fired for the local node.
                    assert F.eqNodes(localNode(), node);
                    if (nodeSegFired) {
                        if (log.isDebugEnabled()) {
                            log.debug("Ignored node segmented event [type=EVT_NODE_SEGMENTED, " +
                                "node=" + node + ']');
                        }
                        return;
                    }
                    // Ignore all further EVT_NODE_SEGMENTED events
                    // until EVT_NODE_RECONNECTED is fired.
                    nodeSegFired = true;
                    lastLoggedTop.set(0);
                    segmented = true;
                    if (!isLocDaemon)
                        U.warn(log, "Local node SEGMENTED: " + node);
                    else if (log.isDebugEnabled())
                        log.debug("Local node SEGMENTED: " + node);
                    break;
                }
                case DiscoveryCustomEvent.EVT_DISCOVERY_CUSTOM_EVT: {
                    // Custom events are recorded here directly and bypass recordEvent() below.
                    if (ctx.event().isRecordable(DiscoveryCustomEvent.EVT_DISCOVERY_CUSTOM_EVT)) {
                        DiscoveryCustomEvent customEvt = new DiscoveryCustomEvent();
                        customEvt.node(ctx.discovery().localNode());
                        customEvt.eventNode(node);
                        customEvt.type(type);
                        customEvt.topologySnapshot(topVer.topologyVersion(), evt.get4());
                        customEvt.affinityTopologyVersion(topVer);
                        customEvt.customMessage(evt.get5());
                        ctx.event().record(customEvt);
                    }
                    return;
                }
                // Don't log metric update to avoid flooding the log.
                case EVT_NODE_METRICS_UPDATED:
                    break;
                default:
                    assert false : "Invalid discovery event: " + type;
            }
            recordEvent(type, topVer.topologyVersion(), node, evt.get4());
            // Handle segmentation only after the event has been recorded.
            if (segmented)
                onSegmentation();
        }
        /**
         * Handles segmentation of the local node: always disconnects the discovery SPI first,
         * then applies the configured {@code SegmentationPolicy} (restart JVM, stop node, or
         * do nothing for NOOP).
         */
        private void onSegmentation() {
            SegmentationPolicy segPlc = ctx.config().getSegmentationPolicy();
            // Always disconnect first.
            try {
                getSpi().disconnect();
            }
            catch (IgniteSpiException e) {
                // Disconnect failure is logged but does not prevent policy handling below.
                U.error(log, "Failed to disconnect discovery SPI.", e);
            }
            switch (segPlc) {
                case RESTART_JVM:
                    U.warn(log, "Restarting JVM according to configured segmentation policy.");
                    restartJvm();
                    break;
                case STOP:
                    U.warn(log, "Stopping local node according to configured segmentation policy.");
                    stopNode();
                    break;
                default:
                    // NOOP: leave the segmented node running.
                    assert segPlc == NOOP : "Unsupported segmentation policy value: " + segPlc;
            }
        }
        /** {@inheritDoc} */
        @Override public String toString() {
            // Standard reflective string representation used throughout this file.
            return S.toString(DiscoveryWorker.class, this);
        }
}
    /**
     * Periodic task that samples process-wide GC and CPU load and publishes the
     * results into the enclosing manager's {@code gcCpuLoad} and {@code cpuLoad} fields.
     */
    private class MetricsUpdater implements Runnable {
        /** GC time (averaged per processor) observed on the previous run; {@code -1} until first sample. */
        private long prevGcTime = -1;
        /** Process CPU time (averaged per processor) observed on the previous run; {@code -1} until first sample. */
        private long prevCpuTime = -1;
        /** {@inheritDoc} */
        @Override public void run() {
            gcCpuLoad = getGcCpuLoad();
            cpuLoad = getCpuLoad();
        }
        /**
         * Computes GC load as the fraction of the metrics update interval spent in GC
         * since the previous run.
         *
         * @return GC CPU load.
         */
        private double getGcCpuLoad() {
            long gcTime = 0;
            // Sum collection times over all garbage collectors.
            for (GarbageCollectorMXBean bean : gc) {
                long colTime = bean.getCollectionTime();
                if (colTime > 0)
                    gcTime += colTime;
            }
            // Average across processors (integer division, matches the CPU-time calculation below).
            gcTime /= metrics.getAvailableProcessors();
            double gc = 0;
            if (prevGcTime > 0) {
                long gcTimeDiff = gcTime - prevGcTime;
                gc = (double)gcTimeDiff / METRICS_UPDATE_FREQ;
            }
            prevGcTime = gcTime;
            return gc;
        }
        /**
         * Computes CPU load as the fraction of the metrics update interval the process
         * spent on CPU since the previous run, capped at {@code 1.0}.
         *
         * @return CPU load, or {@code -1} if the process CPU time is unavailable.
         */
        private double getCpuLoad() {
            long cpuTime;
            try {
                // Reflective read; not all JVMs expose processCpuTime.
                cpuTime = U.<Long>property(os, "processCpuTime");
            }
            catch (IgniteException ignored) {
                return -1;
            }
            // Method reports time in nanoseconds across all processors.
            cpuTime /= 1000000 * metrics.getAvailableProcessors();
            double cpu = 0;
            if (prevCpuTime > 0) {
                long cpuTimeDiff = cpuTime - prevCpuTime;
                // CPU load could go higher than 100% because calculating of cpuTimeDiff also takes some time.
                cpu = Math.min(1.0, (double)cpuTimeDiff / METRICS_UPDATE_FREQ);
            }
            prevCpuTime = cpuTime;
            return cpu;
        }
        /** {@inheritDoc} */
        @Override public String toString() {
            return S.toString(MetricsUpdater.class, this, super.toString());
        }
    }
    /**
     * Discovery topology future: completes with the current topology version once
     * the topology reaches (or has already reached) the awaited version.
     */
    private static class DiscoTopologyFuture extends GridFutureAdapter<Long> implements GridLocalEventListener {
        /** Serial version UID. */
        private static final long serialVersionUID = 0L;
        /** Kernal context. */
        private GridKernalContext ctx;
        /** Topology await version. */
        private long awaitVer;
        /** Empty constructor required by {@link Externalizable}. */
        private DiscoTopologyFuture() {
            // No-op.
        }
        /**
         * @param ctx Context.
         * @param awaitVer Await version.
         */
        private DiscoTopologyFuture(GridKernalContext ctx, long awaitVer) {
            this.ctx = ctx;
            this.awaitVer = awaitVer;
        }
        /** Initializes future: registers the listener first, then re-checks the current version. */
        private void init() {
            ctx.event().addLocalEventListener(this, EVT_NODE_JOINED, EVT_NODE_LEFT, EVT_NODE_FAILED);
            // Close potential window: the awaited version may have been reached
            // before the listener above was registered.
            long topVer = ctx.discovery().topologyVersion();
            if (topVer >= awaitVer)
                onDone(topVer);
        }
        /** {@inheritDoc} */
        @Override public boolean onDone(@Nullable Long res, @Nullable Throwable err) {
            if (super.onDone(res, err)) {
                // Deregister once the future is completed to avoid leaking the listener.
                ctx.event().removeLocalEventListener(this, EVT_NODE_JOINED, EVT_NODE_LEFT, EVT_NODE_FAILED);
                return true;
            }
            return false;
        }
        /** {@inheritDoc} */
        @Override public void onEvent(Event evt) {
            assert evt.type() == EVT_NODE_JOINED || evt.type() == EVT_NODE_LEFT || evt.type() == EVT_NODE_FAILED;
            DiscoveryEvent discoEvt = (DiscoveryEvent)evt;
            if (discoEvt.topologyVersion() >= awaitVer)
                onDone(discoEvt.topologyVersion());
        }
    }
    /**
     * Immutable holder pairing a topology version with the discovery cache
     * computed for that version.
     */
    private static class Snapshot {
        /** Topology version this snapshot was taken at. */
        private final AffinityTopologyVersion topVer;
        /** Discovery cache for {@link #topVer}; excluded from {@code toString()}. */
        @GridToStringExclude
        private final DiscoCache discoCache;
        /**
         * @param topVer Topology version.
         * @param discoCache Disco cache.
         */
        private Snapshot(AffinityTopologyVersion topVer, DiscoCache discoCache) {
            this.topVer = topVer;
            this.discoCache = discoCache;
        }
        /** {@inheritDoc} */
        @Override public String toString() {
            return S.toString(Snapshot.class, this);
        }
    }
    /**
     * Cache for discovery collections. Immutable snapshot of topology-derived node
     * collections (per-cache node sets, version groupings, daemon nodes, etc.) computed
     * once in the constructor; only the "alive" collections are mutated afterwards via
     * {@link #updateAlives(ClusterNode)}.
     */
    private class DiscoCache {
        /** Remote nodes. */
        private final List<ClusterNode> rmtNodes;
        /** All nodes. */
        private final List<ClusterNode> allNodes;
        /** All server nodes. */
        private final List<ClusterNode> srvNodes;
        /** All nodes with at least one cache configured. */
        @GridToStringInclude
        private final Collection<ClusterNode> allNodesWithCaches;
        /** Remote nodes with at least one cache configured. */
        @GridToStringInclude
        private final Collection<ClusterNode> rmtNodesWithCaches;
        /** Cache nodes by cache name. */
        @GridToStringInclude
        private final Map<String, Collection<ClusterNode>> allCacheNodes;
        /** Remote cache nodes by cache name. */
        @GridToStringInclude
        private final Map<String, Collection<ClusterNode>> rmtCacheNodes;
        /** Affinity (data) cache nodes by cache name. */
        @GridToStringInclude
        private final Map<String, Collection<ClusterNode>> affCacheNodes;
        /** Caches where at least one node has near cache enabled. */
        @GridToStringInclude
        private final Set<String> nearEnabledCaches;
        /** Nodes grouped by version. */
        private final NavigableMap<IgniteProductVersion, Collection<ClusterNode>> nodesByVer;
        /** Daemon nodes. */
        private final List<ClusterNode> daemonNodes;
        /** Node map. */
        private final Map<UUID, ClusterNode> nodeMap;
        /** Local node. */
        private final ClusterNode loc;
        /** Highest node order. */
        private final long maxOrder;
        /**
         * Cached alive nodes list. As long as this collection doesn't accept {@code null}s use {@link
         * #maskNull(String)} before passing raw cache names to it.
         */
        private final ConcurrentMap<String, Collection<ClusterNode>> aliveCacheNodes;
        /**
         * Cached alive remote nodes list. As long as this collection doesn't accept {@code null}s use {@link
         * #maskNull(String)} before passing raw cache names to it.
         */
        private final ConcurrentMap<String, Collection<ClusterNode>> aliveRmtCacheNodes;
        /**
         * Cached alive nodes with caches.
         */
        private final Collection<ClusterNode> aliveNodesWithCaches;
        /**
         * Cached alive server nodes with caches.
         */
        private final Collection<ClusterNode> aliveSrvNodesWithCaches;
        /**
         * Cached alive remote server nodes with caches.
         */
        private final Collection<ClusterNode> aliveRmtSrvNodesWithCaches;
        /**
         * Builds all derived collections in a single pass over the topology.
         *
         * @param loc Local node.
         * @param rmts Remote nodes.
         */
        private DiscoCache(ClusterNode loc, Collection<ClusterNode> rmts) {
            this.loc = loc;
            // FILTER_DAEMON keeps non-daemon nodes (the assert in the loop below relies on this).
            rmtNodes = Collections.unmodifiableList(new ArrayList<>(F.view(rmts, FILTER_DAEMON)));
            assert !rmtNodes.contains(loc) : "Remote nodes collection shouldn't contain local node" +
                " [rmtNodes=" + rmtNodes + ", loc=" + loc + ']';
            List<ClusterNode> all = new ArrayList<>(rmtNodes.size() + 1);
            if (!loc.isDaemon())
                all.add(loc);
            all.addAll(rmtNodes);
            // allNodes is kept sorted by node order.
            Collections.sort(all, GridNodeOrderComparator.INSTANCE);
            allNodes = Collections.unmodifiableList(all);
            Map<String, Collection<ClusterNode>> cacheMap = new HashMap<>(allNodes.size(), 1.0f);
            Map<String, Collection<ClusterNode>> rmtCacheMap = new HashMap<>(allNodes.size(), 1.0f);
            Map<String, Collection<ClusterNode>> dhtNodesMap = new HashMap<>(allNodes.size(), 1.0f);
            Collection<ClusterNode> nodesWithCaches = new HashSet<>(allNodes.size());
            Collection<ClusterNode> rmtNodesWithCaches = new HashSet<>(allNodes.size());
            aliveCacheNodes = new ConcurrentHashMap8<>(allNodes.size(), 1.0f);
            aliveRmtCacheNodes = new ConcurrentHashMap8<>(allNodes.size(), 1.0f);
            aliveNodesWithCaches = new ConcurrentSkipListSet<>();
            aliveSrvNodesWithCaches = new ConcurrentSkipListSet<>();
            aliveRmtSrvNodesWithCaches = new ConcurrentSkipListSet<>();
            nodesByVer = new TreeMap<>();
            long maxOrder0 = 0;
            Set<String> nearEnabledSet = new HashSet<>();
            List<ClusterNode> srvNodes = new ArrayList<>();
            // Single pass: classify each node against every registered cache predicate.
            for (ClusterNode node : allNodes) {
                assert node.order() != 0 : "Invalid node order [locNode=" + loc + ", node=" + node + ']';
                assert !node.isDaemon();
                if (!CU.clientNode(node))
                    srvNodes.add(node);
                if (node.order() > maxOrder0)
                    maxOrder0 = node.order();
                boolean hasCaches = false;
                for (Map.Entry<String, CachePredicate> entry : registeredCaches.entrySet()) {
                    String cacheName = entry.getKey();
                    CachePredicate filter = entry.getValue();
                    if (filter.cacheNode(node)) {
                        nodesWithCaches.add(node);
                        if (!loc.id().equals(node.id()))
                            rmtNodesWithCaches.add(node);
                        addToMap(cacheMap, cacheName, node);
                        if (alive(node.id()))
                            addToMap(aliveCacheNodes, maskNull(cacheName), node);
                        if (filter.dataNode(node))
                            addToMap(dhtNodesMap, cacheName, node);
                        if (filter.nearNode(node))
                            nearEnabledSet.add(cacheName);
                        if (!loc.id().equals(node.id())) {
                            addToMap(rmtCacheMap, cacheName, node);
                            if (alive(node.id()))
                                addToMap(aliveRmtCacheNodes, maskNull(cacheName), node);
                        }
                        hasCaches = true;
                    }
                }
                if (hasCaches) {
                    if (alive(node.id())) {
                        aliveNodesWithCaches.add(node);
                        if (!CU.clientNode(node)) {
                            aliveSrvNodesWithCaches.add(node);
                            if (!loc.id().equals(node.id()))
                                aliveRmtSrvNodesWithCaches.add(node);
                        }
                    }
                }
                IgniteProductVersion nodeVer = U.productVersion(node);
                // Create collection for this version if it does not exist.
                Collection<ClusterNode> nodes = nodesByVer.get(nodeVer);
                if (nodes == null) {
                    nodes = new ArrayList<>(allNodes.size());
                    nodesByVer.put(nodeVer, nodes);
                }
                nodes.add(node);
            }
            Collections.sort(srvNodes, CU.nodeComparator(true));
            // Need second iteration to add this node to all previous node versions.
            for (ClusterNode node : allNodes) {
                IgniteProductVersion nodeVer = U.productVersion(node);
                // Get all versions lower or equal node's version.
                NavigableMap<IgniteProductVersion, Collection<ClusterNode>> updateView =
                    nodesByVer.headMap(nodeVer, false);
                for (Collection<ClusterNode> prevVersions : updateView.values())
                    prevVersions.add(node);
            }
            maxOrder = maxOrder0;
            allCacheNodes = Collections.unmodifiableMap(cacheMap);
            rmtCacheNodes = Collections.unmodifiableMap(rmtCacheMap);
            affCacheNodes = Collections.unmodifiableMap(dhtNodesMap);
            allNodesWithCaches = Collections.unmodifiableCollection(nodesWithCaches);
            this.rmtNodesWithCaches = Collections.unmodifiableCollection(rmtNodesWithCaches);
            nearEnabledCaches = Collections.unmodifiableSet(nearEnabledSet);
            this.srvNodes = Collections.unmodifiableList(srvNodes);
            // Daemon nodes are everything FILTER_DAEMON excluded above.
            daemonNodes = Collections.unmodifiableList(new ArrayList<>(
                F.view(F.concat(false, loc, rmts), F0.not(FILTER_DAEMON))));
            Map<UUID, ClusterNode> nodeMap = new HashMap<>(allNodes().size() + daemonNodes.size(), 1.0f);
            for (ClusterNode n : F.concat(false, allNodes(), daemonNodes()))
                nodeMap.put(n.id(), n);
            this.nodeMap = nodeMap;
        }
        /**
         * Adds node to map.
         *
         * @param cacheMap Map to add to.
         * @param cacheName Cache name.
         * @param rich Node to add
         */
        private void addToMap(Map<String, Collection<ClusterNode>> cacheMap, String cacheName, ClusterNode rich) {
            Collection<ClusterNode> cacheNodes = cacheMap.get(cacheName);
            if (cacheNodes == null) {
                cacheNodes = new ArrayList<>(allNodes.size());
                cacheMap.put(cacheName, cacheNodes);
            }
            cacheNodes.add(rich);
        }
        /** @return Local node. */
        ClusterNode localNode() {
            return loc;
        }
        /** @return Remote nodes. */
        Collection<ClusterNode> remoteNodes() {
            return rmtNodes;
        }
        /** @return All nodes. */
        Collection<ClusterNode> allNodes() {
            return allNodes;
        }
        /**
         * Gets collection of nodes which have version equal or greater than {@code ver}.
         *
         * @param ver Version to check.
         * @return Collection of nodes with version equal or greater than {@code ver}.
         */
        Collection<ClusterNode> elderNodes(IgniteProductVersion ver) {
            Map.Entry<IgniteProductVersion, Collection<ClusterNode>> entry = nodesByVer.ceilingEntry(ver);
            if (entry == null)
                return Collections.emptyList();
            return entry.getValue();
        }
        /**
         * @return Versions map.
         */
        NavigableMap<IgniteProductVersion, Collection<ClusterNode>> versionsMap() {
            return nodesByVer;
        }
        /**
         * Gets collection of nodes with at least one cache configured.
         *
         * @param topVer Topology version (maximum allowed node order).
         * @return Collection of nodes.
         */
        Collection<ClusterNode> allNodesWithCaches(final long topVer) {
            return filter(topVer, allNodesWithCaches);
        }
        /**
         * Gets all nodes that have cache with given name.
         *
         * @param cacheName Cache name.
         * @param topVer Topology version.
         * @return Collection of nodes.
         */
        Collection<ClusterNode> cacheNodes(@Nullable String cacheName, final long topVer) {
            return filter(topVer, allCacheNodes.get(cacheName));
        }
        /**
         * Gets all remote nodes that have cache with given name.
         *
         * @param cacheName Cache name.
         * @param topVer Topology version.
         * @return Collection of nodes.
         */
        Collection<ClusterNode> remoteCacheNodes(@Nullable String cacheName, final long topVer) {
            return filter(topVer, rmtCacheNodes.get(cacheName));
        }
        /**
         * Gets all remote nodes that have at least one cache configured.
         *
         * @param topVer Topology version.
         * @return Collection of nodes.
         */
        Collection<ClusterNode> remoteCacheNodes(final long topVer) {
            return filter(topVer, rmtNodesWithCaches);
        }
        /**
         * Gets all nodes that have cache with given name and should participate in affinity calculation. With
         * partitioned cache nodes with near-only cache do not participate in affinity node calculation.
         *
         * @param cacheName Cache name.
         * @param topVer Topology version.
         * @return Collection of nodes.
         */
        Collection<ClusterNode> cacheAffinityNodes(@Nullable String cacheName, final long topVer) {
            return filter(topVer, affCacheNodes.get(cacheName));
        }
        /**
         * Gets all alive nodes that have cache with given name.
         *
         * @param cacheName Cache name.
         * @param topVer Topology version.
         * @return Collection of nodes.
         */
        Collection<ClusterNode> aliveCacheNodes(@Nullable String cacheName, final long topVer) {
            return filter(topVer, aliveCacheNodes.get(maskNull(cacheName)));
        }
        /**
         * Gets all alive remote nodes that have cache with given name.
         *
         * @param cacheName Cache name.
         * @param topVer Topology version.
         * @return Collection of nodes.
         */
        Collection<ClusterNode> aliveRemoteCacheNodes(@Nullable String cacheName, final long topVer) {
            return filter(topVer, aliveRmtCacheNodes.get(maskNull(cacheName)));
        }
        /**
         * Gets all alive remote server nodes with at least one cache configured.
         *
         * @param topVer Topology version.
         * @return Collection of nodes.
         */
        Collection<ClusterNode> aliveRemoteServerNodesWithCaches(final long topVer) {
            return filter(topVer, aliveRmtSrvNodesWithCaches);
        }
        /**
         * Gets all alive server nodes with at least one cache configured.
         *
         * @param topVer Topology version.
         * @return Collection of nodes.
         */
        Collection<ClusterNode> aliveServerNodesWithCaches(final long topVer) {
            return filter(topVer, aliveSrvNodesWithCaches);
        }
        /**
         * Gets all alive nodes with at least one cache configured.
         *
         * @param topVer Topology version.
         * @return Collection of nodes.
         */
        Collection<ClusterNode> aliveNodesWithCaches(final long topVer) {
            return filter(topVer, aliveNodesWithCaches);
        }
        /**
         * Checks if cache with given name has at least one node with near cache enabled.
         *
         * @param cacheName Cache name.
         * @return {@code True} if cache with given name has at least one node with near cache enabled.
         */
        boolean hasNearCache(@Nullable String cacheName) {
            return nearEnabledCaches.contains(cacheName);
        }
        /**
         * Removes left node from cached alives lists.
         *
         * @param leftNode Left node.
         */
        void updateAlives(ClusterNode leftNode) {
            // Node joined after this snapshot was taken, so it is not in any of the collections.
            if (leftNode.order() > maxOrder)
                return;
            filterNodeMap(aliveCacheNodes, leftNode);
            filterNodeMap(aliveRmtCacheNodes, leftNode);
            aliveNodesWithCaches.remove(leftNode);
            aliveSrvNodesWithCaches.remove(leftNode);
            aliveRmtSrvNodesWithCaches.remove(leftNode);
        }
        /**
         * Creates a copy of nodes map without the given node.
         *
         * @param map Map to copy.
         * @param exclNode Node to exclude.
         */
        private void filterNodeMap(ConcurrentMap<String, Collection<ClusterNode>> map, final ClusterNode exclNode) {
            for (String cacheName : registeredCaches.keySet()) {
                String maskedName = maskNull(cacheName);
                // Optimistic copy-and-replace loop: retry if the entry changed concurrently.
                while (true) {
                    Collection<ClusterNode> oldNodes = map.get(maskedName);
                    if (oldNodes == null || oldNodes.isEmpty())
                        break;
                    Collection<ClusterNode> newNodes = new ArrayList<>(oldNodes);
                    if (!newNodes.remove(exclNode))
                        break;
                    if (map.replace(maskedName, oldNodes, newNodes))
                        break;
                }
            }
        }
        /**
         * Replaces {@code null} with {@code NULL_CACHE_NAME}.
         *
         * @param cacheName Cache name.
         * @return Masked name.
         */
        private String maskNull(@Nullable String cacheName) {
            return cacheName == null ? NULL_CACHE_NAME : cacheName;
        }
        /**
         * Restricts a node collection to nodes whose order does not exceed {@code topVer}.
         *
         * @param topVer Topology version.
         * @param nodes Nodes.
         * @return Filtered collection (potentially empty, but never {@code null}).
         */
        private Collection<ClusterNode> filter(final long topVer, @Nullable Collection<ClusterNode> nodes) {
            if (nodes == null)
                return Collections.emptyList();
            // If no filtering needed, return original collection.
            return nodes.isEmpty() || topVer < 0 || topVer >= maxOrder ?
                nodes :
                F.view(nodes, new P1<ClusterNode>() {
                    @Override public boolean apply(ClusterNode node) {
                        return node.order() <= topVer;
                    }
                });
        }
        /** @return Daemon nodes. */
        Collection<ClusterNode> daemonNodes() {
            return daemonNodes;
        }
        /**
         * @param id Node ID.
         * @return Node.
         */
        @Nullable ClusterNode node(UUID id) {
            return nodeMap.get(id);
        }
        /** {@inheritDoc} */
        @Override public String toString() {
            return S.toString(DiscoCache.class, this, "allNodesWithDaemons", U.toShortString(allNodes));
        }
    }
/**
* Cache predicate.
*/
private static class CachePredicate {
/** Cache filter. */
private final IgnitePredicate<ClusterNode> cacheFilter;
/** If near cache is enabled on data nodes. */
private final boolean nearEnabled;
/** Cache mode. */
private final CacheMode cacheMode;
/** Collection of client near nodes. */
private final ConcurrentHashMap<UUID, Boolean> clientNodes;
/**
* @param cacheFilter Cache filter.
* @param nearEnabled Near enabled flag.
* @param cacheMode Cache mode.
*/
private CachePredicate(IgnitePredicate<ClusterNode> cacheFilter, boolean nearEnabled, CacheMode cacheMode) {
assert cacheFilter != null;
this.cacheFilter = cacheFilter;
this.nearEnabled = nearEnabled;
this.cacheMode = cacheMode;
clientNodes = new ConcurrentHashMap<>();
}
/**
* @param nodeId Near node ID to add.
* @param nearEnabled Near enabled flag.
* @return {@code True} if new node ID was added.
*/
public boolean addClientNode(UUID nodeId, boolean nearEnabled) {
assert nodeId != null;
Boolean old = clientNodes.putIfAbsent(nodeId, nearEnabled);
return old == null;
}
/**
* @param leftNodeId Left node ID.
* @return {@code True} if existing node ID was removed.
*/
public boolean onNodeLeft(UUID leftNodeId) {
assert leftNodeId != null;
Boolean old = clientNodes.remove(leftNodeId);
return old != null;
}
/**
* @param node Node to check.
* @return {@code True} if this node is a data node for given cache.
*/
public boolean dataNode(ClusterNode node) {
return !node.isDaemon() && CU.affinityNode(node, cacheFilter);
}
/**
* @param node Node to check.
* @return {@code True} if cache is accessible on the given node.
*/
public boolean cacheNode(ClusterNode node) {
return !node.isDaemon() && (CU.affinityNode(node, cacheFilter) || clientNodes.containsKey(node.id()));
}
/**
* @param node Node to check.
* @return {@code True} if near cache is present on the given nodes.
*/
public boolean nearNode(ClusterNode node) {
if (node.isDaemon())
return false;
if (CU.affinityNode(node, cacheFilter))
return nearEnabled;
Boolean near = clientNodes.get(node.id());
return near != null && near;
}
/**
* @param node Node to check.
* @return {@code True} if client cache is present on the given nodes.
*/
public boolean clientNode(ClusterNode node) {
if (node.isDaemon())
return false;
Boolean near = clientNodes.get(node.id());
return near != null && !near;
}
}
}
| apache-2.0 |
Neoskai/greycat | greycat/src/main/java/greycat/DeferCounter.java | 1462 | /**
* Copyright 2017 The GreyCat Authors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package greycat;
import greycat.plugin.Job;
/**
 * DeferCounter provides a means to wait for an amount of events before running a method.
 */
public interface DeferCounter {
    /**
     * Notifies the counter that an awaited event has occurred.<br>
     * If the total amount of awaited events is reached, the task registered by the {@link #then(Job) then} method is executed.
     */
    void count();
    /**
     * Gets the number of events still expected.
     *
     * @return the number of events still expected
     */
    int getCount();
    /**
     * Registers the task, in form of a {@link Job}, to be called when all awaited events have occurred.
     *
     * @param job The task to be executed
     */
    void then(Job job);
    /**
     * Wraps this counter into a callback.
     *
     * @return the callback
     */
    Callback wrap();
}
| apache-2.0 |
Fabryprog/camel | core/camel-core/src/main/java/org/apache/camel/model/ClaimCheckDefinition.java | 7537 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.model;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlAttribute;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlTransient;
import org.apache.camel.AggregationStrategy;
import org.apache.camel.spi.Metadata;
/**
 * The Claim Check EIP allows you to replace message content with a claim check (a unique key),
 * which can be used to retrieve the message content at a later time.
 */
@Metadata(label = "eip,routing")
@XmlRootElement(name = "claimCheck")
@XmlAccessorType(XmlAccessType.FIELD)
public class ClaimCheckDefinition extends NoOutputDefinition<ClaimCheckDefinition> {
    /** The claim check operation to perform (mandatory). */
    @XmlAttribute(required = true)
    private ClaimCheckOperation operation;
    /** Optional key identifying the claim check (supports dynamic keys, see {@link #key(String)}). */
    @XmlAttribute
    private String key;
    /** Optional filter controlling what data is merged back (see {@link #filter(String)}). */
    @XmlAttribute
    private String filter;
    /** Reference to a custom {@link AggregationStrategy} in the registry. */
    @XmlAttribute(name = "strategyRef") @Metadata(label = "advanced")
    private String aggregationStrategyRef;
    /** Method name to invoke when a POJO is used as aggregation strategy. */
    @XmlAttribute(name = "strategyMethodName") @Metadata(label = "advanced")
    private String aggregationStrategyMethodName;
    /** Custom aggregation strategy instance (not part of the XML model). */
    @XmlTransient
    private AggregationStrategy aggregationStrategy;
    public ClaimCheckDefinition() {
    }
    /** @return {@code "ClaimCheck[<operation>]"}, or {@code "ClaimCheck"} if no operation is set. */
    @Override
    public String toString() {
        if (operation != null) {
            return "ClaimCheck[" + operation + "]";
        } else {
            return "ClaimCheck";
        }
    }
    @Override
    public String getShortName() {
        return "claimCheck";
    }
    @Override
    public String getLabel() {
        return "claimCheck";
    }
    // Fluent API
    //-------------------------------------------------------------------------
    /**
     * The claim check operation to use.
     * The following operations is supported:
     * <ul>
     * <li>Get</li> - Gets (does not remove) the claim check by the given key.
     * <li>GetAndRemove</li> - Gets and remove the claim check by the given key.
     * <li>Set</li> - Sets a new (will override if key already exists) claim check with the given key.
     * <li>Push</li> - Sets a new claim check on the stack (does not use key).
     * <li>Pop</li> - Gets the latest claim check from the stack (does not use key).
     * </ul>
     */
    public ClaimCheckDefinition operation(ClaimCheckOperation operation) {
        setOperation(operation);
        return this;
    }
    /**
     * To use a specific key for claim check id (for dynamic keys use simple language syntax as the key).
     */
    public ClaimCheckDefinition key(String key) {
        setKey(key);
        return this;
    }
    /**
     * Specified a filter to control what data gets merging data back from the claim check repository.
     *
     * The following syntax is supported:
     * <ul>
     * <li>body</li> - to aggregate the message body
     * <li>attachments</li> - to aggregate all the message attachments
     * <li>headers</li> - to aggregate all the message headers
     * <li>header:pattern</li> - to aggregate all the message headers that matches the pattern.
     * </ul>
     * The pattern uses the following rules are applied in this order:
     * <ul>
     * <li>exact match, returns true</li>
     * <li>wildcard match (pattern ends with a * and the name starts with the pattern), returns true</li>
     * <li>regular expression match, returns true</li>
     * <li>otherwise returns false</li>
     * </ul>
     * <p>
     * You can specify multiple rules separated by comma. For example to include the message body and all headers starting with foo
     * <tt>body,header:foo*</tt>.
     * The syntax supports the following prefixes which can be used to specify include,exclude, or remove
     * <ul>
     * <li>+</li> - to include (which is the default mode)
     * <li>-</li> - to exclude (exclude takes precedence over include)
     * <li>--</li> - to remove (remove takes precedence)
     * </ul>
     * For example to exclude a header name foo, and remove all headers starting with bar
     * <tt>-header:foo,--headers:bar*</tt>
     * Note you cannot have both include and exclude <tt>header:pattern</tt> at the same time.
     */
    public ClaimCheckDefinition filter(String filter) {
        setFilter(filter);
        return this;
    }
    /**
     * To use a custom {@link AggregationStrategy} instead of the default implementation.
     * Notice you cannot use both custom aggregation strategy and configure data at the same time.
     */
    public ClaimCheckDefinition aggregationStrategy(AggregationStrategy aggregationStrategy) {
        setAggregationStrategy(aggregationStrategy);
        return this;
    }
    /**
     * To use a custom {@link AggregationStrategy} instead of the default implementation.
     * Notice you cannot use both custom aggregation strategy and configure data at the same time.
     */
    public ClaimCheckDefinition aggregationStrategyRef(String aggregationStrategyRef) {
        setAggregationStrategyRef(aggregationStrategyRef);
        return this;
    }
    /**
     * This option can be used to explicit declare the method name to use, when using POJOs as the AggregationStrategy.
     */
    public ClaimCheckDefinition aggregationStrategyMethodName(String aggregationStrategyMethodName) {
        setAggregationStrategyMethodName(aggregationStrategyMethodName);
        return this;
    }
    // Properties
    //-------------------------------------------------------------------------
    /** @return the claim check key, or {@code null} if not set. */
    public String getKey() {
        return key;
    }
    /** @param key the claim check key to use. */
    public void setKey(String key) {
        this.key = key;
    }
    /** @return the configured claim check operation. */
    public ClaimCheckOperation getOperation() {
        return operation;
    }
    /** @param operation the claim check operation to use. */
    public void setOperation(ClaimCheckOperation operation) {
        this.operation = operation;
    }
    /** @return the merge-back filter expression, or {@code null} if not set. */
    public String getFilter() {
        return filter;
    }
    /** @param filter the merge-back filter expression (see {@link #filter(String)}). */
    public void setFilter(String filter) {
        this.filter = filter;
    }
    /** @return the registry reference of the custom aggregation strategy, or {@code null}. */
    public String getAggregationStrategyRef() {
        return aggregationStrategyRef;
    }
    /** @param aggregationStrategyRef the registry reference of the custom aggregation strategy. */
    public void setAggregationStrategyRef(String aggregationStrategyRef) {
        this.aggregationStrategyRef = aggregationStrategyRef;
    }
    /** @return the POJO method name used as aggregation strategy, or {@code null}. */
    public String getAggregationStrategyMethodName() {
        return aggregationStrategyMethodName;
    }
    /** @param aggregationStrategyMethodName the POJO method name to use as aggregation strategy. */
    public void setAggregationStrategyMethodName(String aggregationStrategyMethodName) {
        this.aggregationStrategyMethodName = aggregationStrategyMethodName;
    }
    /** @return the custom aggregation strategy instance, or {@code null}. */
    public AggregationStrategy getAggregationStrategy() {
        return aggregationStrategy;
    }
    /** @param aggregationStrategy the custom aggregation strategy instance to use. */
    public void setAggregationStrategy(AggregationStrategy aggregationStrategy) {
        this.aggregationStrategy = aggregationStrategy;
    }
}
| apache-2.0 |
syany/uranoplums | src/main/java/org/uranoplums/typical/io/UraFileBuffer.java | 872 | /*
* Copyright 2013-2015 the Uranoplums Foundation and the Others.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*
* $Id: UraFileBuffer.java$
*/
package org.uranoplums.typical.io;
/**
 * The UraFileBuffer interface.<br>
 *
 * @since 2015/11/10
 * @author syany
 */
public interface UraFileBuffer {
    /** @return the buffer size. */
    public int getBufferSize();
}
| apache-2.0 |
pfirmstone/JGDMS | qa/src/org/apache/river/test/spec/security/proxytrust/util/TEImpl.java | 1334 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.river.test.spec.security.proxytrust.util;
import java.util.logging.Level;
// java
import java.io.Serializable;
// net.jini
import net.jini.security.proxytrust.TrustEquivalence;
/**
* Serializable class implementing TrustEquivalence interface.
*/
public class TEImpl implements Serializable, TrustEquivalence {
    // NOTE(review): no serialVersionUID is declared, so serialization
    // compatibility relies on the default computed UID.

    /**
     * Method from TrustEquivalence interface. Does nothing.
     *
     * @param obj the object to compare for trust equivalence (ignored)
     * @return false, unconditionally
     */
    public boolean checkTrustEquivalence(Object obj) {
        return false;
    }
}
| apache-2.0 |
tikue/jcs2-snapshot | src/java/org/apache/commons/jcs/auxiliary/disk/jdbc/mysql/MySQLDiskCacheFactory.java | 2608 | package org.apache.commons.jcs.auxiliary.disk.jdbc.mysql;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.Serializable;
import org.apache.commons.jcs.auxiliary.AuxiliaryCache;
import org.apache.commons.jcs.auxiliary.AuxiliaryCacheAttributes;
import org.apache.commons.jcs.auxiliary.AuxiliaryCacheFactory;
import org.apache.commons.jcs.engine.behavior.ICompositeCacheManager;
import org.apache.commons.jcs.engine.behavior.IElementSerializer;
import org.apache.commons.jcs.engine.logging.behavior.ICacheEventLogger;
/**
* This factory should create mysql disk caches.
* <p>
* @author Aaron Smuts
*/
public class MySQLDiskCacheFactory
    implements AuxiliaryCacheFactory
{
    /** Display name reported for this factory. */
    private String factoryName = "MySQLDiskCacheFactory";

    /**
     * Builds (or retrieves) the MySQL-backed disk cache described by the
     * supplied attributes.
     * <p>
     * @param rawAttr cache attributes; must be a MySQLDiskCacheAttributes instance
     * @param cacheManager the owning composite cache manager
     * @param cacheEventLogger logger for cache events
     * @param elementSerializer serializer used for cached elements
     * @return AuxiliaryCache backed by MySQL
     */
    public <K extends Serializable, V extends Serializable> AuxiliaryCache<K, V> createCache( AuxiliaryCacheAttributes rawAttr, ICompositeCacheManager cacheManager,
        ICacheEventLogger cacheEventLogger, IElementSerializer elementSerializer )
    {
        MySQLDiskCacheAttributes attributes = (MySQLDiskCacheAttributes) rawAttr;
        MySQLDiskCacheManager manager =
            MySQLDiskCacheManager.getInstance( attributes, cacheManager, cacheEventLogger, elementSerializer );
        return manager.getCache( attributes );
    }

    /**
     * Sets the display name of this factory.
     * <p>
     * @param nameArg the new factory name
     */
    public void setName( String nameArg )
    {
        factoryName = nameArg;
    }

    /**
     * Returns the display name.
     * <p>
     * @return factory name
     */
    public String getName()
    {
        return factoryName;
    }
}
| apache-2.0 |
dejanb/activemq-artemis | artemis-core-client/src/main/java/org/apache/activemq/artemis/spi/core/remoting/SessionContext.java | 13873 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.artemis.spi.core.remoting;
import javax.transaction.xa.XAException;
import javax.transaction.xa.Xid;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.Set;
import java.util.concurrent.Executor;
import org.apache.activemq.artemis.api.core.ActiveMQException;
import org.apache.activemq.artemis.api.core.ICoreMessage;
import org.apache.activemq.artemis.api.core.Message;
import org.apache.activemq.artemis.api.core.RoutingType;
import org.apache.activemq.artemis.api.core.SimpleString;
import org.apache.activemq.artemis.api.core.client.ClientConsumer;
import org.apache.activemq.artemis.api.core.client.ClientSession;
import org.apache.activemq.artemis.api.core.client.SendAcknowledgementHandler;
import org.apache.activemq.artemis.core.client.impl.ClientConsumerInternal;
import org.apache.activemq.artemis.core.client.impl.ClientLargeMessageInternal;
import org.apache.activemq.artemis.core.client.impl.ClientMessageInternal;
import org.apache.activemq.artemis.core.client.impl.ClientProducerCreditsImpl;
import org.apache.activemq.artemis.core.client.impl.ClientSessionInternal;
import org.apache.activemq.artemis.spi.core.protocol.RemotingConnection;
import org.apache.activemq.artemis.utils.IDGenerator;
import org.apache.activemq.artemis.utils.SimpleIDGenerator;
public abstract class SessionContext {

    // The client-side session this context serves; may be null until setSession is called.
    protected ClientSessionInternal session;

    // Handler notified when the server acknowledges sends; set via setSendAcknowledgementHandler.
    protected SendAcknowledgementHandler sendAckHandler;

    // volatile: swapped on failover/reconnect and read from other threads.
    protected volatile RemotingConnection remotingConnection;

    // Generator for context-local ids, starting at 0.
    protected final IDGenerator idGenerator = new SimpleIDGenerator(0);

    public SessionContext(RemotingConnection remotingConnection) {
        this.remotingConnection = remotingConnection;
    }

    public ClientSessionInternal getSession() {
        return session;
    }

    public void setSession(ClientSessionInternal session) {
        this.session = session;
    }

    public abstract void resetName(String name);

    public abstract int getReconnectID();

    /**
     * it will either reattach or reconnect, preferably reattaching it.
     *
     * @param newConnection
     * @return true if it was possible to reattach
     * @throws ActiveMQException
     */
    public abstract boolean reattachOnNewConnection(RemotingConnection newConnection) throws ActiveMQException;

    public RemotingConnection getRemotingConnection() {
        return remotingConnection;
    }

    public abstract void closeConsumer(ClientConsumer consumer) throws ActiveMQException;

    public abstract void sendConsumerCredits(ClientConsumer consumer, int credits);

    public abstract boolean supportsLargeMessage();

    // The handleReceive* methods below guard against a null session: during
    // close/failover the session reference may already be cleared, in which
    // case the incoming delivery is silently dropped.

    protected void handleReceiveLargeMessage(ConsumerContext consumerID,
                                             ClientLargeMessageInternal clientLargeMessage,
                                             long largeMessageSize) throws Exception {
        ClientSessionInternal session = this.session;
        if (session != null) {
            session.handleReceiveLargeMessage(consumerID, clientLargeMessage, largeMessageSize);
        }
    }

    protected void handleReceiveMessage(ConsumerContext consumerID,
                                        ClientMessageInternal message) throws Exception {
        ClientSessionInternal session = this.session;
        if (session != null) {
            session.handleReceiveMessage(consumerID, message);
        }
    }

    protected void handleReceiveContinuation(ConsumerContext consumerID,
                                             byte[] chunk,
                                             int flowControlSize,
                                             boolean isContinues) throws Exception {
        ClientSessionInternal session = this.session;
        if (session != null) {
            session.handleReceiveContinuation(consumerID, chunk, flowControlSize, isContinues);
        }
    }

    protected void handleReceiveProducerCredits(SimpleString address, int credits) {
        ClientSessionInternal session = this.session;
        if (session != null) {
            session.handleReceiveProducerCredits(address, credits);
        }
    }

    protected void handleReceiveProducerFailCredits(SimpleString address, int credits) {
        ClientSessionInternal session = this.session;
        if (session != null) {
            session.handleReceiveProducerFailCredits(address, credits);
        }
    }

    public abstract int getCreditsOnSendingFull(Message msgI);

    public abstract void sendFullMessage(ICoreMessage msgI,
                                         boolean sendBlocking,
                                         SendAcknowledgementHandler handler,
                                         SimpleString defaultAddress) throws ActiveMQException;

    /**
     * it should return the number of credits (or bytes) used to send this packet
     *
     * @param msgI
     * @return
     * @throws ActiveMQException
     */
    public abstract int sendInitialChunkOnLargeMessage(Message msgI) throws ActiveMQException;

    public abstract int sendLargeMessageChunk(Message msgI,
                                              long messageBodySize,
                                              boolean sendBlocking,
                                              boolean lastChunk,
                                              byte[] chunk,
                                              int reconnectID,
                                              SendAcknowledgementHandler messageHandler) throws ActiveMQException;

    public abstract int sendServerLargeMessageChunk(Message msgI,
                                                    long messageBodySize,
                                                    boolean sendBlocking,
                                                    boolean lastChunk,
                                                    byte[] chunk,
                                                    SendAcknowledgementHandler messageHandler) throws ActiveMQException;

    public abstract void setSendAcknowledgementHandler(SendAcknowledgementHandler handler);

    /**
     * Creates a shared queue using the routing type set by the Address. If the Address supports more than one type of delivery
     * then the default delivery mode (MULTICAST) is used.
     *
     * @param address
     * @param queueName
     * @param routingType
     * @param filterString
     * @param durable
     * @throws ActiveMQException
     */
    public abstract void createSharedQueue(SimpleString address,
                                           SimpleString queueName,
                                           RoutingType routingType,
                                           SimpleString filterString,
                                           boolean durable) throws ActiveMQException;

    public abstract void createSharedQueue(SimpleString address,
                                           SimpleString queueName,
                                           SimpleString filterString,
                                           boolean durable) throws ActiveMQException;

    public abstract void deleteQueue(SimpleString queueName) throws ActiveMQException;

    @Deprecated
    public abstract void createAddress(SimpleString address, Set<RoutingType> routingTypes, boolean autoCreated) throws ActiveMQException;

    public abstract void createAddress(SimpleString address, EnumSet<RoutingType> routingTypes, boolean autoCreated) throws ActiveMQException;

    @Deprecated
    public abstract void createQueue(SimpleString address,
                                     SimpleString queueName,
                                     SimpleString filterString,
                                     boolean durable,
                                     boolean temp,
                                     boolean autoCreated) throws ActiveMQException;

    public abstract void createQueue(SimpleString address,
                                     RoutingType routingType,
                                     SimpleString queueName,
                                     SimpleString filterString,
                                     boolean durable,
                                     boolean temp,
                                     int maxConsumers,
                                     boolean purgeOnNoConsumers,
                                     boolean autoCreated) throws ActiveMQException;

    public abstract ClientSession.QueueQuery queueQuery(SimpleString queueName) throws ActiveMQException;

    public abstract void forceDelivery(ClientConsumer consumer, long sequence) throws ActiveMQException;

    public abstract ClientSession.AddressQuery addressQuery(SimpleString address) throws ActiveMQException;

    public abstract void simpleCommit() throws ActiveMQException;

    public abstract void simpleCommit(boolean block) throws ActiveMQException;

    /**
     * If we are doing a simple rollback on the RA, we need to ack the last message sent to the consumer,
     * otherwise DLQ won't work.
     * <p>
     * this is because we only ACK after on the RA, We may review this if we always acked earlier.
     *
     * @param lastMessageAsDelivered
     * @throws ActiveMQException
     */
    public abstract void simpleRollback(boolean lastMessageAsDelivered) throws ActiveMQException;

    public abstract void sessionStart() throws ActiveMQException;

    public abstract void sessionStop() throws ActiveMQException;

    public abstract void sendACK(boolean individual,
                                 boolean block,
                                 ClientConsumer consumer,
                                 Message message) throws ActiveMQException;

    public abstract void expireMessage(ClientConsumer consumer, Message message) throws ActiveMQException;

    public abstract void sessionClose() throws ActiveMQException;

    public abstract void addSessionMetadata(String key, String data) throws ActiveMQException;

    public abstract void addUniqueMetaData(String key, String data) throws ActiveMQException;

    public abstract void sendProducerCreditsMessage(int credits, SimpleString address);

    // XA transaction support: these map onto the corresponding javax.transaction.xa.XAResource operations.

    public abstract void xaCommit(Xid xid, boolean onePhase) throws XAException, ActiveMQException;

    public abstract void xaEnd(Xid xid, int flags) throws XAException, ActiveMQException;

    public abstract void xaForget(Xid xid) throws XAException, ActiveMQException;

    public abstract int xaPrepare(Xid xid) throws XAException, ActiveMQException;

    public abstract Xid[] xaScan() throws ActiveMQException;

    public abstract void xaRollback(Xid xid, boolean wasStarted) throws ActiveMQException, XAException;

    public abstract void xaStart(Xid xid, int flags) throws XAException, ActiveMQException;

    public abstract boolean configureTransactionTimeout(int seconds) throws ActiveMQException;

    public abstract ClientConsumerInternal createConsumer(SimpleString queueName,
                                                          SimpleString filterString,
                                                          int windowSize,
                                                          int maxRate,
                                                          int ackBatchSize,
                                                          boolean browseOnly,
                                                          Executor executor,
                                                          Executor flowControlExecutor) throws ActiveMQException;

    /**
     * Performs a round trip to the server requesting what is the current tx timeout on the session
     *
     * @return
     */
    public abstract int recoverSessionTimeout() throws ActiveMQException;

    public abstract int getServerVersion();

    public abstract void recreateSession(String username,
                                         String password,
                                         int minLargeMessageSize,
                                         boolean xa,
                                         boolean autoCommitSends,
                                         boolean autoCommitAcks,
                                         boolean preAcknowledge) throws ActiveMQException;

    public abstract void recreateConsumerOnServer(ClientConsumerInternal consumerInternal, long consumerId, boolean isSessionStarted) throws ActiveMQException;

    public abstract void xaFailed(Xid xid) throws ActiveMQException;

    public abstract void restartSession() throws ActiveMQException;

    public abstract void resetMetadata(HashMap<String, String> metaDataToSend);

    // Failover utility classes

    /**
     * Interrupt and return any blocked calls
     */
    public abstract void returnBlocking(ActiveMQException cause);

    /**
     * it will lock the communication channel of the session avoiding anything to come while failover is happening.
     * It happens on preFailover from ClientSessionImpl
     */
    public abstract void lockCommunications();

    public abstract void releaseCommunications();

    public abstract void cleanup();

    public abstract void linkFlowControl(SimpleString address, ClientProducerCreditsImpl clientProducerCredits);

    public abstract boolean isWritable(ReadyListener callback);
}
| apache-2.0 |
GenericBreakGlass/GenericBreakGlass-XACML | src/com.sun.xacml/src/main/java/com/sun/xacml/attr/TypeIdentifierConstants.java | 2786 | package com.sun.xacml.attr;
import java.net.URI;
/**
 * XACML data-type identifier constants: the standard type URIs defined by
 * XML Schema and the XACML specifications, provided both as {@link String}s
 * and as pre-parsed {@link URI}s.
 *
 * <p>This is a non-instantiable constant holder.
 */
public class TypeIdentifierConstants {

    /** Non-instantiable: this class only holds constants. */
    private TypeIdentifierConstants() {
    }

    public static final String ANYURI = "http://www.w3.org/2001/XMLSchema#anyURI";
    public static final String BASE64BINARY = "http://www.w3.org/2001/XMLSchema#base64Binary";
    public static final String BOOLEAN = "http://www.w3.org/2001/XMLSchema#boolean";
    public static final String DATE = "http://www.w3.org/2001/XMLSchema#date";
    public static final String DATETIME = "http://www.w3.org/2001/XMLSchema#dateTime";
    public static final String DAYTIMEDURATION = "http://www.w3.org/TR/2002/WD-xquery-operators-20020816#dayTimeDuration";
    public static final String DECISION = "urn:oasis:names:tc:xacml:3.0:delegation:decision";
    public static final String DNSNAME = "urn:oasis:names:tc:xacml:2.0:data-type:dnsName";
    public static final String DOUBLE = "http://www.w3.org/2001/XMLSchema#double";
    public static final String HEXBINARY = "http://www.w3.org/2001/XMLSchema#hexBinary";
    public static final String INTEGER = "http://www.w3.org/2001/XMLSchema#integer";
    public static final String IPADDRESS = "urn:oasis:names:tc:xacml:2.0:data-type:ipAddress";
    public static final String RFC822NAME = "urn:oasis:names:tc:xacml:1.0:data-type:rfc822Name";
    public static final String STRING = "http://www.w3.org/2001/XMLSchema#string";
    public static final String TIME = "http://www.w3.org/2001/XMLSchema#time";
    public static final String X500NAME = "urn:oasis:names:tc:xacml:1.0:data-type:x500Name";
    public static final String YEARMONTHDURATION = "http://www.w3.org/TR/2002/WD-xquery-operators-20020816#yearMonthDuration";

    // Pre-parsed URI forms of the identifiers above, in the same order.
    public static final URI ANYURI_URI = URI.create(ANYURI);
    public static final URI BASE64BINARY_URI = URI.create(BASE64BINARY);
    public static final URI BOOLEAN_URI = URI.create(BOOLEAN);
    public static final URI DATE_URI = URI.create(DATE);
    public static final URI DATETIME_URI = URI.create(DATETIME);
    public static final URI DAYTIMEDURATION_URI = URI.create(DAYTIMEDURATION);
    public static final URI DECISION_URI = URI.create(DECISION);
    public static final URI DNSNAME_URI = URI.create(DNSNAME);
    public static final URI DOUBLE_URI = URI.create(DOUBLE);
    public static final URI HEXBINARY_URI = URI.create(HEXBINARY);
    public static final URI INTEGER_URI = URI.create(INTEGER);
    public static final URI IPADDRESS_URI = URI.create(IPADDRESS);
    public static final URI RFC822NAME_URI = URI.create(RFC822NAME);
    public static final URI STRING_URI = URI.create(STRING);
    public static final URI TIME_URI = URI.create(TIME);
    public static final URI X500NAME_URI = URI.create(X500NAME);
    public static final URI YEARMONTHDURATION_URI = URI.create(YEARMONTHDURATION);
}
| apache-2.0 |
fredj/closure-compiler | src/com/google/javascript/jscomp/newtypes/QualifiedName.java | 2037 | /*
* Copyright 2013 The Closure Compiler Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.javascript.jscomp.newtypes;
import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import com.google.common.collect.ImmutableList;
import com.google.javascript.rhino.Node;
/**
* Represents a qualified name.
* (e.g. namespace.inner.Foo)
*
* @author blickly@google.com (Ben Lickly)
* @author dimvar@google.com (Dimitris Vardoulakis)
*/
public class QualifiedName {
    // The dot-separated parts of the name, leftmost first.
    // final: instances are immutable by design (all mutators return new objects).
    final ImmutableList<String> parts;

    private QualifiedName(ImmutableList<String> parts) {
        this.parts = parts;
    }

    /** Creates a qualified name consisting of the single identifier {@code s}. */
    public QualifiedName(String s) {
        this.parts = ImmutableList.of(s);
    }

    /** Concatenates {@code lhs} and {@code rhs}; the parts of {@code lhs} come first. */
    public static QualifiedName join(QualifiedName lhs, QualifiedName rhs) {
        return new QualifiedName(ImmutableList.<String>builder()
            .addAll(lhs.parts).addAll(rhs.parts).build());
    }

    /**
     * Builds a QualifiedName by splitting the qualified name of a GETPROP node on dots.
     *
     * @return the parsed name, or {@code null} when the node has no qualified name
     */
    public static QualifiedName fromGetprop(Node getprop) {
        String qname = getprop.getQualifiedName();
        if (qname == null) {
            return null;
        }
        return new QualifiedName(
            ImmutableList.copyOf(Splitter.on('.').split(qname)));
    }

    /** @return true if this name is a simple identifier (exactly one part). */
    public boolean isIdentifier() {
        return parts.size() == 1;
    }

    /**
     * Drops the leftmost part (e.g. {@code a.b.c} becomes {@code b.c}).
     * Must not be called on a simple identifier.
     */
    public QualifiedName getAllButLeftmost() {
        Preconditions.checkArgument(!isIdentifier());
        return new QualifiedName(parts.subList(1, parts.size()));
    }

    /** @return the leftmost part of this name. */
    public String getLeftmostName() {
        return parts.get(0);
    }

    @Override
    public String toString() {
        return parts.toString();
    }
}
| apache-2.0 |
toshimasa-nasu/hbase | hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/HttpDoAsClient.java | 9930 | /**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.thrift;
import java.io.UnsupportedEncodingException;
import java.nio.ByteBuffer;
import java.nio.charset.CharacterCodingException;
import java.nio.charset.Charset;
import java.nio.charset.CharsetDecoder;
import java.nio.charset.StandardCharsets;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Base64;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;

import javax.security.auth.Subject;
import javax.security.auth.login.AppConfigurationEntry;
import javax.security.auth.login.Configuration;
import javax.security.auth.login.LoginContext;

import org.apache.hadoop.hbase.thrift.generated.AlreadyExists;
import org.apache.hadoop.hbase.thrift.generated.ColumnDescriptor;
import org.apache.hadoop.hbase.thrift.generated.Hbase;
import org.apache.hadoop.hbase.thrift.generated.TCell;
import org.apache.hadoop.hbase.thrift.generated.TRowResult;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.protocol.TProtocol;
import org.apache.thrift.transport.THttpClient;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;
import org.ietf.jgss.GSSContext;
import org.ietf.jgss.GSSCredential;
import org.ietf.jgss.GSSException;
import org.ietf.jgss.GSSManager;
import org.ietf.jgss.GSSName;
import org.ietf.jgss.Oid;

import sun.misc.BASE64Encoder;
/**
* See the instructions under hbase-examples/README.txt
*/
/**
 * Example Thrift client that talks to the HBase Thrift server over HTTP and
 * demonstrates the "doAs" impersonation header, optionally authenticating via
 * Kerberos/SPNEGO (requires Java 8+ for {@code java.util.Base64}).
 *
 * See the instructions under hbase-examples/README.txt
 */
public class HttpDoAsClient {

    /** Thrift server port (second CLI argument). */
    protected static int port;
    /** Thrift server host (first CLI argument). */
    protected static String host;
    /** Decoder used to turn server bytes into UTF-8 strings; not thread-safe. */
    CharsetDecoder decoder = null;
    /** Whether to authenticate with Kerberos/SPNEGO (optional third CLI argument). */
    private static boolean secure = false;

    public static void main(String[] args) throws Exception {
        if (args.length < 2 || args.length > 3) {
            System.out.println("Invalid arguments!");
            System.out.println("Usage: DemoClient host port [secure=false]");
            System.exit(-1);
        }
        port = Integer.parseInt(args[1]);
        host = args[0];
        if (args.length > 2) {
            secure = Boolean.parseBoolean(args[2]);
        }
        final HttpDoAsClient client = new HttpDoAsClient();
        // Run the demo inside the (possibly Kerberos-authenticated) subject so that
        // GSS calls pick up the right credentials.
        Subject.doAs(getSubject(),
            new PrivilegedExceptionAction<Void>() {
                @Override
                public Void run() throws Exception {
                    client.run();
                    return null;
                }
            });
    }

    HttpDoAsClient() {
        decoder = StandardCharsets.UTF_8.newDecoder();
    }

    /**
     * Helper to translate byte[]'s to UTF8 strings.
     *
     * @return the decoded string, or a placeholder when the bytes are not valid UTF-8
     */
    private String utf8(byte[] buf) {
        try {
            return decoder.decode(ByteBuffer.wrap(buf)).toString();
        } catch (CharacterCodingException e) {
            return "[INVALID UTF-8]";
        }
    }

    /** Helper to translate strings to UTF8 bytes. */
    private byte[] bytes(String s) {
        // UTF-8 is always supported, so the checked-exception dance of
        // String.getBytes(String) is unnecessary.
        return s.getBytes(StandardCharsets.UTF_8);
    }

    /**
     * Runs the demo: drops any existing demo table, recreates it with two column
     * families and prints the resulting column descriptors.
     */
    private void run() throws Exception {
        TTransport transport = new TSocket(host, port);
        transport.open();
        String url = "http://" + host + ":" + port;
        THttpClient httpClient = new THttpClient(url);
        httpClient.open();
        TProtocol protocol = new TBinaryProtocol(httpClient);
        Hbase.Client client = new Hbase.Client(protocol);
        byte[] t = bytes("demo_table");
        //
        // Scan all tables, look for the demo table and delete it.
        //
        System.out.println("scanning tables...");
        for (ByteBuffer name : refresh(client, httpClient).getTableNames()) {
            System.out.println("  found: " + utf8(name.array()));
            if (utf8(name.array()).equals(utf8(t))) {
                if (client.isTableEnabled(name)) {
                    System.out.println("    disabling table: " + utf8(name.array()));
                    refresh(client, httpClient).disableTable(name);
                }
                System.out.println("    deleting table: " + utf8(name.array()));
                refresh(client, httpClient).deleteTable(name);
            }
        }
        //
        // Create the demo table with two column families, entry: and unused:
        //
        ArrayList<ColumnDescriptor> columns = new ArrayList<ColumnDescriptor>();
        ColumnDescriptor col;
        col = new ColumnDescriptor();
        col.name = ByteBuffer.wrap(bytes("entry:"));
        col.timeToLive = Integer.MAX_VALUE;
        col.maxVersions = 10;
        columns.add(col);
        col = new ColumnDescriptor();
        col.name = ByteBuffer.wrap(bytes("unused:"));
        col.timeToLive = Integer.MAX_VALUE;
        columns.add(col);
        System.out.println("creating table: " + utf8(t));
        try {
            refresh(client, httpClient).createTable(ByteBuffer.wrap(t), columns);
        } catch (AlreadyExists ae) {
            System.out.println("WARN: " + ae.message);
        }
        System.out.println("column families in " + utf8(t) + ": ");
        Map<ByteBuffer, ColumnDescriptor> columnMap = refresh(client, httpClient)
            .getColumnDescriptors(ByteBuffer.wrap(t));
        for (ColumnDescriptor col2 : columnMap.values()) {
            System.out.println("  column: " + utf8(col2.name.array()) + ", maxVer: " + Integer.toString(col2.maxVersions));
        }
        transport.close();
        httpClient.close();
    }

    /**
     * Refreshes the per-request authentication state: in secure mode, (re)sets the
     * "doAs" impersonation header and a fresh SPNEGO Authorization header on the
     * underlying HTTP transport. Returns the same client for call chaining.
     */
    private Hbase.Client refresh(Hbase.Client client, THttpClient httpClient) {
        if (secure) {
            httpClient.setCustomHeader("doAs", "hbase");
            try {
                httpClient.setCustomHeader("Authorization", generateTicket());
            } catch (GSSException e) {
                e.printStackTrace();
            }
        }
        return client;
    }

    /**
     * Establishes a GSS-API (Kerberos) context with the Thrift server principal and
     * returns the initial token formatted as an HTTP "Negotiate" header value.
     */
    private String generateTicket() throws GSSException {
        final GSSManager manager = GSSManager.getInstance();
        // Oid for kerberos principal name
        Oid krb5PrincipalOid = new Oid("1.2.840.113554.1.2.2.1");
        Oid KERB_V5_OID = new Oid("1.2.840.113554.1.2.2");
        final GSSName clientName = manager.createName("hbase/node-1.internal@INTERNAL",
            krb5PrincipalOid);
        final GSSCredential clientCred = manager.createCredential(clientName,
            8 * 3600,
            KERB_V5_OID,
            GSSCredential.INITIATE_ONLY);
        final GSSName serverName = manager.createName("hbase/node-1.internal@INTERNAL", krb5PrincipalOid);
        final GSSContext context = manager.createContext(serverName,
            KERB_V5_OID,
            clientCred,
            GSSContext.DEFAULT_LIFETIME);
        context.requestMutualAuth(true);
        context.requestConf(false);
        context.requestInteg(true);
        final byte[] outToken = context.initSecContext(new byte[0], 0, 0);
        StringBuilder outputBuffer = new StringBuilder();
        outputBuffer.append("Negotiate ");
        // java.util.Base64 replaces the internal (and JDK-9-removed) sun.misc
        // BASE64Encoder; the basic encoder emits no line breaks, so the old
        // newline stripping is no longer needed.
        outputBuffer.append(Base64.getEncoder().encodeToString(outToken));
        System.out.print("Ticket is: " + outputBuffer);
        return outputBuffer.toString();
    }

    /** Prints one row key together with all supplied cell versions. */
    private void printVersions(ByteBuffer row, List<TCell> versions) {
        StringBuilder rowStr = new StringBuilder();
        for (TCell cell : versions) {
            rowStr.append(utf8(cell.value.array()));
            rowStr.append("; ");
        }
        System.out.println("row: " + utf8(row.array()) + ", values: " + rowStr);
    }

    /** Prints a single row result with its columns in sorted column order. */
    private void printRow(TRowResult rowResult) {
        // copy values into a TreeMap to get them in sorted order
        TreeMap<String, TCell> sorted = new TreeMap<String, TCell>();
        for (Map.Entry<ByteBuffer, TCell> column : rowResult.columns.entrySet()) {
            sorted.put(utf8(column.getKey().array()), column.getValue());
        }
        StringBuilder rowStr = new StringBuilder();
        for (SortedMap.Entry<String, TCell> entry : sorted.entrySet()) {
            rowStr.append(entry.getKey());
            rowStr.append(" => ");
            rowStr.append(utf8(entry.getValue().value.array()));
            rowStr.append("; ");
        }
        System.out.println("row: " + utf8(rowResult.row.array()) + ", cols: " + rowStr);
    }

    /** Prints every row result in the list. */
    private void printRow(List<TRowResult> rows) {
        for (TRowResult rowResult : rows) {
            printRow(rowResult);
        }
    }

    /**
     * Builds the JAAS subject used for the demo. In secure mode the Kerberos
     * credential is read from the ticket cache (run kinit beforehand).
     */
    static Subject getSubject() throws Exception {
        if (!secure) return new Subject();
        /*
         * To authenticate the DemoClient, kinit should be invoked ahead.
         * Here we try to get the Kerberos credential from the ticket cache.
         */
        LoginContext context = new LoginContext("", new Subject(), null,
            new Configuration() {
                @Override
                public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
                    Map<String, String> options = new HashMap<String, String>();
                    options.put("useKeyTab", "false");
                    options.put("storeKey", "false");
                    options.put("doNotPrompt", "true");
                    options.put("useTicketCache", "true");
                    options.put("renewTGT", "true");
                    options.put("refreshKrb5Config", "true");
                    options.put("isInitiator", "true");
                    String ticketCache = System.getenv("KRB5CCNAME");
                    if (ticketCache != null) {
                        options.put("ticketCache", ticketCache);
                    }
                    options.put("debug", "true");
                    return new AppConfigurationEntry[]{
                        new AppConfigurationEntry("com.sun.security.auth.module.Krb5LoginModule",
                            AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
                            options)};
                }
            });
        context.login();
        return context.getSubject();
    }
}
| apache-2.0 |
cementsuf/hapi-fhir | hapi-fhir-structures-hl7org-dstu2/src/main/java/org/hl7/fhir/instance/model/valuesets/ClaimModifiers.java | 3990 | package org.hl7.fhir.instance.model.valuesets;
/*
Copyright (c) 2011+, HL7, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of HL7 nor the names of its contributors may be used to
endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
// Generated on Tue, Sep 1, 2015 19:08-0400 for FHIR v1.0.0
public enum ClaimModifiers {

    /** Repair of prior service or installation */
    A,
    /** Temporary service or installation */
    B,
    /** Treatment associated with TMJ */
    C,
    /** Implant or associated with an implant */
    E,
    /** None */
    X,
    /** added to help the parsers */
    NULL;

    /**
     * Resolves a code string to its enum constant.
     *
     * @param codeString the wire code; null or empty yields null
     * @return the matching constant, or null when the input is null/empty
     * @throws Exception when the code is not recognised
     */
    public static ClaimModifiers fromCode(String codeString) throws Exception {
        if (codeString == null || codeString.isEmpty()) {
            return null;
        }
        for (ClaimModifiers candidate : values()) {
            // NULL is only a parser helper and never a legal wire code.
            if (candidate != NULL && candidate.name().equals(codeString)) {
                return candidate;
            }
        }
        throw new Exception("Unknown ClaimModifiers code '" + codeString + "'");
    }

    /** @return the wire code for this constant ("?" for the NULL helper). */
    public String toCode() {
        return this == NULL ? "?" : name();
    }

    /** @return the code system URI shared by every constant. */
    public String getSystem() {
        return "http://hl7.org/fhir/modifiers";
    }

    /** @return the formal definition text for this constant ("?" for NULL). */
    public String getDefinition() {
        switch (this) {
            case A:
                return "Repair of prior service or installation";
            case B:
                return "Temporary service or installation";
            case C:
                return "Treatment associated with TMJ";
            case E:
                return "Implant or associated with an implant";
            case X:
                return "None";
            default:
                return "?";
        }
    }

    /** @return the human display text; identical to the definition except for C. */
    public String getDisplay() {
        // Only C has a display string that differs from its definition.
        return this == C ? "TMJ treatment" : getDefinition();
    }
}
| apache-2.0 |
brmeyer/s-ramp | shell/src/main/java/org/artificer/shell/archive/PackArchiveCommand.java | 3272 | /*
* Copyright 2012 JBoss Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.artificer.shell.archive;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.io.FileUtils;
import org.artificer.atom.archive.ArtificerArchive;
import org.artificer.shell.i18n.Messages;
import org.artificer.shell.util.FileNameCompleterDelegate;
import org.jboss.aesh.cl.Arguments;
import org.jboss.aesh.cl.CommandDefinition;
import org.jboss.aesh.cl.completer.OptionCompleter;
import org.jboss.aesh.console.command.CommandResult;
import org.jboss.aesh.console.command.completer.CompleterInvocation;
import org.jboss.aesh.console.command.invocation.CommandInvocation;
import java.io.File;
import java.util.List;
/**
 * Packages up the currently open Artificer batch archive: the archive is
 * zip'd and the resulting file is copied to the output location given as the
 * command's single argument.
 *
 * @author Brett Meyer
 * @author eric.wittmann@redhat.com
 */
@CommandDefinition(name = "pack",
        description = "The \"pack\" command packages up the currently open Artificer batch archive file. The Artificer batch archive is zip'd up and then copied to the output file location provided.\n")
public class PackArchiveCommand extends AbstractArchiveCommand {

    /** Target path for the packed archive; completed as a file-system path. */
    @Arguments(description = "<output path>", completer = Completer.class)
    private List<String> arguments;

    @Override
    protected String getName() {
        return "archive pack";
    }

    /**
     * Packs the current archive and copies it to the requested output file.
     * Prints a warning (but still proceeds, overwriting) when the target file
     * already exists.
     *
     * @return SUCCESS when the archive was packed and copied
     * @throws Exception if packing or copying fails
     */
    @Override
    protected CommandResult doExecute(CommandInvocation commandInvocation) throws Exception {
        if (CollectionUtils.isEmpty(arguments)) {
            return doHelp(commandInvocation);
        }
        ArtificerArchive archive = currentArchive(commandInvocation);
        String outputLocationArg = requiredArgument(commandInvocation, arguments, 0);
        File outputFile = new File(outputLocationArg);
        if (outputFile.exists()) {
            commandInvocation.getShell().out().println(Messages.i18n.format("PackArchive.OutputLocAlreadyExists"));
        }
        // Create only the *parent* directories of the target. The previous code
        // called outputFile.mkdirs(), which created the output path itself as a
        // directory and then broke the file copy below; it also NPE'd via
        // getParentFile().exists() when the path had no parent component.
        File parentDir = outputFile.getParentFile();
        if (parentDir != null && !parentDir.exists()) {
            parentDir.mkdirs();
        }
        File packedFile = archive.pack();
        FileUtils.copyFile(packedFile, outputFile);
        commandInvocation.getShell().out().println(Messages.i18n.format("PackArchive.Packaged", outputFile.getCanonicalPath()));
        return CommandResult.SUCCESS;
    }

    /** Completes the single <output path> argument with file-system paths. */
    private static class Completer implements OptionCompleter<CompleterInvocation> {
        @Override
        public void complete(CompleterInvocation completerInvocation) {
            PackArchiveCommand command = (PackArchiveCommand) completerInvocation.getCommand();
            if (CollectionUtils.isEmpty(command.arguments)) {
                FileNameCompleterDelegate.complete(completerInvocation);
            }
        }
    }
}
| apache-2.0 |
Rajith90/carbon-apimgt | components/apimgt/org.wso2.carbon.apimgt.rest.api.service.catalog/src/main/java/org/wso2/carbon/apimgt/rest/api/service/catalog/impl/SettingsApiServiceImpl.java | 1779 | /*
* Copyright (c) 2021, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
*
* WSO2 Inc. licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file except
* in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.wso2.carbon.apimgt.rest.api.service.catalog.impl;
import org.wso2.carbon.apimgt.rest.api.service.catalog.*;
import org.wso2.carbon.apimgt.rest.api.service.catalog.dto.*;
import org.apache.cxf.jaxrs.ext.multipart.Attachment;
import org.apache.cxf.jaxrs.ext.MessageContext;
import org.wso2.carbon.apimgt.rest.api.service.catalog.dto.ErrorDTO;
import org.wso2.carbon.apimgt.rest.api.service.catalog.dto.SettingsDTO;
import java.util.List;
import java.io.InputStream;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.SecurityContext;
public class SettingsApiServiceImpl implements SettingsApiService {
public Response getSettings(MessageContext messageContext) {
ErrorDTO errorObject = new ErrorDTO();
Response.Status status = Response.Status.NOT_IMPLEMENTED;
errorObject.setCode((long) status.getStatusCode());
errorObject.setMessage(status.toString());
errorObject.setDescription("The requested resource has not been implemented for this endpoint");
return Response.status(status).entity(errorObject).build();
}
}
| apache-2.0 |
kuujo/copycat | storage/src/main/java/io/atomix/storage/journal/FileChannelJournalSegmentReader.java | 5650 | /*
* Copyright 2017-present Open Networking Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.atomix.storage.journal;
import io.atomix.storage.StorageException;
import io.atomix.storage.journal.index.JournalIndex;
import io.atomix.storage.journal.index.Position;
import io.atomix.utils.serializer.Namespace;
import java.io.IOException;
import java.nio.BufferUnderflowException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.util.NoSuchElementException;
import java.util.zip.CRC32;
import java.util.zip.Checksum;
/**
 * Log segment reader backed by a {@link FileChannel}.
 *
 * <p>On-disk entry layout (as decoded in {@link #readNext()}): a 4-byte entry
 * length, a 4-byte CRC32 checksum, then {@code length} bytes of payload
 * serialized via the configured {@link Namespace}. A non-positive or
 * over-sized length, a checksum mismatch, or a buffer underflow is treated as
 * "no further entry".
 *
 * @author <a href="http://github.com/kuujo">Jordan Halterman</a>
 */
class FileChannelJournalSegmentReader<E> implements JournalReader<E> {
  // Channel over the segment file; positioned and read here, but owned and
  // closed by the parent reader (see close()).
  private final FileChannel channel;
  // Upper bound on a single entry's serialized size, in bytes.
  private final int maxEntrySize;
  // Index used by reset(long) to seek near a target entry without scanning.
  private final JournalIndex index;
  // Serializer used to decode entry payloads.
  private final Namespace namespace;
  // Scratch read buffer; sized for two max-size entries plus their two-int
  // (length + checksum) headers each.
  private final ByteBuffer memory;
  // Index of the first entry stored in this segment.
  private final long firstIndex;
  // Entry most recently returned by next(); null before the first call.
  private Indexed<E> currentEntry;
  // One-entry lookahead; null when not yet read or no further valid entry.
  private Indexed<E> nextEntry;

  FileChannelJournalSegmentReader(
      FileChannel channel,
      JournalSegment<E> segment,
      int maxEntrySize,
      JournalIndex index,
      Namespace namespace) {
    this.channel = channel;
    this.maxEntrySize = maxEntrySize;
    this.index = index;
    this.namespace = namespace;
    this.memory = ByteBuffer.allocate((maxEntrySize + Integer.BYTES + Integer.BYTES) * 2);
    this.firstIndex = segment.index();
    // Position at the start of the segment and prime the lookahead entry.
    reset();
  }

  @Override
  public long getFirstIndex() {
    return firstIndex;
  }

  /** Returns the index of the last returned entry, or 0 before any read. */
  @Override
  public long getCurrentIndex() {
    return currentEntry != null ? currentEntry.index() : 0;
  }

  @Override
  public Indexed<E> getCurrentEntry() {
    return currentEntry;
  }

  /** Returns the index that the next call to next() would yield. */
  @Override
  public long getNextIndex() {
    return currentEntry != null ? currentEntry.index() + 1 : firstIndex;
  }

  /**
   * Repositions the reader so the next returned entry has the given index:
   * jumps near the target via the journal index, then scans forward.
   */
  @Override
  public void reset(long index) {
    reset();
    Position position = this.index.lookup(index - 1);
    if (position != null) {
      // Fake a "current" entry just before the indexed position so that
      // getNextIndex() lines up with the file offset we seek to.
      currentEntry = new Indexed<>(position.index() - 1, null, 0);
      try {
        channel.position(position.position());
        memory.clear().flip();
      } catch (IOException e) {
        throw new StorageException(e);
      }
      readNext();
    }
    // Scan forward entry by entry until the requested index is next.
    while (getNextIndex() < index && hasNext()) {
      next();
    }
  }

  /** Rewinds to the first entry, just past the segment descriptor header. */
  @Override
  public void reset() {
    try {
      channel.position(JournalSegmentDescriptor.BYTES);
    } catch (IOException e) {
      throw new StorageException(e);
    }
    memory.clear().limit(0);
    currentEntry = null;
    nextEntry = null;
    readNext();
  }

  @Override
  public boolean hasNext() {
    // If the next entry is null, check whether a next entry exists.
    if (nextEntry == null) {
      readNext();
    }
    return nextEntry != null;
  }

  @Override
  public Indexed<E> next() {
    if (!hasNext()) {
      throw new NoSuchElementException();
    }
    // Set the current entry to the next entry.
    currentEntry = nextEntry;
    // Reset the next entry to null.
    nextEntry = null;
    // Read the next entry in the segment.
    readNext();
    // Return the current entry.
    return currentEntry;
  }

  /**
   * Reads the next entry in the segment into {@link #nextEntry}, or leaves it
   * null when no further valid entry can be decoded.
   */
  @SuppressWarnings("unchecked")
  private void readNext() {
    // Compute the index of the next entry in the segment.
    final long index = getNextIndex();
    try {
      // Read more bytes from the segment if necessary.
      if (memory.remaining() < maxEntrySize) {
        // Advance the channel past the bytes already consumed from the
        // buffer, then refill from that offset.
        long position = channel.position() + memory.position();
        channel.position(position);
        memory.clear();
        channel.read(memory);
        channel.position(position);
        memory.flip();
      }
      // Mark the buffer so it can be reset if necessary.
      memory.mark();
      try {
        // Read the length of the entry.
        final int length = memory.getInt();
        // If the buffer length is zero then return.
        if (length <= 0 || length > maxEntrySize) {
          memory.reset().limit(memory.position());
          nextEntry = null;
          return;
        }
        // Read the checksum of the entry (stored as an unsigned 32-bit int).
        long checksum = memory.getInt() & 0xFFFFFFFFL;
        // Compute the checksum for the entry bytes.
        final Checksum crc32 = new CRC32();
        crc32.update(memory.array(), memory.position(), length);
        // If the stored checksum equals the computed checksum, return the entry.
        if (checksum == crc32.getValue()) {
          // Temporarily clamp the buffer limit to the payload so the
          // deserializer cannot read past this entry.
          int limit = memory.limit();
          memory.limit(memory.position() + length);
          E entry = namespace.deserialize(memory);
          memory.limit(limit);
          nextEntry = new Indexed<>(index, entry, length);
        } else {
          memory.reset().limit(memory.position());
          nextEntry = null;
        }
      } catch (BufferUnderflowException e) {
        // Partial record at the end of the readable bytes: treat as no entry.
        memory.reset().limit(memory.position());
        nextEntry = null;
      }
    } catch (IOException e) {
      throw new StorageException(e);
    }
  }

  @Override
  public void close() {
    // Do nothing. The parent reader manages the channel.
  }
}
| apache-2.0 |
permazen/permazen | permazen-coreapi/src/main/java/io/permazen/core/type/FloatArrayType.java | 3218 |
/*
* Copyright (C) 2015 Archie L. Cobbs. All rights reserved.
*/
package io.permazen.core.type;
import com.google.common.base.Preconditions;
import com.google.common.primitives.Floats;
import com.google.common.reflect.TypeToken;
import io.permazen.core.FieldTypeRegistry;
import io.permazen.util.ByteReader;
import io.permazen.util.ByteWriter;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
/**
 * {@code float[]} array type. Does not support null arrays.
 *
 * <p>
 * Array elements are encoded using {@link FloatType}, and the array is terminated by {@code 0x00000000},
 * which is an encoded value that can never be emitted by {@link FloatType}.
 */
public class FloatArrayType extends Base64ArrayType<float[], Float> {

    private static final long serialVersionUID = 2791855034086017414L;

    // Fixed width of one encoded element - and of the terminator sentinel.
    private static final int NUM_BYTES = 4;
    // Array terminator: NUM_BYTES zero bytes (see class javadoc).
    private static final byte[] END = new byte[NUM_BYTES];

    // Element codec for the byte-oriented read/write/skip methods below.
    private final FloatType floatType = new FloatType();

    @SuppressWarnings("serial")
    public FloatArrayType() {
        super(FieldTypeRegistry.FLOAT, new TypeToken<float[]>() { });
    }

    /**
     * Decodes an array: reads fixed-width elements until the all-zero
     * terminator is encountered.
     */
    @Override
    public float[] read(ByteReader reader) {
        Preconditions.checkArgument(reader != null);
        final ArrayList<Float> list = new ArrayList<>();
        while (true) {
            final byte[] next = reader.readBytes(NUM_BYTES);
            if (Arrays.equals(next, END))
                break;
            list.add(this.floatType.read(new ByteReader(next)));
        }
        return this.createArray(list);
    }

    /**
     * Encodes each element via {@link FloatType}, then appends the all-zero
     * terminator. Null arrays are rejected.
     */
    @Override
    public void write(ByteWriter writer, float[] array) {
        Preconditions.checkArgument(array != null, "null array");
        Preconditions.checkArgument(writer != null);
        final int length = this.getArrayLength(array);
        for (int i = 0; i < length; i++)
            this.floatType.write(writer, array[i]);
        writer.write(END);
    }

    /** Skips past one encoded array, consuming up to and including the terminator. */
    @Override
    public void skip(ByteReader reader) {
        Preconditions.checkArgument(reader != null);
        while (true) {
            final byte[] next = reader.readBytes(NUM_BYTES);
            if (Arrays.equals(next, END))
                break;
        }
    }

    @Override
    public boolean hasPrefix0xff() {
        return this.floatType.hasPrefix0xff();
    }

    @Override
    protected int getArrayLength(float[] array) {
        return array.length;
    }

    @Override
    protected Float getArrayElement(float[] array, int index) {
        return array[index];
    }

    @Override
    protected float[] createArray(List<Float> elements) {
        return Floats.toArray(elements);
    }

    // encode()/decode() write raw big-endian floats with no terminator;
    // presumably used by Base64ArrayType for the string form - see superclass.
    @Override
    protected void encode(float[] array, DataOutputStream output) throws IOException {
        for (float value : array)
            output.writeFloat(value);
    }

    @Override
    protected float[] decode(DataInputStream input, int numBytes) throws IOException {
        final float[] array = this.checkDecodeLength(numBytes);
        for (int i = 0; i < array.length; i++)
            array[i] = input.readFloat();
        return array;
    }
}
| apache-2.0 |
cemo/flatten-maven-plugin | src/main/java/org/codehaus/mojo/flatten/CleanMojo.java | 1653 | /* Copyright (c) The m-m-m Team, Licensed under the Apache License, Version 2.0
* http://www.apache.org/licenses/LICENSE-2.0 */
package org.codehaus.mojo.flatten;
import java.io.File;
import org.apache.maven.plugin.MojoExecutionException;
import org.apache.maven.plugin.MojoFailureException;
import org.apache.maven.plugins.annotations.Mojo;
/**
 * Realizes the goal <code>flatten:clean</code>: removes any files created by
 * <code>{@link FlattenMojo flatten:flatten}</code>, i.e. the flattened POM file (by default
 * <code>.flattened-pom.xml</code>). See also
 * <a href="http://jira.codehaus.org/browse/MOJO-2030">MOJO-2030</a> for further details.
 *
 * @author Joerg Hohwiller (hohwille at users.sourceforge.net)
 * @since 1.0.0-beta-2
 */
@Mojo( name = "clean", requiresProject = true, requiresDirectInvocation = false, executionStrategy = "once-per-session" )
public class CleanMojo
    extends AbstractFlattenMojo
{

    /**
     * The constructor.
     */
    public CleanMojo()
    {
        super();
    }

    /**
     * Deletes the flattened POM file when present; fails the build if the file
     * exists but cannot be removed.
     *
     * {@inheritDoc}
     */
    public void execute()
        throws MojoExecutionException, MojoFailureException
    {
        File flattenedPomFile = getFlattenedPomFile();
        if ( !flattenedPomFile.isFile() )
        {
            // nothing was generated (or the path is a directory) - nothing to clean
            return;
        }
        getLog().info( "Deleting " + flattenedPomFile.getPath() );
        if ( !flattenedPomFile.delete() )
        {
            throw new MojoFailureException( "Could not delete " + flattenedPomFile.getAbsolutePath() );
        }
    }

}
| apache-2.0 |
markosbg/debug | rabix-transport/src/main/java/org/rabix/transport/mechanism/impl/rabbitmq/TransportQueueRabbitMQ.java | 852 | package org.rabix.transport.mechanism.impl.rabbitmq;
import org.rabix.transport.mechanism.TransportQueue;
/**
 * Immutable value object describing a RabbitMQ destination: the exchange,
 * its type, and the routing key used when publishing.
 */
public class TransportQueueRabbitMQ implements TransportQueue {

  /** Name of the exchange messages are addressed to. */
  private final String exchange;

  /** Type of the exchange (as understood by the broker). */
  private final String exchangeType;

  /** Routing key used when publishing to the exchange. */
  private final String routingKey;

  public TransportQueueRabbitMQ(String exchange, String exchangeType, String routingKey) {
    this.exchange = exchange;
    this.exchangeType = exchangeType;
    this.routingKey = routingKey;
  }

  public String getExchange() {
    return exchange;
  }

  public String getExchangeType() {
    return exchangeType;
  }

  public String getRoutingKey() {
    return routingKey;
  }

  @Override
  public String toString() {
    StringBuilder text = new StringBuilder("TransportQueueRabbitMQ [exchange=");
    text.append(exchange).append(", exchangeType=").append(exchangeType);
    text.append(", routingKey=").append(routingKey).append("]");
    return text.toString();
  }

}
| apache-2.0 |
shuliangtao/apache-camel-2.13.0-src | camel-core/src/test/java/org/apache/camel/util/URISupportTest.java | 13474 | /**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.util;
import java.net.URI;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;
import org.apache.camel.ContextTestSupport;
/**
 * Unit tests for {@code URISupport}: endpoint URI normalization, parameter
 * parsing, RAW() parameter handling, and sanitization of user-info credentials.
 */
public class URISupportTest extends ContextTestSupport {

    // Normalization must be order-insensitive for query parameters and
    // tolerant of the scheme:path vs scheme://path forms.
    public void testNormalizeEndpointUri() throws Exception {
        String out1 = URISupport.normalizeUri("smtp://localhost?username=davsclaus&password=secret");
        String out2 = URISupport.normalizeUri("smtp://localhost?password=secret&username=davsclaus");
        assertEquals(out1, out2);
        out1 = URISupport.normalizeUri("smtp://localhost?username=davsclaus&password=secret");
        out2 = URISupport.normalizeUri("smtp:localhost?password=secret&username=davsclaus");
        assertEquals(out1, out2);
        out1 = URISupport.normalizeUri("smtp:localhost?password=secret&username=davsclaus");
        out2 = URISupport.normalizeUri("smtp://localhost?username=davsclaus&password=secret");
        assertEquals(out1, out2);
        out1 = URISupport.normalizeUri("seda:foo?concurrentConsumer=2");
        out2 = URISupport.normalizeUri("seda:foo?concurrentConsumer=2");
        assertEquals(out1, out2);
        out1 = URISupport.normalizeUri("seda:foo?concurrentConsumer=2");
        out2 = URISupport.normalizeUri("seda:foo");
        assertNotSame(out1, out2);
        out1 = URISupport.normalizeUri("foo:?test=1");
        out2 = URISupport.normalizeUri("foo://?test=1");
        assertEquals("foo://?test=1", out2);
        assertEquals(out1, out2);
    }

    public void testNormalizeEndpointUriNoParam() throws Exception {
        String out1 = URISupport.normalizeUri("direct:foo");
        String out2 = URISupport.normalizeUri("direct:foo");
        assertEquals(out1, out2);
        out1 = URISupport.normalizeUri("direct://foo");
        out2 = URISupport.normalizeUri("direct://foo");
        assertEquals(out1, out2);
        out1 = URISupport.normalizeUri("direct:foo");
        out2 = URISupport.normalizeUri("direct://foo");
        assertEquals(out1, out2);
        out1 = URISupport.normalizeUri("direct://foo");
        out2 = URISupport.normalizeUri("direct:foo");
        assertEquals(out1, out2);
        out1 = URISupport.normalizeUri("direct://foo");
        out2 = URISupport.normalizeUri("direct:bar");
        assertNotSame(out1, out2);
    }

    public void testNormalizeEndpointUriWithFragments() throws Exception {
        String out1 = URISupport.normalizeUri("irc://someserver/#camel?user=davsclaus");
        String out2 = URISupport.normalizeUri("irc:someserver/#camel?user=davsclaus");
        assertEquals(out1, out2);
        out1 = URISupport.normalizeUri("irc://someserver/#camel?user=davsclaus");
        out2 = URISupport.normalizeUri("irc:someserver/#camel?user=hadrian");
        assertNotSame(out1, out2);
    }

    public void testNormalizeHttpEndpoint() throws Exception {
        String out1 = URISupport.normalizeUri("http://www.google.com?q=Camel");
        String out2 = URISupport.normalizeUri("http:www.google.com?q=Camel");
        assertEquals(out1, out2);
        assertTrue("Should have //", out1.startsWith("http://"));
        assertTrue("Should have //", out2.startsWith("http://"));
    }

    // IPv6 literal hosts (bracketed) must survive normalization untouched.
    public void testNormalizeIPv6HttpEndpoint() throws Exception {
        String result = URISupport.normalizeUri("http://[2a00:8a00:6000:40::1413]:30300/test");
        assertEquals("http://[2a00:8a00:6000:40::1413]:30300/test", result);
    }

    public void testNormalizeHttpEndpointUnicodedParameter() throws Exception {
        String out = URISupport.normalizeUri("http://www.google.com?q=S\u00F8ren");
        assertEquals("http://www.google.com?q=S%C3%B8ren", out);
    }

    public void testParseParametersUnicodedValue() throws Exception {
        String out = URISupport.normalizeUri("http://www.google.com?q=S\u00F8ren");
        URI uri = new URI(out);
        Map<String, Object> parameters = URISupport.parseParameters(uri);
        assertEquals(1, parameters.size());
        assertEquals("S\u00F8ren", parameters.get("q"));
    }

    public void testNormalizeHttpEndpointURLEncodedParameter() throws Exception {
        String out = URISupport.normalizeUri("http://www.google.com?q=S%C3%B8ren%20Hansen");
        assertEquals("http://www.google.com?q=S%C3%B8ren+Hansen", out);
    }

    public void testParseParametersURLEncodeddValue() throws Exception {
        String out = URISupport.normalizeUri("http://www.google.com?q=S%C3%B8ren+Hansen");
        URI uri = new URI(out);
        Map<String, Object> parameters = URISupport.parseParameters(uri);
        assertEquals(1, parameters.size());
        assertEquals("S\u00F8ren Hansen", parameters.get("q"));
    }

    // A malformed query (no '=' in the first pair) must not blow up.
    public void testNormalizeUriWhereParamererIsFaulty() throws Exception {
        String out = URISupport.normalizeUri("stream:uri?file:///d:/temp/data/log/quickfix.log&scanStream=true");
        assertNotNull(out);
    }

    public void testCreateRemaingURI() throws Exception {
        URI original = new URI("http://camel.apache.org");
        Map<String, Object> param = new HashMap<String, Object>();
        param.put("foo", "123");
        URI newUri = URISupport.createRemainingURI(original, param);
        assertNotNull(newUri);
        String s = newUri.toString();
        assertEquals("http://camel.apache.org?foo=123", s);
    }

    public void testCreateURIWithQueryHasOneFragment() throws Exception {
        URI uri = new URI("smtp://localhost#fragmentOne");
        URI resultUri = URISupport.createURIWithQuery(uri, null);
        assertNotNull(resultUri);
        assertEquals("smtp://localhost#fragmentOne", resultUri.toString());
    }

    public void testNormalizeEndpointWithEqualSignInParameter() throws Exception {
        String out = URISupport.normalizeUri("jms:queue:foo?selector=somekey='somevalue'&foo=bar");
        assertNotNull(out);
        // Camel will safe encode the URI
        assertEquals("jms://queue:foo?foo=bar&selector=somekey%3D%27somevalue%27", out);
    }

    public void testNormalizeEndpointWithPercentSignInParameter() throws Exception {
        String out = URISupport.normalizeUri("http://someendpoint?username=james&password=%25test");
        assertNotNull(out);
        // Camel will safe encode the URI
        assertEquals("http://someendpoint?password=%25test&username=james", out);
    }

    public void testParseParameters() throws Exception {
        URI u = new URI("quartz:myGroup/myTimerName?cron=0+0+*+*+*+?");
        Map<String, Object> params = URISupport.parseParameters(u);
        assertEquals(1, params.size());
        assertEquals("0 0 * * * ?", params.get("cron"));
        u = new URI("quartz:myGroup/myTimerName?cron=0+0+*+*+*+?&bar=123");
        params = URISupport.parseParameters(u);
        assertEquals(2, params.size());
        assertEquals("0 0 * * * ?", params.get("cron"));
        assertEquals("123", params.get("bar"));
    }

    public void testCreateRemainingURIEncoding() throws Exception {
        // the uri is already encoded, but we create a new one with new query parameters
        String uri = "http://localhost:23271/myapp/mytest?columns=name%2Ctotalsens%2Cupsens&username=apiuser";
        // these are the parameters which is tricky to encode
        Map<String, Object> map = new LinkedHashMap<String, Object>();
        map.put("foo", "abc def");
        map.put("bar", "123,456");
        map.put("name", "S\u00F8ren"); // danish letter
        // create new uri with the parameters
        URI out = URISupport.createRemainingURI(new URI(uri), map);
        assertNotNull(out);
        assertEquals("http://localhost:23271/myapp/mytest?foo=abc+def&bar=123%2C456&name=S%C3%B8ren", out.toString());
        assertEquals("http://localhost:23271/myapp/mytest?foo=abc+def&bar=123%2C456&name=S%C3%B8ren", out.toASCIIString());
    }

    // Repeated query keys must be kept (in order) after normalization.
    public void testNormalizeEndpointUriWithDualParameters() throws Exception {
        String out1 = URISupport.normalizeUri("smtp://localhost?to=foo&to=bar&from=me");
        assertEquals("smtp://localhost?from=me&to=foo&to=bar", out1);
        String out2 = URISupport.normalizeUri("smtp://localhost?to=foo&to=bar&from=me&from=you");
        assertEquals("smtp://localhost?from=me&from=you&to=foo&to=bar", out2);
    }

    // Sanitization masks the password part of user-info with xxxxxx.
    public void testSanitizeUriWithUserInfo() {
        String uri = "jt400://GEORGE:HARRISON@LIVERPOOL/QSYS.LIB/BEATLES.LIB/PENNYLANE.DTAQ";
        String expected = "jt400://GEORGE:xxxxxx@LIVERPOOL/QSYS.LIB/BEATLES.LIB/PENNYLANE.DTAQ";
        assertEquals(expected, URISupport.sanitizeUri(uri));
    }

    public void testSanitizePathWithUserInfo() {
        String path = "GEORGE:HARRISON@LIVERPOOL/QSYS.LIB/BEATLES.LIB/PENNYLANE.PGM";
        String expected = "GEORGE:xxxxxx@LIVERPOOL/QSYS.LIB/BEATLES.LIB/PENNYLANE.PGM";
        assertEquals(expected, URISupport.sanitizePath(path));
    }

    public void testSanitizePathWithoutSensitiveInfoIsUnchanged() {
        String path = "myhost:8080/mypath";
        assertEquals(path, URISupport.sanitizePath(path));
    }

    public void testNormalizeEndpointUriWithUserInfoSpecialSign() throws Exception {
        String out1 = URISupport.normalizeUri("ftp://us%40r:t%st@localhost:21000/tmp3/camel?foo=us@r");
        assertEquals("ftp://us%40r:t%25st@localhost:21000/tmp3/camel?foo=us%40r", out1);
        String out2 = URISupport.normalizeUri("ftp://us%40r:t%25st@localhost:21000/tmp3/camel?foo=us@r");
        assertEquals("ftp://us%40r:t%25st@localhost:21000/tmp3/camel?foo=us%40r", out2);
        String out3 = URISupport.normalizeUri("ftp://us@r:t%st@localhost:21000/tmp3/camel?foo=us@r");
        assertEquals("ftp://us%40r:t%25st@localhost:21000/tmp3/camel?foo=us%40r", out3);
        String out4 = URISupport.normalizeUri("ftp://us@r:t%25st@localhost:21000/tmp3/camel?foo=us@r");
        assertEquals("ftp://us%40r:t%25st@localhost:21000/tmp3/camel?foo=us%40r", out4);
    }

    public void testSpecialUriFromXmppComponent() throws Exception {
        String out1 = URISupport.normalizeUri("xmpp://camel-user@localhost:123/test-user@localhost?password=secret&serviceName=someCoolChat");
        assertEquals("xmpp://camel-user@localhost:123/test-user@localhost?password=secret&serviceName=someCoolChat", out1);
    }

    // RAW(...) values must pass through normalization without encoding.
    public void testRawParameter() throws Exception {
        String out = URISupport.normalizeUri("xmpp://camel-user@localhost:123/test-user@localhost?password=RAW(++?w0rd)&serviceName=some chat");
        assertEquals("xmpp://camel-user@localhost:123/test-user@localhost?password=RAW(++?w0rd)&serviceName=some+chat", out);
        String out2 = URISupport.normalizeUri("xmpp://camel-user@localhost:123/test-user@localhost?password=RAW(foo %% bar)&serviceName=some chat");
        assertEquals("xmpp://camel-user@localhost:123/test-user@localhost?password=RAW(foo %% bar)&serviceName=some+chat", out2);
    }

    public void testParseQuery() throws Exception {
        Map<String, Object> map = URISupport.parseQuery("password=secret&serviceName=somechat");
        assertEquals(2, map.size());
        assertEquals("secret", map.get("password"));
        assertEquals("somechat", map.get("serviceName"));
        map = URISupport.parseQuery("password=RAW(++?w0rd)&serviceName=somechat");
        assertEquals(2, map.size());
        assertEquals("RAW(++?w0rd)", map.get("password"));
        assertEquals("somechat", map.get("serviceName"));
        map = URISupport.parseQuery("password=RAW(++?)w&rd)&serviceName=somechat");
        assertEquals(2, map.size());
        assertEquals("RAW(++?)w&rd)", map.get("password"));
        assertEquals("somechat", map.get("serviceName"));
    }

    // resolveRawParameterValues strips the RAW(...) wrapper in place.
    public void testResolveRawParameterValues() throws Exception {
        Map<String, Object> map = URISupport.parseQuery("password=secret&serviceName=somechat");
        URISupport.resolveRawParameterValues(map);
        assertEquals(2, map.size());
        assertEquals("secret", map.get("password"));
        assertEquals("somechat", map.get("serviceName"));
        map = URISupport.parseQuery("password=RAW(++?w0rd)&serviceName=somechat");
        URISupport.resolveRawParameterValues(map);
        assertEquals(2, map.size());
        assertEquals("++?w0rd", map.get("password"));
        assertEquals("somechat", map.get("serviceName"));
        map = URISupport.parseQuery("password=RAW(++?)w&rd)&serviceName=somechat");
        URISupport.resolveRawParameterValues(map);
        assertEquals(2, map.size());
        assertEquals("++?)w&rd", map.get("password"));
        assertEquals("somechat", map.get("serviceName"));
    }
} | apache-2.0 |
WeiDianzhao1989/AndroidAppLib | plugin/src/main/java/com/weidian/plugin/util/IOUtil.java | 2916 | package com.weidian.plugin.util;
import android.database.Cursor;

import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.ByteArrayOutputStream;
import java.io.Closeable;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Reader;
import java.io.Writer;
import java.nio.charset.StandardCharsets;
/**
 * Small I/O helpers: quiet closing, stream draining, UTF-8 text read/write,
 * pooled-buffer copying, and recursive deletion.
 */
public class IOUtil {

    /** Static utility holder; never instantiated. */
    private IOUtil() {
    }

    /**
     * Closes the given resource, ignoring any exception thrown while closing.
     * A null argument is a no-op.
     */
    public static void closeQuietly(Closeable closeable) {
        if (closeable != null) {
            try {
                closeable.close();
            } catch (Throwable ignored) {
                // best-effort cleanup: nothing useful can be done here
            }
        }
    }

    /**
     * Closes the given cursor, ignoring any exception thrown while closing.
     * A null argument is a no-op.
     */
    public static void closeQuietly(Cursor cursor) {
        if (cursor != null) {
            try {
                cursor.close();
            } catch (Throwable ignored) {
                // best-effort cleanup
            }
        }
    }

    /**
     * Reads the remainder of the stream into a byte array.
     * The input stream is left open for the caller to close.
     *
     * @throws IOException if reading fails
     */
    public static byte[] readBytes(InputStream in) throws IOException {
        // ByteArrayOutputStream.close() is a documented no-op, so the previous
        // try/finally/closeQuietly around it was dead weight.
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        byte[] buf = new byte[1024];
        int len;
        while ((len = in.read(buf)) != -1) {
            out.write(buf, 0, len);
        }
        return out.toByteArray();
    }

    /**
     * Reads the remainder of the stream and decodes it as UTF-8 text.
     * The input stream is left open for the caller to close.
     */
    public static String readStr(InputStream in) throws IOException {
        // StandardCharsets.UTF_8 instead of the "utf-8" name: same behavior,
        // no charset lookup by string.
        Reader reader = new InputStreamReader(in, StandardCharsets.UTF_8);
        StringBuilder sb = new StringBuilder();
        char[] buf = new char[1024];
        int len;
        while ((len = reader.read(buf)) >= 0) {
            sb.append(buf, 0, len);
        }
        return sb.toString();
    }

    /**
     * Encodes the string as UTF-8 and writes it to the stream, flushing the
     * writer (and thus the wrapped stream) before returning. The stream is
     * left open for the caller to close.
     */
    public static void writeStr(OutputStream out, String str) throws IOException {
        Writer writer = new OutputStreamWriter(out, StandardCharsets.UTF_8);
        writer.write(str);
        writer.flush();
    }

    /**
     * Copies all remaining bytes from {@code in} to {@code out} using a
     * pooled 2 KiB buffer. Both streams are left open for the caller;
     * {@code out} is flushed on exit (even on failure) and the buffer is
     * always returned to the pool.
     */
    public static void copy(InputStream in, OutputStream out) throws IOException {
        if (!(in instanceof BufferedInputStream)) {
            in = new BufferedInputStream(in);
        }
        if (!(out instanceof BufferedOutputStream)) {
            out = new BufferedOutputStream(out);
        }
        int len;
        byte[] buffer = ByteArrayPoolUtil.getBuf(2048);
        try {
            while ((len = in.read(buffer)) != -1) {
                out.write(buffer, 0, len);
            }
        } finally {
            // Return the pooled buffer and push any bytes held in the local
            // BufferedOutputStream wrapper through to the caller's stream.
            ByteArrayPoolUtil.returnBuf(buffer);
            out.flush();
        }
    }

    /**
     * Recursively deletes a file or directory tree.
     *
     * @return true when the path no longer exists afterwards (including the
     *         case where it did not exist to begin with)
     */
    public static boolean deleteFileOrDir(File path) {
        if (!path.exists()) {
            return true;
        }
        if (path.isFile()) {
            return path.delete();
        }
        // listFiles() returns null on I/O error; in that case we still attempt
        // to delete the (possibly non-empty) directory itself below.
        File[] files = path.listFiles();
        if (files != null) {
            for (File file : files) {
                deleteFileOrDir(file);
            }
        }
        return path.delete();
    }
}
| apache-2.0 |
consulo/consulo-android | tools-base/sdklib/src/main/java/com/android/sdklib/repository/RepoConstants.java | 11185 | /*
* Copyright (C) 2010 The Android Open Source Project
*
* Licensed under the Eclipse Public License, Version 1.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.eclipse.org/org/documents/epl-v10.php
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.android.sdklib.repository;
import java.io.InputStream;
/**
* Public constants common to the sdk-repository and sdk-addon XML Schemas.
* @deprecated moved to studio
*/
public class RepoConstants {
/** The license definition. */
public static final String NODE_LICENSE = "license"; //$NON-NLS-1$
/** The optional uses-license for all packages or for a lib. */
public static final String NODE_USES_LICENSE = "uses-license"; //$NON-NLS-1$
/** The revision, an int > 0, for all packages. */
public static final String NODE_REVISION = "revision"; //$NON-NLS-1$
/** The optional description for all packages or for a lib. */
public static final String NODE_DESCRIPTION = "description"; //$NON-NLS-1$
/** The optional description URL for all packages. */
public static final String NODE_DESC_URL = "desc-url"; //$NON-NLS-1$
/** The optional release note for all packages. */
public static final String NODE_RELEASE_NOTE = "release-note"; //$NON-NLS-1$
/** The optional release note URL for all packages. */
public static final String NODE_RELEASE_URL = "release-url"; //$NON-NLS-1$
/** The optional obsolete qualifier for all packages. */
public static final String NODE_OBSOLETE = "obsolete"; //$NON-NLS-1$
/** The optional project-files provided by extra packages. */
public static final String NODE_PROJECT_FILES = "project-files"; //$NON-NLS-1$
/** A system-image package. */
public static final String NODE_SYSTEM_IMAGE = "system-image"; //$NON-NLS-1$
/** An included-ABI element for a system-image package. */
public static final String NODE_ABI_INCLUDED = "included-abi"; //$NON-NLS-1$
/** An ABI element for a system-image package. */
public static final String NODE_ABI = "abi"; //$NON-NLS-1$
/** The optional minimal tools revision required by platform and extra packages. */
public static final String NODE_MIN_TOOLS_REV = "min-tools-rev"; //$NON-NLS-1$
/** The optional minimal platform-tools revision required by tool packages. */
public static final String NODE_MIN_PLATFORM_TOOLS_REV = "min-platform-tools-rev"; //$NON-NLS-1$
/** The optional minimal API level required by extra packages. */
public static final String NODE_MIN_API_LEVEL = "min-api-level"; //$NON-NLS-1$
/** The version, a string, for platform packages. */
public static final String NODE_VERSION = "version"; //$NON-NLS-1$
/** The api-level, an int > 0, for platform, add-on and doc packages. */
public static final String NODE_API_LEVEL = "api-level"; //$NON-NLS-1$
/** The codename, a string, for platform packages. */
public static final String NODE_CODENAME = "codename"; //$NON-NLS-1$
/** The *old* vendor, a string, for add-on and extra packages.
 * Replaced by {@link #NODE_VENDOR_DISPLAY} and {@link #NODE_VENDOR_ID} in addon-v4.xsd. */
public static final String NODE_VENDOR = "vendor"; //$NON-NLS-1$
/** The vendor display string, for add-on and extra packages. */
public static final String NODE_VENDOR_DISPLAY = "vendor-display"; //$NON-NLS-1$
/** The unique vendor id string, for add-on and extra packages. */
public static final String NODE_VENDOR_ID = "vendor-id"; //$NON-NLS-1$
/** The name, a string, for add-on packages or for libraries.
 * Replaced by {@link #NODE_NAME_DISPLAY} and {@link #NODE_NAME_ID} in addon-v4.xsd. */
public static final String NODE_NAME = "name"; //$NON-NLS-1$
/** The name display string, for add-on packages or for libraries. */
public static final String NODE_NAME_DISPLAY = "name-display"; //$NON-NLS-1$
/** The unique name id string, for add-on packages or for libraries. */
public static final String NODE_NAME_ID = "name-id"; //$NON-NLS-1$
/** The optional string used to display a package in a list view. */
public static final String NODE_LIST_DISPLAY = "list-display"; //$NON-NLS-1$
/** A layoutlib package. */
public static final String NODE_LAYOUT_LIB = "layoutlib"; //$NON-NLS-1$
/** The API integer for a layoutlib element. */
public static final String NODE_API = "api"; //$NON-NLS-1$
/** The libs container, optional for an add-on. */
public static final String NODE_LIBS = "libs"; //$NON-NLS-1$
/** A lib element in a libs container. */
public static final String NODE_LIB = "lib"; //$NON-NLS-1$
/** The path segment, a string, for extra packages. */
public static final String NODE_PATH = "path"; //$NON-NLS-1$
/** The old_path segments, a string, for extra packages. */
public static final String NODE_OLD_PATHS = "old-paths"; //$NON-NLS-1$
/** The archives container, for all packages. */
public static final String NODE_ARCHIVES = "archives"; //$NON-NLS-1$
/** An archive element, for the archives container. */
public static final String NODE_ARCHIVE = "archive"; //$NON-NLS-1$
/** An archive size, an int > 0. */
public static final String NODE_SIZE = "size"; //$NON-NLS-1$
/** A sha1 archive checksum, as a 40-char hex. */
public static final String NODE_CHECKSUM = "checksum"; //$NON-NLS-1$
/** A download archive URL, either absolute or relative to the repository xml. */
public static final String NODE_URL = "url"; //$NON-NLS-1$
/**
 * Optional element to indicate an archive is only suitable for the specified OS. <br/>
 * Values: windows | macosx | linux.
 * @since repo-10, addon-7 and sys-img-3.
 * @replaces {@link #LEGACY_ATTR_OS}
 */
public static final String NODE_HOST_OS = "host-os"; //$NON-NLS-1$
/**
 * Optional element to indicate an archive is only suitable for the specified host bit size.<br/>
 * Values: 32 | 64.
 * @since repo-10, addon-7 and sys-img-3.
 */
public static final String NODE_HOST_BITS = "host-bits"; //$NON-NLS-1$
/**
 * Optional element to indicate an archive is only suitable for the specified JVM bit size.<br/>
 * Values: 32 | 64.
 * @since repo-10, addon-7 and sys-img-3.
 * @replaces {@link #LEGACY_ATTR_ARCH}
 */
public static final String NODE_JVM_BITS = "jvm-bits"; //$NON-NLS-1$
/**
 * Optional element to indicate an archive is only suitable for a JVM equal or greater than
 * the specified value. <br/>
 * Value format: [1-9](\.[0-9]{1,2}){0,2}, e.g. "1.6", "1.7.0", "1.10" or "2"
 * @since repo-10, addon-7 and sys-img-3.
 */
public static final String NODE_MIN_JVM_VERSION = "min-jvm-version"; //$NON-NLS-1$
// ---- XML attribute names ----
/** An archive checksum type, mandatory. */
public static final String ATTR_TYPE = "type"; //$NON-NLS-1$
/**
 * An archive OS attribute, mandatory. <br/>
 * Use {@link #NODE_HOST_OS} instead in repo-10, addon-7 and sys-img-3.
 */
public static final String LEGACY_ATTR_OS = "os"; //$NON-NLS-1$
/**
 * An optional archive Architecture attribute. <br/>
 * Use {@link #NODE_JVM_BITS} instead in repo-10, addon-7 and sys-img-3.
 */
public static final String LEGACY_ATTR_ARCH = "arch"; //$NON-NLS-1$
/** A license definition ID. */
public static final String ATTR_ID = "id"; //$NON-NLS-1$
/** A license reference. */
public static final String ATTR_REF = "ref"; //$NON-NLS-1$
// ---- Miscellaneous constants ----
/** Type of a sha1 checksum. */
public static final String SHA1_TYPE = "sha1"; //$NON-NLS-1$
/** Length of a string representing a SHA1 checksum; always 40 characters long. */
public static final int SHA1_CHECKSUM_LEN = 40;
/**
 * Temporary folder used to hold downloads and extract archives during installation.
 * This folder will be located in the SDK.
 */
public static final String FD_TEMP = "temp"; //$NON-NLS-1$
/**
 * Returns a stream to the requested XML Schema.
 * This is an internal helper. Users of the library should call
 * {@link SdkRepoConstants#getXsdStream(String, int)} or
 * {@link SdkAddonConstants#getXsdStream(String, int)}.
 *
 * @param rootElement The root of the filename of the XML schema.
 *   This is by convention the same as the root element declared by the schema.
 * @param version The XML schema revision number, an integer >= 1.
 * @return An {@link InputStream} object for the local XSD file or
 *   null if there is no schema for the requested version.
 * @see SdkRepoConstants#getXsdStream(int)
 * @see SdkAddonConstants#getXsdStream(int)
 */
protected static InputStream getXsdStream(String rootElement, int version) {
    String filename = String.format("%1$s-%2$02d.xsd", rootElement, version); //$NON-NLS-1$
    InputStream stream = openSchemaResource(filename);
    if (stream == null) {
        // Try the alternate schemas that are not published yet (same name
        // with a leading dash). This allows us to internally test with new
        // schemas before the public repository uses them.
        stream = openSchemaResource("-" + filename); //$NON-NLS-1$
    }
    return stream;
}

/**
 * Opens a classpath resource relative to {@link RepoConstants}, normalizing
 * every failure mode to {@code null}.
 *
 * @param filename the resource name to open.
 * @return an open {@link InputStream}, or null if the resource is missing.
 */
private static InputStream openSchemaResource(String filename) {
    try {
        // Some implementations return null on failure, others throw an
        // exception. We want to return null in both cases.
        return RepoConstants.class.getResourceAsStream(filename);
    } catch (Exception e) {
        return null;
    }
}
}
| apache-2.0 |
mars137/Insight-project | kafka-streams-application/src/main/java/com/atif/kafka/Message/MessageTimestampExtractor.java | 370 | package com.atif.kafka.Message;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.streams.processor.TimestampExtractor;
/**
 * {@link TimestampExtractor} that tells Kafka Streams to order records by the
 * timestamp embedded in the {@link ConsumerRecord} itself, rather than by
 * wall-clock time at processing.
 */
public class MessageTimestampExtractor implements TimestampExtractor {

    /**
     * @param consumerRecord    the record whose timestamp is being extracted
     * @param previousTimestamp the previously extracted timestamp (unused here)
     * @return the timestamp carried by the record itself
     */
    @Override
    public long extract(ConsumerRecord<Object, Object> consumerRecord, long previousTimestamp) {
        return consumerRecord.timestamp();
    }
}
| apache-2.0 |
biddyweb/gateway | transport/wsn/src/test/java/org/kaazing/gateway/transport/wsn/handshake/RFC6455ChromeExtendedHandshakeTestIT.java | 4386 | /**
* Copyright (c) 2007-2014 Kaazing Corporation. All rights reserved.
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.kaazing.gateway.transport.wsn.handshake;
import static org.kaazing.test.util.ITUtil.createRuleChain;
import java.io.File;
import java.io.FileInputStream;
import java.net.URI;
import java.security.KeyStore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestRule;
import org.kaazing.gateway.server.test.GatewayRule;
import org.kaazing.gateway.server.test.config.GatewayConfiguration;
import org.kaazing.gateway.server.test.config.builder.GatewayConfigurationBuilder;
import org.kaazing.k3po.junit.annotation.Specification;
import org.kaazing.k3po.junit.rules.K3poRule;
/**
 * Integration test: verifies that a Chrome-style RFC 6455 client can complete the
 * gateway's extended handshake and negotiate an application extension against an
 * embedded gateway configured with plain (8001/8003) and TLS (9001) echo services.
 */
public class RFC6455ChromeExtendedHandshakeTestIT {
// K3po drives the scripted client side of the handshake.
private K3poRule robot = new K3poRule();
// TLS material for the wss:// accepts; loaded from target/truststore in the rule below.
KeyStore keyStore = null;
char[] password = "ab987c".toCharArray();
File keyStorePwFile = new File("target/truststore/keystore.pw");
public GatewayRule gateway = new GatewayRule() {
{
// Load the JCEKS keystore before building the configuration; failure to load
// is fatal for the whole test, hence the wrap-and-rethrow.
try {
keyStore = KeyStore.getInstance("JCEKS");
FileInputStream in = new FileInputStream(
"target/truststore/keystore.db");
keyStore.load(in, password);
} catch (Exception e) {
throw new RuntimeException(e);
}
// Three echo services: an open one (ws/wss), an authenticated one bound to
// the "demo" realm, and a wildcard-origin one on port 8003.
GatewayConfiguration configuration = new GatewayConfigurationBuilder()
.service()
.accept(URI.create("ws://localhost:8001/echo"))
.accept(URI.create("wss://localhost:9001/echo"))
.type("echo")
.crossOrigin()
.allowOrigin("http://localhost:8000")
.done()
.crossOrigin()
.allowOrigin("https://localhost:9000")
.done()
.done()
.service()
.accept(URI.create("ws://localhost:8001/echoAuth"))
.accept(URI.create("wss://localhost:9001/echoAuth"))
.type("echo")
.realmName("demo")
.authorization()
.requireRole("AUTHORIZED")
.done()
.crossOrigin()
.allowOrigin("http://localhost:8000")
.done()
.crossOrigin()
.allowOrigin("https://localhost:9000")
.done()
.done()
.service()
.accept(URI.create("ws://localhost:8003/echo8003"))
.type("echo")
.crossOrigin()
.allowOrigin("*")
.done()
.done()
.security()
// TODO: keyStoreFile and keyStorePasswordFile are
// deprecated method which will be removed eventually(4.0.1
// time frame) and keyStore + keyStorePassword should be
// sufficient.
// KG-8840
.keyStore(keyStore)
.keyStorePassword(password)
.keyStorePasswordFile(keyStorePwFile)
.realm()
.name("demo")
.description("Kaazing WebSocket Gateway Demo")
.httpChallengeScheme("Basic")
.done()
.done()
.done();
init(configuration);
}
};
// Gateway must be started before the robot runs; createRuleChain enforces that order.
@Rule
public TestRule chain = createRuleChain(gateway, robot);
// The actual handshake logic lives in the K3po script named by @Specification;
// finish() blocks until the script completes.
@Specification("canCompleteAnExtendedHandshakeAndNegotiateAnApplicationExtension")
@Test
public void canCompleteAnExtendedHandshakeAndNegotiateAnApplicationExtension()
throws Exception {
robot.finish();
}
}
| apache-2.0 |
awhitford/Resteasy | testsuite/integration-tests/src/test/java/org/jboss/resteasy/test/resource/param/resource/StringParamUnmarshallerService.java | 1321 | package org.jboss.resteasy.test.resource.param.resource;
import org.jboss.resteasy.test.resource.param.StringParamUnmarshallerTest;
import org.junit.Assert;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import java.util.Calendar;
import java.util.Date;
/**
 * JAX-RS resource used by {@code StringParamUnmarshallerTest}: each endpoint
 * asserts that its path parameters were unmarshalled to the expected values
 * before echoing them back as plain text.
 */
@Path("/")
public class StringParamUnmarshallerService {

    /**
     * Expects {@code {date}} to have been parsed with the "MM-dd-yyyy" format,
     * i.e. April 23rd 1977 (Calendar.MONTH is zero-based, so April == 3).
     */
    @GET
    @Produces("text/plain")
    @Path("/datetest/{date}")
    public String get(@PathParam("date") @StringParamUnmarshallerTest.StringParamUnmarshallerDateFormat("MM-dd-yyyy") Date date) {
        Calendar calendar = Calendar.getInstance();
        calendar.setTime(date);
        Assert.assertEquals("Wrong date", 3, calendar.get(Calendar.MONTH));
        Assert.assertEquals("Wrong date", 23, calendar.get(Calendar.DAY_OF_MONTH));
        Assert.assertEquals("Wrong date", 1977, calendar.get(Calendar.YEAR));
        return date.toString();
    }

    /**
     * Expects {@code {fruit}} to resolve to ORANGE and {@code {sport}} to a
     * sport named "football"; returns their concatenation.
     */
    @GET
    @Produces("text/plain")
    @Path("fromstring/{fruit}/{sport}")
    public String getFromString(@PathParam("fruit") StringParamUnmarshallerFruit fruit, @PathParam("sport") StringParamUnmarshallerSport sport) {
        Assert.assertEquals("Wrong fruit", fruit, StringParamUnmarshallerFruit.ORANGE);
        Assert.assertEquals("Wrong sport", "football", sport.name);
        return sport.name + fruit;
    }
}
| apache-2.0 |
nectec-wisru/android-ThaiWidget | thai-widget/src/main/java/nectec/thai/address/AddressEntity.java | 774 | /*
* Copyright (c) 2016 NECTEC
* National Electronics and Computer Technology Center, Thailand
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package nectec.thai.address;
/**
 * Common contract for Thai address data entities: anything that exposes an
 * identifying code plus a human-readable name.
 */
public interface AddressEntity {
/** @return the identifying code of this address unit. */
String getCode();
/** @return the human-readable name of this address unit. */
String getName();
}
| apache-2.0 |
kalimatas/elasticsearch | server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java | 43979 | /*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.query;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.FuzzyQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.automaton.Operations;
import org.elasticsearch.Version;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.query.support.QueryParsers;
import org.elasticsearch.index.search.QueryParserHelper;
import org.elasticsearch.index.search.QueryStringQueryParser;
import org.joda.time.DateTimeZone;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.TreeMap;
/**
* A query that parses a query string and runs it. There are two modes that this operates. The first,
* when no field is added (using {@link #field(String)}, will run the query once and non prefixed fields
* will use the {@link #defaultField(String)} set. The second, when one or more fields are added
* (using {@link #field(String)}), will run the parsed query against the provided fields, and combine
* them using Dismax.
*/
public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQueryBuilder> {
public static final String NAME = "query_string";
public static final int DEFAULT_MAX_DETERMINED_STATES = Operations.DEFAULT_MAX_DETERMINIZED_STATES;
public static final boolean DEFAULT_ENABLE_POSITION_INCREMENTS = true;
public static final boolean DEFAULT_ESCAPE = false;
public static final int DEFAULT_FUZZY_PREFIX_LENGTH = FuzzyQuery.defaultPrefixLength;
public static final int DEFAULT_FUZZY_MAX_EXPANSIONS = FuzzyQuery.defaultMaxExpansions;
public static final int DEFAULT_PHRASE_SLOP = 0;
public static final Fuzziness DEFAULT_FUZZINESS = Fuzziness.AUTO;
public static final Operator DEFAULT_OPERATOR = Operator.OR;
public static final MultiMatchQueryBuilder.Type DEFAULT_TYPE = MultiMatchQueryBuilder.Type.BEST_FIELDS;
public static final boolean DEFAULT_FUZZY_TRANSPOSITIONS = FuzzyQuery.defaultTranspositions;
// Field names recognized when parsing the query DSL for this query.
private static final ParseField QUERY_FIELD = new ParseField("query");
private static final ParseField FIELDS_FIELD = new ParseField("fields");
private static final ParseField DEFAULT_FIELD_FIELD = new ParseField("default_field");
private static final ParseField DEFAULT_OPERATOR_FIELD = new ParseField("default_operator");
private static final ParseField ANALYZER_FIELD = new ParseField("analyzer");
private static final ParseField QUOTE_ANALYZER_FIELD = new ParseField("quote_analyzer");
private static final ParseField ALLOW_LEADING_WILDCARD_FIELD = new ParseField("allow_leading_wildcard");
private static final ParseField MAX_DETERMINIZED_STATES_FIELD = new ParseField("max_determinized_states");
private static final ParseField ENABLE_POSITION_INCREMENTS_FIELD = new ParseField("enable_position_increments");
private static final ParseField ESCAPE_FIELD = new ParseField("escape");
private static final ParseField FUZZY_PREFIX_LENGTH_FIELD = new ParseField("fuzzy_prefix_length");
private static final ParseField FUZZY_MAX_EXPANSIONS_FIELD = new ParseField("fuzzy_max_expansions");
private static final ParseField FUZZY_REWRITE_FIELD = new ParseField("fuzzy_rewrite");
private static final ParseField PHRASE_SLOP_FIELD = new ParseField("phrase_slop");
private static final ParseField TIE_BREAKER_FIELD = new ParseField("tie_breaker");
private static final ParseField ANALYZE_WILDCARD_FIELD = new ParseField("analyze_wildcard");
private static final ParseField REWRITE_FIELD = new ParseField("rewrite");
private static final ParseField MINIMUM_SHOULD_MATCH_FIELD = new ParseField("minimum_should_match");
private static final ParseField QUOTE_FIELD_SUFFIX_FIELD = new ParseField("quote_field_suffix");
private static final ParseField LENIENT_FIELD = new ParseField("lenient");
private static final ParseField TIME_ZONE_FIELD = new ParseField("time_zone");
private static final ParseField TYPE_FIELD = new ParseField("type");
private static final ParseField GENERATE_SYNONYMS_PHRASE_QUERY = new ParseField("auto_generate_synonyms_phrase_query");
private static final ParseField FUZZY_TRANSPOSITIONS_FIELD = new ParseField("fuzzy_transpositions");
// The raw query text; mandatory, set once in the constructor.
private final String queryString;
private String defaultField;
/**
 * Fields to query against. If left empty will query default field,
 * currently _ALL. Uses a TreeMap to hold the fields so boolean clauses are
 * always sorted in same order for generated Lucene query for easier
 * testing.
 *
 * Can be changed back to HashMap once https://issues.apache.org/jira/browse/LUCENE-6305 is fixed.
 */
private final Map<String, Float> fieldsAndWeights = new TreeMap<>();
private Operator defaultOperator = DEFAULT_OPERATOR;
private String analyzer;
private String quoteAnalyzer;
private String quoteFieldSuffix;
// Boxed Booleans/Float below mean "unset" (null) vs. an explicit value; unset
// entries are omitted from the serialized query.
private Boolean allowLeadingWildcard;
private Boolean analyzeWildcard;
private boolean enablePositionIncrements = DEFAULT_ENABLE_POSITION_INCREMENTS;
private Fuzziness fuzziness = DEFAULT_FUZZINESS;
private int fuzzyPrefixLength = DEFAULT_FUZZY_PREFIX_LENGTH;
private int fuzzyMaxExpansions = DEFAULT_FUZZY_MAX_EXPANSIONS;
private String rewrite;
private String fuzzyRewrite;
private boolean escape = DEFAULT_ESCAPE;
private int phraseSlop = DEFAULT_PHRASE_SLOP;
private MultiMatchQueryBuilder.Type type = DEFAULT_TYPE;
private Float tieBreaker;
private String minimumShouldMatch;
private Boolean lenient;
private DateTimeZone timeZone;
/** To limit effort spent determinizing regexp queries. */
private int maxDeterminizedStates = DEFAULT_MAX_DETERMINED_STATES;
private boolean autoGenerateSynonymsPhraseQuery = true;
private boolean fuzzyTranspositions = DEFAULT_FUZZY_TRANSPOSITIONS;
/**
 * Creates a query for the given Lucene-syntax query string.
 *
 * @param queryString the text to parse; must not be null
 * @throws IllegalArgumentException if {@code queryString} is null
 */
public QueryStringQueryBuilder(String queryString) {
if (queryString == null) {
throw new IllegalArgumentException("query text missing");
}
this.queryString = queryString;
}
/**
 * Read from a stream.
 *
 * Fields must be read in exactly the order {@link #doWriteTo} writes them;
 * version checks below skip or adapt entries that older wire formats carried.
 */
public QueryStringQueryBuilder(StreamInput in) throws IOException {
super(in);
queryString = in.readString();
defaultField = in.readOptionalString();
int size = in.readVInt();
for (int i = 0; i < size; i++) {
fieldsAndWeights.put(in.readString(), in.readFloat());
}
defaultOperator = Operator.readFromStream(in);
analyzer = in.readOptionalString();
quoteAnalyzer = in.readOptionalString();
quoteFieldSuffix = in.readOptionalString();
if (in.getVersion().before(Version.V_6_0_0_beta1)) {
in.readBoolean(); // auto_generate_phrase_query
}
allowLeadingWildcard = in.readOptionalBoolean();
analyzeWildcard = in.readOptionalBoolean();
enablePositionIncrements = in.readBoolean();
fuzziness = new Fuzziness(in);
fuzzyPrefixLength = in.readVInt();
fuzzyMaxExpansions = in.readVInt();
fuzzyRewrite = in.readOptionalString();
phraseSlop = in.readVInt();
if (in.getVersion().before(Version.V_6_0_0_beta1)) {
// Pre-6.0 streams carried use_dismax and a mandatory tie_breaker; map
// them onto the new type/tieBreaker representation.
in.readBoolean(); // use_dismax
tieBreaker = in.readFloat();
type = DEFAULT_TYPE;
} else {
type = MultiMatchQueryBuilder.Type.readFromStream(in);
tieBreaker = in.readOptionalFloat();
}
rewrite = in.readOptionalString();
minimumShouldMatch = in.readOptionalString();
lenient = in.readOptionalBoolean();
timeZone = in.readOptionalTimeZone();
escape = in.readBoolean();
maxDeterminizedStates = in.readVInt();
if (in.getVersion().onOrAfter(Version.V_5_1_1) && in.getVersion().before(Version.V_6_0_0_beta1)) {
in.readBoolean(); // split_on_whitespace
// Old "all fields" flag is now expressed as default_field = "*".
Boolean useAllField = in.readOptionalBoolean();
if (useAllField != null && useAllField) {
defaultField = "*";
}
}
if (in.getVersion().onOrAfter(Version.V_6_1_0)) {
autoGenerateSynonymsPhraseQuery = in.readBoolean();
fuzzyTranspositions = in.readBoolean();
}
}
@Override
protected void doWriteTo(StreamOutput out) throws IOException {
// Keep write order in strict sync with the StreamInput constructor; any
// reordering breaks wire compatibility.
out.writeString(this.queryString);
out.writeOptionalString(this.defaultField);
out.writeVInt(this.fieldsAndWeights.size());
for (Map.Entry<String, Float> fieldsEntry : this.fieldsAndWeights.entrySet()) {
out.writeString(fieldsEntry.getKey());
out.writeFloat(fieldsEntry.getValue());
}
this.defaultOperator.writeTo(out);
out.writeOptionalString(this.analyzer);
out.writeOptionalString(this.quoteAnalyzer);
out.writeOptionalString(this.quoteFieldSuffix);
if (out.getVersion().before(Version.V_6_0_0_beta1)) {
// Placeholder for the removed flag so older nodes can still read us.
out.writeBoolean(false); // auto_generate_phrase_query
}
out.writeOptionalBoolean(this.allowLeadingWildcard);
out.writeOptionalBoolean(this.analyzeWildcard);
out.writeBoolean(this.enablePositionIncrements);
this.fuzziness.writeTo(out);
out.writeVInt(this.fuzzyPrefixLength);
out.writeVInt(this.fuzzyMaxExpansions);
out.writeOptionalString(this.fuzzyRewrite);
out.writeVInt(this.phraseSlop);
if (out.getVersion().before(Version.V_6_0_0_beta1)) {
// Pre-6.0 format: mandatory use_dismax flag plus a non-optional float.
out.writeBoolean(true); // use_dismax
out.writeFloat(tieBreaker != null ? tieBreaker : 0.0f);
} else {
type.writeTo(out);
out.writeOptionalFloat(tieBreaker);
}
out.writeOptionalString(this.rewrite);
out.writeOptionalString(this.minimumShouldMatch);
out.writeOptionalBoolean(this.lenient);
out.writeOptionalTimeZone(timeZone);
out.writeBoolean(this.escape);
out.writeVInt(this.maxDeterminizedStates);
if (out.getVersion().onOrAfter(Version.V_5_1_1) && out.getVersion().before(Version.V_6_0_0_beta1)) {
out.writeBoolean(false); // split_on_whitespace
// Express default_field = "*" as the old "all fields" flag.
Boolean useAllFields = defaultField == null ? null : Regex.isMatchAllPattern(defaultField);
out.writeOptionalBoolean(useAllFields);
}
if (out.getVersion().onOrAfter(Version.V_6_1_0)) {
out.writeBoolean(autoGenerateSynonymsPhraseQuery);
out.writeBoolean(fuzzyTranspositions);
}
}
/** @return the raw query string passed to the constructor. */
public String queryString() {
return this.queryString;
}
/**
 * The default field to run against when no prefix field is specified. Only relevant when
 * not explicitly adding fields the query string will run against.
 */
public QueryStringQueryBuilder defaultField(String defaultField) {
this.defaultField = defaultField;
return this;
}
/** @return the default field, or null if none has been set. */
public String defaultField() {
return this.defaultField;
}
/**
 * This setting is deprecated, set {@link #defaultField(String)} to "*" instead.
 */
@Deprecated
public QueryStringQueryBuilder useAllFields(Boolean useAllFields) {
if (useAllFields != null && useAllFields) {
this.defaultField = "*";
}
return this;
}
/** @return whether the default field is the match-all pattern, or null if no default field is set. */
@Deprecated
public Boolean useAllFields() {
return defaultField == null ? null : Regex.isMatchAllPattern(defaultField);
}
/**
 * Adds a field to run the query string against. The field will be associated with the
 * default boost of {@link AbstractQueryBuilder#DEFAULT_BOOST}.
 * Use {@link #field(String, float)} to set a specific boost for the field.
 */
public QueryStringQueryBuilder field(String field) {
this.fieldsAndWeights.put(field, AbstractQueryBuilder.DEFAULT_BOOST);
return this;
}
/**
 * Adds a field to run the query string against with a specific boost.
 */
public QueryStringQueryBuilder field(String field, float boost) {
this.fieldsAndWeights.put(field, boost);
return this;
}
/**
 * Add several fields to run the query against with a specific boost.
 */
public QueryStringQueryBuilder fields(Map<String, Float> fields) {
this.fieldsAndWeights.putAll(fields);
return this;
}
/** Returns the fields including their respective boosts to run the query against. */
public Map<String, Float> fields() {
return this.fieldsAndWeights;
}
/**
 * @param type Sets how multiple fields should be combined to build textual part queries.
 *
 * NOTE(review): unlike the other setters this is not fluent (returns void) —
 * confirm whether returning {@code this} would be a welcome follow-up.
 */
public void type(MultiMatchQueryBuilder.Type type) {
this.type = type;
}
/**
 * Use {@link QueryStringQueryBuilder#tieBreaker} instead.
 */
@Deprecated
public QueryStringQueryBuilder useDisMax(boolean useDisMax) {
return this;
}
/**
 * Use {@link QueryStringQueryBuilder#tieBreaker} instead.
 */
@Deprecated
public boolean useDisMax() {
return true;
}
/**
 * When more than one field is used with the query string, and combined queries are using
 * dis max, control the tie breaker for it.
 */
public QueryStringQueryBuilder tieBreaker(float tieBreaker) {
this.tieBreaker = tieBreaker;
return this;
}
// NOTE(review): the backing field is a nullable Float with no default; calling
// this getter before tieBreaker(float) has been set auto-unboxes null and
// throws NullPointerException — confirm whether callers always set it first.
public float tieBreaker() {
return this.tieBreaker;
}
/**
 * Sets the boolean operator of the query parser used to parse the query string.
 * <p>
 * In default mode ({@link Operator#OR}) terms without any modifiers
 * are considered optional: for example <code>capital of Hungary</code> is equal to
 * <code>capital OR of OR Hungary</code>.
 * <p>
 * In {@link Operator#AND} mode terms are considered to be in conjunction: the
 * above mentioned query is parsed as <code>capital AND of AND Hungary</code>
 */
public QueryStringQueryBuilder defaultOperator(Operator defaultOperator) {
this.defaultOperator = defaultOperator == null ? DEFAULT_OPERATOR : defaultOperator;
return this;
}
/** @return the default boolean operator; never null. */
public Operator defaultOperator() {
return this.defaultOperator;
}
/**
 * The optional analyzer used to analyze the query string. Note, if a field has search analyzer
 * defined for it, then it will be used automatically. Defaults to the smart search analyzer.
 */
public QueryStringQueryBuilder analyzer(String analyzer) {
this.analyzer = analyzer;
return this;
}
/**
 * The optional analyzer used to analyze the query string for phrase searches. Note, if a field has search (quote) analyzer
 * defined for it, then it will be used automatically. Defaults to the smart search analyzer.
 */
public QueryStringQueryBuilder quoteAnalyzer(String quoteAnalyzer) {
this.quoteAnalyzer = quoteAnalyzer;
return this;
}
/**
 * This setting is ignored
 */
@Deprecated
public QueryStringQueryBuilder autoGeneratePhraseQueries(boolean autoGeneratePhraseQueries) {
return this;
}
/**
 * This setting is ignored
 */
@Deprecated
public boolean autoGeneratePhraseQueries() {
return false;
}
/**
 * Protects against too-difficult regular expression queries.
 */
public QueryStringQueryBuilder maxDeterminizedStates(int maxDeterminizedStates) {
this.maxDeterminizedStates = maxDeterminizedStates;
return this;
}
/** @return the cap on automaton determinization effort for regexp queries. */
public int maxDeterminizedStates() {
return this.maxDeterminizedStates;
}
/**
 * Should leading wildcards be allowed or not. Defaults to <tt>true</tt>.
 */
public QueryStringQueryBuilder allowLeadingWildcard(Boolean allowLeadingWildcard) {
this.allowLeadingWildcard = allowLeadingWildcard;
return this;
}
/** @return the explicit leading-wildcard setting, or null if unset. */
public Boolean allowLeadingWildcard() {
return this.allowLeadingWildcard;
}
/**
 * Set to <tt>true</tt> to enable position increments in result query. Defaults to
 * <tt>true</tt>.
 * <p>
 * When set, result phrase and multi-phrase queries will be aware of position increments.
 * Useful when e.g. a StopFilter increases the position increment of the token that follows an omitted token.
 */
public QueryStringQueryBuilder enablePositionIncrements(boolean enablePositionIncrements) {
this.enablePositionIncrements = enablePositionIncrements;
return this;
}
/** @return whether position increments are honored in generated phrase queries. */
public boolean enablePositionIncrements() {
return this.enablePositionIncrements;
}
/**
 * Set the edit distance for fuzzy queries. Default is "AUTO".
 */
public QueryStringQueryBuilder fuzziness(Fuzziness fuzziness) {
this.fuzziness = fuzziness == null ? DEFAULT_FUZZINESS : fuzziness;
return this;
}
/** @return the fuzzy edit distance; never null. */
public Fuzziness fuzziness() {
return this.fuzziness;
}
/**
 * Set the minimum prefix length for fuzzy queries. Default is 1.
 */
public QueryStringQueryBuilder fuzzyPrefixLength(int fuzzyPrefixLength) {
this.fuzzyPrefixLength = fuzzyPrefixLength;
return this;
}
/** @return the minimum prefix length for fuzzy queries. */
public int fuzzyPrefixLength() {
return fuzzyPrefixLength;
}
/** Sets the maximum number of term expansions for fuzzy queries. */
public QueryStringQueryBuilder fuzzyMaxExpansions(int fuzzyMaxExpansions) {
this.fuzzyMaxExpansions = fuzzyMaxExpansions;
return this;
}
/** @return the maximum number of term expansions for fuzzy queries. */
public int fuzzyMaxExpansions() {
return fuzzyMaxExpansions;
}
/** Sets the rewrite method used by fuzzy queries. */
public QueryStringQueryBuilder fuzzyRewrite(String fuzzyRewrite) {
this.fuzzyRewrite = fuzzyRewrite;
return this;
}
/** @return the fuzzy rewrite method, or null if unset. */
public String fuzzyRewrite() {
return fuzzyRewrite;
}
/**
 * Sets the default slop for phrases. If zero, then exact phrase matches
 * are required. Default value is zero.
 */
public QueryStringQueryBuilder phraseSlop(int phraseSlop) {
this.phraseSlop = phraseSlop;
return this;
}
/** @return the default phrase slop. */
public int phraseSlop() {
return phraseSlop;
}
/** Sets the rewrite method used by multi-term queries. */
public QueryStringQueryBuilder rewrite(String rewrite) {
this.rewrite = rewrite;
return this;
}
/**
 * Set to <tt>true</tt> to enable analysis on wildcard and prefix queries.
 */
public QueryStringQueryBuilder analyzeWildcard(Boolean analyzeWildcard) {
this.analyzeWildcard = analyzeWildcard;
return this;
}
/** @return the explicit analyze-wildcard setting, or null if unset. */
public Boolean analyzeWildcard() {
return this.analyzeWildcard;
}
/** @return the rewrite method, or null if unset. */
public String rewrite() {
return this.rewrite;
}
/** Sets the minimum-should-match specification applied to boolean clauses. */
public QueryStringQueryBuilder minimumShouldMatch(String minimumShouldMatch) {
this.minimumShouldMatch = minimumShouldMatch;
return this;
}
/** @return the minimum-should-match specification, or null if unset. */
public String minimumShouldMatch() {
return this.minimumShouldMatch;
}
/**
 * An optional field name suffix to automatically try and add to the field searched when using quoted text.
 */
public QueryStringQueryBuilder quoteFieldSuffix(String quoteFieldSuffix) {
this.quoteFieldSuffix = quoteFieldSuffix;
return this;
}
/** @return the quoted-text field suffix, or null if unset. */
public String quoteFieldSuffix() {
return this.quoteFieldSuffix;
}
/**
 * Sets the query string parser to be lenient when parsing field values, defaults to the index
 * setting and if not set, defaults to false.
 */
public QueryStringQueryBuilder lenient(Boolean lenient) {
this.lenient = lenient;
return this;
}
/** @return the explicit leniency setting, or null if unset. */
public Boolean lenient() {
return this.lenient;
}
/**
 * In case of date field, we can adjust the from/to fields using a timezone
 */
public QueryStringQueryBuilder timeZone(String timeZone) {
if (timeZone != null) {
this.timeZone = DateTimeZone.forID(timeZone);
} else {
this.timeZone = null;
}
return this;
}
/** Sets the time zone used when parsing date fields; null clears it. */
public QueryStringQueryBuilder timeZone(DateTimeZone timeZone) {
this.timeZone = timeZone;
return this;
}
/** @return the time zone used for date fields, or null if unset. */
public DateTimeZone timeZone() {
return this.timeZone;
}
/**
 * Set to <tt>true</tt> to enable escaping of the query string
 */
public QueryStringQueryBuilder escape(boolean escape) {
this.escape = escape;
return this;
}
/** @return whether the query string is escaped before parsing. */
public boolean escape() {
return this.escape;
}
/**
 * This setting is ignored, this query parser splits on operator only.
 */
@Deprecated
public QueryStringQueryBuilder splitOnWhitespace(boolean value) {
return this;
}
/**
 * This setting is ignored, this query parser splits on operator only.
 */
@Deprecated
public boolean splitOnWhitespace() {
return false;
}
/** Sets whether phrase queries are auto-generated for multi-term synonyms. */
public QueryStringQueryBuilder autoGenerateSynonymsPhraseQuery(boolean value) {
this.autoGenerateSynonymsPhraseQuery = value;
return this;
}
/**
 * Whether phrase queries should be automatically generated for multi terms synonyms.
 * Defaults to <tt>true</tt>.
 */
public boolean autoGenerateSynonymsPhraseQuery() {
return autoGenerateSynonymsPhraseQuery;
}
/** @return whether fuzzy queries support transpositions. */
public boolean fuzzyTranspositions() {
return fuzzyTranspositions;
}
/**
 * Sets whether transpositions are supported in fuzzy queries.<p>
 * The default metric used by fuzzy queries to determine a match is the Damerau-Levenshtein
 * distance formula which supports transpositions. Setting transposition to false will
 * switch to classic Levenshtein distance.<br>
 * If not set, Damerau-Levenshtein distance metric will be used.
 */
public QueryStringQueryBuilder fuzzyTranspositions(boolean fuzzyTranspositions) {
this.fuzzyTranspositions = fuzzyTranspositions;
return this;
}
@Override
protected void doXContent(XContentBuilder builder, Params params) throws IOException {
    // Serializes this builder to its XContent (JSON) form. Optional settings are
    // emitted only when explicitly set (non-null) so output round-trips through
    // fromXContent without introducing spurious explicit values.
    builder.startObject(NAME);
    builder.field(QUERY_FIELD.getPreferredName(), this.queryString);
    if (this.defaultField != null) {
        builder.field(DEFAULT_FIELD_FIELD.getPreferredName(), this.defaultField);
    }
    // Fields are rendered as "name^weight" entries.
    builder.startArray(FIELDS_FIELD.getPreferredName());
    for (Map.Entry<String, Float> fieldEntry : this.fieldsAndWeights.entrySet()) {
        builder.value(fieldEntry.getKey() + "^" + fieldEntry.getValue());
    }
    builder.endArray();
    if (this.type != null) {
        builder.field(TYPE_FIELD.getPreferredName(), type.toString().toLowerCase(Locale.ENGLISH));
    }
    if (tieBreaker != null) {
        builder.field(TIE_BREAKER_FIELD.getPreferredName(), this.tieBreaker);
    }
    builder.field(DEFAULT_OPERATOR_FIELD.getPreferredName(),
            this.defaultOperator.name().toLowerCase(Locale.ROOT));
    if (this.analyzer != null) {
        builder.field(ANALYZER_FIELD.getPreferredName(), this.analyzer);
    }
    if (this.quoteAnalyzer != null) {
        builder.field(QUOTE_ANALYZER_FIELD.getPreferredName(), this.quoteAnalyzer);
    }
    builder.field(MAX_DETERMINIZED_STATES_FIELD.getPreferredName(), this.maxDeterminizedStates);
    if (this.allowLeadingWildcard != null) {
        builder.field(ALLOW_LEADING_WILDCARD_FIELD.getPreferredName(), this.allowLeadingWildcard);
    }
    builder.field(ENABLE_POSITION_INCREMENTS_FIELD.getPreferredName(), this.enablePositionIncrements);
    this.fuzziness.toXContent(builder, params);
    builder.field(FUZZY_PREFIX_LENGTH_FIELD.getPreferredName(), this.fuzzyPrefixLength);
    builder.field(FUZZY_MAX_EXPANSIONS_FIELD.getPreferredName(), this.fuzzyMaxExpansions);
    if (this.fuzzyRewrite != null) {
        builder.field(FUZZY_REWRITE_FIELD.getPreferredName(), this.fuzzyRewrite);
    }
    builder.field(PHRASE_SLOP_FIELD.getPreferredName(), this.phraseSlop);
    if (this.analyzeWildcard != null) {
        builder.field(ANALYZE_WILDCARD_FIELD.getPreferredName(), this.analyzeWildcard);
    }
    if (this.rewrite != null) {
        builder.field(REWRITE_FIELD.getPreferredName(), this.rewrite);
    }
    if (this.minimumShouldMatch != null) {
        builder.field(MINIMUM_SHOULD_MATCH_FIELD.getPreferredName(), this.minimumShouldMatch);
    }
    if (this.quoteFieldSuffix != null) {
        builder.field(QUOTE_FIELD_SUFFIX_FIELD.getPreferredName(), this.quoteFieldSuffix);
    }
    if (this.lenient != null) {
        builder.field(LENIENT_FIELD.getPreferredName(), this.lenient);
    }
    if (this.timeZone != null) {
        // The zone is serialized by ID, matching the ID-based equality in doEquals.
        builder.field(TIME_ZONE_FIELD.getPreferredName(), this.timeZone.getID());
    }
    builder.field(ESCAPE_FIELD.getPreferredName(), this.escape);
    builder.field(GENERATE_SYNONYMS_PHRASE_QUERY.getPreferredName(), autoGenerateSynonymsPhraseQuery);
    builder.field(FUZZY_TRANSPOSITIONS_FIELD.getPreferredName(), fuzzyTranspositions);
    printBoostAndQueryName(builder);
    builder.endObject();
}
/**
 * Parses a {@code query_string} query from its XContent (JSON) representation,
 * the inverse of {@link #doXContent}.
 *
 * @param parser positioned at the start of the query object
 * @return the parsed builder
 * @throws IOException      on underlying parse errors
 * @throws ParsingException on unknown fields/tokens or when the mandatory
 *                          {@code query} field is missing
 */
public static QueryStringQueryBuilder fromXContent(XContentParser parser) throws IOException {
    String currentFieldName = null;
    XContentParser.Token token;
    String queryString = null;
    String defaultField = null;
    String analyzer = null;
    String quoteAnalyzer = null;
    String queryName = null;
    float boost = AbstractQueryBuilder.DEFAULT_BOOST;
    int maxDeterminizedStates = QueryStringQueryBuilder.DEFAULT_MAX_DETERMINED_STATES;
    boolean enablePositionIncrements = QueryStringQueryBuilder.DEFAULT_ENABLE_POSITION_INCREMENTS;
    boolean escape = QueryStringQueryBuilder.DEFAULT_ESCAPE;
    int fuzzyPrefixLength = QueryStringQueryBuilder.DEFAULT_FUZZY_PREFIX_LENGTH;
    int fuzzyMaxExpansions = QueryStringQueryBuilder.DEFAULT_FUZZY_MAX_EXPANSIONS;
    int phraseSlop = QueryStringQueryBuilder.DEFAULT_PHRASE_SLOP;
    MultiMatchQueryBuilder.Type type = DEFAULT_TYPE;
    Float tieBreaker = null;
    Boolean analyzeWildcard = null;
    Boolean allowLeadingWildcard = null;
    String minimumShouldMatch = null;
    String quoteFieldSuffix = null;
    Boolean lenient = null;
    Operator defaultOperator = QueryStringQueryBuilder.DEFAULT_OPERATOR;
    String timeZone = null;
    Fuzziness fuzziness = QueryStringQueryBuilder.DEFAULT_FUZZINESS;
    String fuzzyRewrite = null;
    String rewrite = null;
    Map<String, Float> fieldsAndWeights = null;
    boolean autoGenerateSynonymsPhraseQuery = true;
    boolean fuzzyTranspositions = DEFAULT_FUZZY_TRANSPOSITIONS;
    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
        if (token == XContentParser.Token.FIELD_NAME) {
            currentFieldName = parser.currentName();
        } else if (token == XContentParser.Token.START_ARRAY) {
            // The only array-valued setting is "fields": entries may carry "name^weight".
            if (FIELDS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                List<String> fields = new ArrayList<>();
                while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
                    fields.add(parser.text());
                }
                fieldsAndWeights = QueryParserHelper.parseFieldsAndWeights(fields);
            } else {
                throw new ParsingException(parser.getTokenLocation(), "[" + QueryStringQueryBuilder.NAME +
                        "] query does not support [" + currentFieldName + "]");
            }
        } else if (token.isValue()) {
            if (QUERY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                queryString = parser.text();
            } else if (DEFAULT_FIELD_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                defaultField = parser.text();
            } else if (DEFAULT_OPERATOR_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                defaultOperator = Operator.fromString(parser.text());
            } else if (ANALYZER_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                analyzer = parser.text();
            } else if (QUOTE_ANALYZER_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                quoteAnalyzer = parser.text();
            } else if (ALLOW_LEADING_WILDCARD_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                allowLeadingWildcard = parser.booleanValue();
            } else if (MAX_DETERMINIZED_STATES_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                maxDeterminizedStates = parser.intValue();
            } else if (ENABLE_POSITION_INCREMENTS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                enablePositionIncrements = parser.booleanValue();
            } else if (ESCAPE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                escape = parser.booleanValue();
            } else if (FUZZY_PREFIX_LENGTH_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                fuzzyPrefixLength = parser.intValue();
            } else if (FUZZY_MAX_EXPANSIONS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                fuzzyMaxExpansions = parser.intValue();
            } else if (FUZZY_REWRITE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                fuzzyRewrite = parser.textOrNull();
            } else if (PHRASE_SLOP_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                phraseSlop = parser.intValue();
            } else if (Fuzziness.FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                fuzziness = Fuzziness.parse(parser);
            } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                boost = parser.floatValue();
            } else if (TYPE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                type = MultiMatchQueryBuilder.Type.parse(parser.text(), parser.getDeprecationHandler());
            } else if (TIE_BREAKER_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                tieBreaker = parser.floatValue();
            } else if (ANALYZE_WILDCARD_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                analyzeWildcard = parser.booleanValue();
            } else if (REWRITE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                rewrite = parser.textOrNull();
            } else if (MINIMUM_SHOULD_MATCH_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                minimumShouldMatch = parser.textOrNull();
            } else if (QUOTE_FIELD_SUFFIX_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                quoteFieldSuffix = parser.textOrNull();
            } else if (LENIENT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                lenient = parser.booleanValue();
            // NOTE: a duplicate MAX_DETERMINIZED_STATES_FIELD branch was removed here;
            // it was unreachable because the identical branch above always matches first.
            } else if (TIME_ZONE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                try {
                    timeZone = parser.text();
                } catch (IllegalArgumentException e) {
                    throw new ParsingException(parser.getTokenLocation(), "[" + QueryStringQueryBuilder.NAME +
                            "] time_zone [" + parser.text() + "] is unknown");
                }
            } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                queryName = parser.text();
            } else if (GENERATE_SYNONYMS_PHRASE_QUERY.match(currentFieldName, parser.getDeprecationHandler())) {
                autoGenerateSynonymsPhraseQuery = parser.booleanValue();
            } else if (FUZZY_TRANSPOSITIONS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                fuzzyTranspositions = parser.booleanValue();
            } else {
                throw new ParsingException(parser.getTokenLocation(), "[" + QueryStringQueryBuilder.NAME +
                        "] query does not support [" + currentFieldName + "]");
            }
        } else {
            throw new ParsingException(parser.getTokenLocation(), "[" + QueryStringQueryBuilder.NAME +
                    "] unknown token [" + token + "] after [" + currentFieldName + "]");
        }
    }
    // "query" is the only mandatory setting.
    if (queryString == null) {
        throw new ParsingException(parser.getTokenLocation(), "[" + QueryStringQueryBuilder.NAME + "] must be provided with a [query]");
    }
    QueryStringQueryBuilder queryStringQuery = new QueryStringQueryBuilder(queryString);
    if (fieldsAndWeights != null) {
        queryStringQuery.fields(fieldsAndWeights);
    }
    queryStringQuery.defaultField(defaultField);
    queryStringQuery.defaultOperator(defaultOperator);
    queryStringQuery.analyzer(analyzer);
    queryStringQuery.quoteAnalyzer(quoteAnalyzer);
    queryStringQuery.allowLeadingWildcard(allowLeadingWildcard);
    queryStringQuery.maxDeterminizedStates(maxDeterminizedStates);
    queryStringQuery.enablePositionIncrements(enablePositionIncrements);
    queryStringQuery.escape(escape);
    queryStringQuery.fuzzyPrefixLength(fuzzyPrefixLength);
    queryStringQuery.fuzzyMaxExpansions(fuzzyMaxExpansions);
    queryStringQuery.fuzzyRewrite(fuzzyRewrite);
    queryStringQuery.phraseSlop(phraseSlop);
    queryStringQuery.fuzziness(fuzziness);
    queryStringQuery.type(type);
    if (tieBreaker != null) {
        queryStringQuery.tieBreaker(tieBreaker);
    }
    queryStringQuery.analyzeWildcard(analyzeWildcard);
    queryStringQuery.rewrite(rewrite);
    queryStringQuery.minimumShouldMatch(minimumShouldMatch);
    queryStringQuery.quoteFieldSuffix(quoteFieldSuffix);
    queryStringQuery.lenient(lenient);
    queryStringQuery.timeZone(timeZone);
    queryStringQuery.boost(boost);
    queryStringQuery.queryName(queryName);
    queryStringQuery.autoGenerateSynonymsPhraseQuery(autoGenerateSynonymsPhraseQuery);
    queryStringQuery.fuzzyTranspositions(fuzzyTranspositions);
    return queryStringQuery;
}
@Override
public String getWriteableName() {
    // Name under which this query builder is registered for serialization/parsing.
    return NAME;
}
/**
 * Equality over all user-visible settings of this builder.
 *
 * <p>NOTE: the time-zone comparison must be parenthesized explicitly. The
 * conditional operator {@code ?:} binds more loosely than {@code &&}, so without
 * parentheses the entire preceding {@code &&} chain becomes the ternary's
 * condition, and the comparisons listed after the time-zone check
 * ({@code escape}, {@code maxDeterminizedStates},
 * {@code autoGenerateSynonymsPhraseQuery}, {@code fuzzyTranspositions}) are
 * silently skipped whenever {@code timeZone} is {@code null}.
 */
@Override
protected boolean doEquals(QueryStringQueryBuilder other) {
    return Objects.equals(queryString, other.queryString) &&
            Objects.equals(defaultField, other.defaultField) &&
            Objects.equals(fieldsAndWeights, other.fieldsAndWeights) &&
            Objects.equals(defaultOperator, other.defaultOperator) &&
            Objects.equals(analyzer, other.analyzer) &&
            Objects.equals(quoteAnalyzer, other.quoteAnalyzer) &&
            Objects.equals(quoteFieldSuffix, other.quoteFieldSuffix) &&
            Objects.equals(allowLeadingWildcard, other.allowLeadingWildcard) &&
            Objects.equals(enablePositionIncrements, other.enablePositionIncrements) &&
            Objects.equals(analyzeWildcard, other.analyzeWildcard) &&
            Objects.equals(fuzziness, other.fuzziness) &&
            Objects.equals(fuzzyPrefixLength, other.fuzzyPrefixLength) &&
            Objects.equals(fuzzyMaxExpansions, other.fuzzyMaxExpansions) &&
            Objects.equals(fuzzyRewrite, other.fuzzyRewrite) &&
            Objects.equals(phraseSlop, other.phraseSlop) &&
            Objects.equals(type, other.type) &&
            Objects.equals(tieBreaker, other.tieBreaker) &&
            Objects.equals(rewrite, other.rewrite) &&
            Objects.equals(minimumShouldMatch, other.minimumShouldMatch) &&
            Objects.equals(lenient, other.lenient) &&
            // Time zones are compared by ID, mirroring doXContent/doHashCode.
            (timeZone == null ? other.timeZone == null
                    : other.timeZone != null && Objects.equals(timeZone.getID(), other.timeZone.getID())) &&
            Objects.equals(escape, other.escape) &&
            Objects.equals(maxDeterminizedStates, other.maxDeterminizedStates) &&
            Objects.equals(autoGenerateSynonymsPhraseQuery, other.autoGenerateSynonymsPhraseQuery) &&
            Objects.equals(fuzzyTranspositions, other.fuzzyTranspositions);
}
@Override
protected int doHashCode() {
    // Must stay consistent with doEquals: the time zone participates via its
    // string ID (0 when unset), matching the ID-based equality comparison.
    return Objects.hash(queryString, defaultField, fieldsAndWeights, defaultOperator, analyzer, quoteAnalyzer,
            quoteFieldSuffix, allowLeadingWildcard, analyzeWildcard,
            enablePositionIncrements, fuzziness, fuzzyPrefixLength,
            fuzzyMaxExpansions, fuzzyRewrite, phraseSlop, type, tieBreaker, rewrite, minimumShouldMatch, lenient,
            timeZone == null ? 0 : timeZone.getID(), escape, maxDeterminizedStates, autoGenerateSynonymsPhraseQuery,
            fuzzyTranspositions);
}
@Override
protected Query doToQuery(QueryShardContext context) throws IOException {
    // Builds the Lucene query: pick the target fields, configure the parser from
    // this builder's settings, parse, then re-apply minimum_should_match while
    // preserving any BoostQuery wrappers the parser produced.
    String rewrittenQueryString = escape ? org.apache.lucene.queryparser.classic.QueryParser.escape(this.queryString) : queryString;
    if (fieldsAndWeights.size() > 0 && this.defaultField != null) {
        throw addValidationError("cannot use [fields] parameter in conjunction with [default_field]", null);
    }
    QueryStringQueryParser queryParser;
    boolean isLenient = lenient == null ? context.queryStringLenient() : lenient;
    if (defaultField != null) {
        // "*" as default field means all fields; lenient defaults to true in that mode.
        if (Regex.isMatchAllPattern(defaultField)) {
            queryParser = new QueryStringQueryParser(context, lenient == null ? true : lenient);
        } else {
            queryParser = new QueryStringQueryParser(context, defaultField, isLenient);
        }
    } else if (fieldsAndWeights.size() > 0) {
        final Map<String, Float> resolvedFields = QueryParserHelper.resolveMappingFields(context, fieldsAndWeights);
        queryParser = new QueryStringQueryParser(context, resolvedFields, isLenient);
    } else {
        // Neither fields nor default_field given: fall back to the index-level defaults.
        List<String> defaultFields = context.defaultFields();
        boolean isAllField = defaultFields.size() == 1 && Regex.isMatchAllPattern(defaultFields.get(0));
        if (isAllField) {
            queryParser = new QueryStringQueryParser(context, lenient == null ? true : lenient);
        } else {
            final Map<String, Float> resolvedFields = QueryParserHelper.resolveMappingFields(context,
                    QueryParserHelper.parseFieldsAndWeights(defaultFields));
            queryParser = new QueryStringQueryParser(context, resolvedFields, isLenient);
        }
    }
    if (analyzer != null) {
        NamedAnalyzer namedAnalyzer = context.getIndexAnalyzers().get(analyzer);
        if (namedAnalyzer == null) {
            throw new QueryShardException(context, "[query_string] analyzer [" + analyzer + "] not found");
        }
        queryParser.setForceAnalyzer(namedAnalyzer);
    }
    if (quoteAnalyzer != null) {
        NamedAnalyzer forceQuoteAnalyzer = context.getIndexAnalyzers().get(quoteAnalyzer);
        if (forceQuoteAnalyzer == null) {
            throw new QueryShardException(context, "[query_string] quote_analyzer [" + quoteAnalyzer + "] not found");
        }
        queryParser.setForceQuoteAnalyzer(forceQuoteAnalyzer);
    }
    queryParser.setDefaultOperator(defaultOperator.toQueryParserOperator());
    queryParser.setType(type);
    if (tieBreaker != null) {
        queryParser.setGroupTieBreaker(tieBreaker);
    } else {
        queryParser.setGroupTieBreaker(type.tieBreaker());
    }
    queryParser.setPhraseSlop(phraseSlop);
    queryParser.setQuoteFieldSuffix(quoteFieldSuffix);
    queryParser.setAllowLeadingWildcard(allowLeadingWildcard == null ?
            context.queryStringAllowLeadingWildcard() : allowLeadingWildcard);
    queryParser.setAnalyzeWildcard(analyzeWildcard == null ? context.queryStringAnalyzeWildcard() : analyzeWildcard);
    queryParser.setEnablePositionIncrements(enablePositionIncrements);
    queryParser.setFuzziness(fuzziness);
    queryParser.setFuzzyPrefixLength(fuzzyPrefixLength);
    queryParser.setFuzzyMaxExpansions(fuzzyMaxExpansions);
    queryParser.setFuzzyRewriteMethod(QueryParsers.parseRewriteMethod(this.fuzzyRewrite, LoggingDeprecationHandler.INSTANCE));
    queryParser.setMultiTermRewriteMethod(QueryParsers.parseRewriteMethod(this.rewrite, LoggingDeprecationHandler.INSTANCE));
    queryParser.setTimeZone(timeZone);
    queryParser.setMaxDeterminizedStates(maxDeterminizedStates);
    queryParser.setAutoGenerateMultiTermSynonymsPhraseQuery(autoGenerateSynonymsPhraseQuery);
    queryParser.setFuzzyTranspositions(fuzzyTranspositions);

    Query query;
    try {
        query = queryParser.parse(rewrittenQueryString);
    } catch (org.apache.lucene.queryparser.classic.ParseException e) {
        throw new QueryShardException(context, "Failed to parse query [" + this.queryString + "]", e);
    }

    if (query == null) {
        return null;
    }

    //save the BoostQuery wrapped structure if present
    List<Float> boosts = new ArrayList<>();
    while(query instanceof BoostQuery) {
        BoostQuery boostQuery = (BoostQuery) query;
        boosts.add(boostQuery.getBoost());
        query = boostQuery.getQuery();
    }
    query = Queries.fixNegativeQueryIfNeeded(query);
    query = Queries.maybeApplyMinimumShouldMatch(query, this.minimumShouldMatch);
    //restore the previous BoostQuery wrapping
    for (int i = boosts.size() - 1; i >= 0; i--) {
        query = new BoostQuery(query, boosts.get(i));
    }
    return query;
}
}
| apache-2.0 |
lukhnos/j2objc | jre_emul/android/platform/libcore/ojluni/src/main/java/java/io/BufferedWriter.java | 9069 | /*
* Copyright (c) 1996, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package java.io;
/**
* Writes text to a character-output stream, buffering characters so as to
* provide for the efficient writing of single characters, arrays, and strings.
*
* <p> The buffer size may be specified, or the default size may be accepted.
* The default is large enough for most purposes.
*
* <p> A newLine() method is provided, which uses the platform's own notion of
* line separator as defined by the system property <tt>line.separator</tt>.
* Not all platforms use the newline character ('\n') to terminate lines.
* Calling this method to terminate each output line is therefore preferred to
* writing a newline character directly.
*
* <p> In general, a Writer sends its output immediately to the underlying
* character or byte stream. Unless prompt output is required, it is advisable
* to wrap a BufferedWriter around any Writer whose write() operations may be
* costly, such as FileWriters and OutputStreamWriters. For example,
*
* <pre>
* PrintWriter out
* = new PrintWriter(new BufferedWriter(new FileWriter("foo.out")));
* </pre>
*
* will buffer the PrintWriter's output to the file. Without buffering, each
* invocation of a print() method would cause characters to be converted into
* bytes that would then be written immediately to the file, which can be very
* inefficient.
*
* @see PrintWriter
* @see FileWriter
* @see OutputStreamWriter
* @see java.nio.file.Files#newBufferedWriter
*
* @author Mark Reinhold
* @since JDK1.1
*/
public class BufferedWriter extends Writer {

    private Writer out;

    private char cb[];
    private int nChars, nextChar;

    // Default buffer size. Declared final: it is a private constant and must not
    // be reassignable at runtime (the original mutable static was a latent bug).
    private static final int defaultCharBufferSize = 8192;

    /**
     * Line separator string.  This is the value of the line.separator
     * property at the moment that the stream was created.
     * Final: assigned exactly once in the constructor.
     */
    private final String lineSeparator;

    /**
     * Creates a buffered character-output stream that uses a default-sized
     * output buffer.
     *
     * @param  out  A Writer
     */
    public BufferedWriter(Writer out) {
        this(out, defaultCharBufferSize);
    }

    /**
     * Creates a new buffered character-output stream that uses an output
     * buffer of the given size.
     *
     * @param  out  A Writer
     * @param  sz   Output-buffer size, a positive integer
     *
     * @exception  IllegalArgumentException  If {@code sz <= 0}
     */
    public BufferedWriter(Writer out, int sz) {
        super(out);
        if (sz <= 0)
            throw new IllegalArgumentException("Buffer size <= 0");
        this.out = out;
        cb = new char[sz];
        nChars = sz;
        nextChar = 0;

        lineSeparator = System.getProperty("line.separator");
    }

    /** Checks to make sure that the stream has not been closed */
    private void ensureOpen() throws IOException {
        if (out == null)
            throw new IOException("Stream closed");
    }

    /**
     * Flushes the output buffer to the underlying character stream, without
     * flushing the stream itself.  This method is non-private only so that it
     * may be invoked by PrintStream.
     */
    void flushBuffer() throws IOException {
        synchronized (lock) {
            ensureOpen();
            if (nextChar == 0)
                return;
            out.write(cb, 0, nextChar);
            nextChar = 0;
        }
    }

    /**
     * Writes a single character.
     *
     * @exception  IOException  If an I/O error occurs
     */
    public void write(int c) throws IOException {
        synchronized (lock) {
            ensureOpen();
            if (nextChar >= nChars)
                flushBuffer();
            cb[nextChar++] = (char) c;
        }
    }

    /**
     * Our own little min method, to avoid loading java.lang.Math if we've run
     * out of file descriptors and we're trying to print a stack trace.
     */
    private int min(int a, int b) {
        if (a < b) return a;
        return b;
    }

    /**
     * Writes a portion of an array of characters.
     *
     * <p> Ordinarily this method stores characters from the given array into
     * this stream's buffer, flushing the buffer to the underlying stream as
     * needed.  If the requested length is at least as large as the buffer,
     * however, then this method will flush the buffer and write the characters
     * directly to the underlying stream.  Thus redundant
     * <code>BufferedWriter</code>s will not copy data unnecessarily.
     *
     * @param  cbuf  A character array
     * @param  off   Offset from which to start reading characters
     * @param  len   Number of characters to write
     *
     * @exception  IOException  If an I/O error occurs
     */
    public void write(char cbuf[], int off, int len) throws IOException {
        synchronized (lock) {
            ensureOpen();
            if ((off < 0) || (off > cbuf.length) || (len < 0) ||
                ((off + len) > cbuf.length) || ((off + len) < 0)) {
                throw new IndexOutOfBoundsException();
            } else if (len == 0) {
                return;
            }

            if (len >= nChars) {
                /* If the request length exceeds the size of the output buffer,
                   flush the buffer and then write the data directly.  In this
                   way buffered streams will cascade harmlessly. */
                flushBuffer();
                out.write(cbuf, off, len);
                return;
            }

            int b = off, t = off + len;
            while (b < t) {
                int d = min(nChars - nextChar, t - b);
                System.arraycopy(cbuf, b, cb, nextChar, d);
                b += d;
                nextChar += d;
                if (nextChar >= nChars)
                    flushBuffer();
            }
        }
    }

    /**
     * Writes a portion of a String.
     *
     * <p> If the value of the <tt>len</tt> parameter is negative then no
     * characters are written.  This is contrary to the specification of this
     * method in the {@linkplain java.io.Writer#write(java.lang.String,int,int)
     * superclass}, which requires that an {@link IndexOutOfBoundsException} be
     * thrown.
     *
     * @param  s     String to be written
     * @param  off   Offset from which to start reading characters
     * @param  len   Number of characters to be written
     *
     * @exception  IOException  If an I/O error occurs
     */
    public void write(String s, int off, int len) throws IOException {
        synchronized (lock) {
            ensureOpen();

            int b = off, t = off + len;
            while (b < t) {
                int d = min(nChars - nextChar, t - b);
                s.getChars(b, b + d, cb, nextChar);
                b += d;
                nextChar += d;
                if (nextChar >= nChars)
                    flushBuffer();
            }
        }
    }

    /**
     * Writes a line separator.  The line separator string is defined by the
     * system property <tt>line.separator</tt>, and is not necessarily a single
     * newline ('\n') character.
     *
     * @exception  IOException  If an I/O error occurs
     */
    public void newLine() throws IOException {
        write(lineSeparator);
    }

    /**
     * Flushes the stream.
     *
     * @exception  IOException  If an I/O error occurs
     */
    public void flush() throws IOException {
        synchronized (lock) {
            flushBuffer();
            out.flush();
        }
    }

    @SuppressWarnings("try")
    public void close() throws IOException {
        synchronized (lock) {
            if (out == null) {
                return;
            }
            // try-with-resources closes the underlying writer even if the final
            // buffer flush throws; the finally block marks this stream closed.
            try (Writer w = out) {
                flushBuffer();
            } finally {
                out = null;
                cb = null;
            }
        }
    }
}
| apache-2.0 |
RoundSparrow/Uoccin | app/src/main/java/net/ggelardi/uoccin/comp/IntEditPreference.java | 729 | package net.ggelardi.uoccin.comp;
import android.content.Context;
import android.preference.EditTextPreference;
import android.util.AttributeSet;
public class IntEditPreference extends EditTextPreference {
public IntEditPreference(Context context) {
super(context);
}
public IntEditPreference(Context context, AttributeSet attrs) {
super(context, attrs);
}
public IntEditPreference(Context context, AttributeSet attrs, int defStyle) {
super(context, attrs, defStyle);
}
@Override
protected String getPersistedString(String defaultReturnValue) {
return String.valueOf(getPersistedInt(-1));
}
@Override
protected boolean persistString(String value) {
return persistInt(Integer.valueOf(value));
}
} | apache-2.0 |
engagepoint/camel | camel-core/src/test/java/org/apache/camel/component/file/FileConsumeCharsetTest.java | 2201 | /**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.file;
import java.io.File;
import org.apache.camel.ContextTestSupport;
import org.apache.camel.Exchange;
import org.apache.camel.builder.RouteBuilder;
import org.apache.camel.component.mock.MockEndpoint;
/**
*
*/
/**
 * Verifies that the file consumer reads UTF-8 encoded content correctly and
 * deletes the file afterwards ({@code delete=true}).
 */
public class FileConsumeCharsetTest extends ContextTestSupport {

    @Override
    protected void setUp() throws Exception {
        deleteDirectory("target/files");
        super.setUp();
        // Seed the directory with a UTF-8 file containing non-ASCII characters.
        template.sendBodyAndHeader("file://target/files?charset=UTF-8", "Hello World \u4f60\u597d", Exchange.FILE_NAME, "report.txt");
    }

    public void testConsumeAndDelete() throws Exception {
        MockEndpoint mock = getMockEndpoint("mock:result");
        mock.expectedBodiesReceived("Hello World \u4f60\u597d");

        assertMockEndpointsSatisfied();

        // FIX: the boolean result of matchesMockWaitTime() was previously ignored,
        // so a failed wait went unnoticed; assert it so the race is reported.
        assertTrue("Exchange should be done", oneExchangeDone.matchesMockWaitTime());

        // file should not exists
        assertFalse("File should been deleted", new File("target/files/report.txt").getAbsoluteFile().exists());
    }

    @Override
    protected RouteBuilder createRouteBuilder() throws Exception {
        return new RouteBuilder() {
            public void configure() throws Exception {
                from("file://target/files/?fileName=report.txt&delete=true&charset=UTF-8")
                    .convertBodyTo(String.class)
                    .to("mock:result");
            }
        };
    }
}
| apache-2.0 |
retomerz/intellij-community | plugins/maven/src/test/java/org/jetbrains/idea/maven/project/importing/MavenProjectsManagerTest.java | 45189 | /*
* Copyright 2000-2016 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jetbrains.idea.maven.project.importing;
import com.intellij.openapi.application.ApplicationManager;
import com.intellij.openapi.command.WriteCommandAction;
import com.intellij.openapi.module.Module;
import com.intellij.openapi.module.ModuleManager;
import com.intellij.openapi.roots.LibraryOrderEntry;
import com.intellij.openapi.roots.ModifiableRootModel;
import com.intellij.openapi.roots.ModuleRootManager;
import com.intellij.openapi.roots.OrderEntry;
import com.intellij.openapi.util.Pair;
import com.intellij.openapi.util.io.FileUtil;
import com.intellij.openapi.vfs.VirtualFile;
import com.intellij.util.FileContentUtil;
import org.jetbrains.idea.maven.MavenImportingTestCase;
import org.jetbrains.idea.maven.importing.MavenRootModelAdapter;
import org.jetbrains.idea.maven.model.MavenExplicitProfiles;
import org.jetbrains.idea.maven.project.*;
import org.jetbrains.idea.maven.server.NativeMavenProjectHolder;
import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
public class MavenProjectsManagerTest extends MavenImportingTestCase {
// Enables the projects manager with automatic import before each test.
@Override
protected void setUp() throws Exception {
    super.setUp();
    initProjectsManager(true);
    getMavenImporterSettings().setImportAutomatically(true);
}

// A pom that was never imported must not resolve to a MavenProject.
public void testShouldReturnNullForUnprocessedFiles() throws Exception {
    // this pom file doesn't belong to any of the modules, this is won't be processed
    // by MavenProjectProjectsManager and won't occur in its projects list.
    createProjectPom("<groupId>test</groupId>" +
                     "<artifactId>project</artifactId>" +
                     "<version>1</version>");

    // shouldn't throw
    assertNull(myProjectsManager.findProject(myProjectPom));
}

// Deleting then recreating the managed pom must drop and re-add the root project.
public void testUpdatingProjectsWhenAbsentManagedProjectFileAppears() throws Exception {
    importProject("<groupId>test</groupId>" +
                  "<artifactId>parent</artifactId>" +
                  "<version>1</version>" +

                  "<modules>" +
                  "  <module>m</module>" +
                  "</modules>");
    assertEquals(1, myProjectsTree.getRootProjects().size());

    new WriteCommandAction.Simple(myProject) {
        @Override
        protected void run() throws Throwable {
            myProjectPom.delete(this);
        }
    }.execute().throwException();

    // readings are asynchronous; wait before asserting tree state
    waitForReadingCompletion();
    assertEquals(0, myProjectsTree.getRootProjects().size());

    createProjectPom("<groupId>test</groupId>" +
                     "<artifactId>parent</artifactId>" +
                     "<version>1</version>" +

                     "<modules>" +
                     "  <module>m</module>" +
                     "</modules>");

    waitForReadingCompletion();
    assertEquals(1, myProjectsTree.getRootProjects().size());
}

// Renaming pom.xml away (and back) must remove (and restore) the project.
public void testUpdatingProjectsWhenRenaming() throws Exception {
    VirtualFile p1 = createModulePom("project1",
                                     "<groupId>test</groupId>" +
                                     "<artifactId>project1</artifactId>" +
                                     "<version>1</version>");

    final VirtualFile p2 = createModulePom("project2",
                                           "<groupId>test</groupId>" +
                                           "<artifactId>project2</artifactId>" +
                                           "<version>1</version>");
    importProjects(p1, p2);
    assertEquals(2, myProjectsTree.getRootProjects().size());

    new WriteCommandAction.Simple(myProject) {
        @Override
        protected void run() throws Throwable {
            p2.rename(this, "foo.bar");
            waitForReadingCompletion();
            assertEquals(1, myProjectsTree.getRootProjects().size());
            p2.rename(this, "pom.xml");
        }
    }.execute().throwException();
    waitForReadingCompletion();

    assertEquals(2, myProjectsTree.getRootProjects().size());
}

// Moving a pom out of (and back into) the watched tree must update the project list.
public void testUpdatingProjectsWhenMoving() throws Exception {
    VirtualFile p1 = createModulePom("project1",
                                     "<groupId>test</groupId>" +
                                     "<artifactId>project1</artifactId>" +
                                     "<version>1</version>");

    final VirtualFile p2 = createModulePom("project2",
                                           "<groupId>test</groupId>" +
                                           "<artifactId>project2</artifactId>" +
                                           "<version>1</version>");
    importProjects(p1, p2);

    final VirtualFile oldDir = p2.getParent();
    new WriteCommandAction.Simple(myProject) {
        @Override
        protected void run() throws Throwable {
            VirtualFile newDir = myProjectRoot.createChildDirectory(this, "foo");

            assertEquals(2, myProjectsTree.getRootProjects().size());

            p2.move(this, newDir);
            waitForReadingCompletion();
            assertEquals(1, myProjectsTree.getRootProjects().size());

            p2.move(this, oldDir);
        }
    }.execute().throwException();
    waitForReadingCompletion();

    assertEquals(2, myProjectsTree.getRootProjects().size());
}
public void testUpdatingProjectsWhenMovingModuleFile() throws Exception {
createProjectPom("<groupId>test</groupId>" +
"<artifactId>parent</artifactId>" +
"<version>1</version>" +
"<modules>" +
" <module>m1</module>" +
" <module>m2</module>" +
"</modules>");
final VirtualFile m = createModulePom("m1",
"<groupId>test</groupId>" +
"<artifactId>m</artifactId>" +
"<version>1</version>");
importProject();
final VirtualFile oldDir = m.getParent();
new WriteCommandAction.Simple(myProject) {
@Override
protected void run() throws Throwable {
VirtualFile newDir = myProjectRoot.createChildDirectory(this, "m2");
assertEquals(1, myProjectsTree.getRootProjects().size());
assertEquals(1, myProjectsTree.getModules(myProjectsTree.getRootProjects().get(0)).size());
m.move(this, newDir);
waitForReadingCompletion();
assertEquals(1, myProjectsTree.getModules(myProjectsTree.getRootProjects().get(0)).size());
m.move(this, oldDir);
waitForReadingCompletion();
assertEquals(1, myProjectsTree.getModules(myProjectsTree.getRootProjects().get(0)).size());
m.move(this, myProjectRoot.createChildDirectory(this, "xxx"));
}
}.execute().throwException();
waitForReadingCompletion();
assertEquals(0, myProjectsTree.getModules(myProjectsTree.getRootProjects().get(0)).size());
}
public void testUpdatingProjectsWhenAbsentModuleFileAppears() throws Exception {
importProject("<groupId>test</groupId>" +
"<artifactId>parent</artifactId>" +
"<version>1</version>" +
"<modules>" +
" <module>m</module>" +
"</modules>");
List<MavenProject> roots = myProjectsTree.getRootProjects();
MavenProject parentNode = roots.get(0);
assertNotNull(parentNode);
assertTrue(myProjectsTree.getModules(roots.get(0)).isEmpty());
VirtualFile m = createModulePom("m",
"<groupId>test</groupId>" +
"<artifactId>m</artifactId>" +
"<version>1</version>");
waitForReadingCompletion();
List<MavenProject> children = myProjectsTree.getModules(roots.get(0));
assertEquals(1, children.size());
assertEquals(m, children.get(0).getFile());
}
public void testAddingAndRemovingManagedFiles() throws Exception {
VirtualFile m1 = createModulePom("m1",
"<groupId>test</groupId>" +
"<artifactId>m1</artifactId>" +
"<version>1</version>");
VirtualFile m2 = createModulePom("m2",
"<groupId>test</groupId>" +
"<artifactId>m2</artifactId>" +
"<version>1</version>");
importProject(m1);
assertUnorderedElementsAreEqual(myProjectsTree.getRootProjectsFiles(), m1);
myProjectsManager.addManagedFiles(Arrays.asList(m2));
waitForReadingCompletion();
assertUnorderedElementsAreEqual(myProjectsTree.getRootProjectsFiles(), m1, m2);
myProjectsManager.removeManagedFiles(Arrays.asList(m2));
waitForReadingCompletion();
assertUnorderedElementsAreEqual(myProjectsTree.getRootProjectsFiles(), m1);
}
public void testAddingAndRemovingManagedFilesAddsAndRemovesModules() throws Exception {
doTestAddingAndRemovingAddsAndRemovesModules(true);
}
public void testAddingAndRemovingManagedFilesAddsAndRemovesModulesInNonAutoImportMode() throws Exception {
doTestAddingAndRemovingAddsAndRemovesModules(false);
}
private void doTestAddingAndRemovingAddsAndRemovesModules(boolean autoImport) throws IOException {
VirtualFile m1 = createModulePom("m1",
"<groupId>test</groupId>" +
"<artifactId>m1</artifactId>" +
"<version>1</version>");
final VirtualFile m2 = createModulePom("m2",
"<groupId>test</groupId>" +
"<artifactId>m2</artifactId>" +
"<version>1</version>");
importProject(m1);
assertModules("m1");
resolveDependenciesAndImport(); // ensure no pending imports
getMavenImporterSettings().setImportAutomatically(autoImport);
myProjectsManager.addManagedFiles(Collections.singletonList(m2));
waitForReadingCompletion();
resolveDependenciesAndImport();
assertModules("m1", "m2");
configConfirmationForYesAnswer();
myProjectsManager.removeManagedFiles(Collections.singletonList(m2));
waitForReadingCompletion();
resolveDependenciesAndImport();
assertModules("m1");
}
public void testAddingManagedFileAndChangingAggregation() throws Exception {
importProject("<groupId>test</groupId>" +
"<artifactId>parent</artifactId>" +
"<version>1</version>" +
"<modules>" +
" <module>m</module>" +
"</modules>");
VirtualFile m = createModulePom("m",
"<groupId>test</groupId>" +
"<artifactId>m</artifactId>" +
"<version>1</version>");
waitForReadingCompletion();
assertEquals(1, myProjectsTree.getRootProjects().size());
assertEquals(1, myProjectsTree.getModules(myProjectsTree.getRootProjects().get(0)).size());
myProjectsManager.addManagedFiles(Arrays.asList(m));
waitForReadingCompletion();
assertEquals(1, myProjectsTree.getRootProjects().size());
assertEquals(1, myProjectsTree.getModules(myProjectsTree.getRootProjects().get(0)).size());
createProjectPom("<groupId>test</groupId>" +
"<artifactId>parent</artifactId>" +
"<version>1</version>");
waitForReadingCompletion();
assertEquals(2, myProjectsTree.getRootProjects().size());
assertEquals(0, myProjectsTree.getModules(myProjectsTree.getRootProjects().get(0)).size());
assertEquals(0, myProjectsTree.getModules(myProjectsTree.getRootProjects().get(1)).size());
}
public void testUpdatingProjectsOnSettingsXmlChange() throws Exception {
createProjectPom("<groupId>test</groupId>" +
"<artifactId>project</artifactId>" +
"<version>1</version>" +
"<packaging>pom</packaging>" +
"<modules>" +
" <module>m</module>" +
"</modules>" +
"<build>" +
" <sourceDirectory>${prop}</sourceDirectory>" +
"</build>");
createModulePom("m",
"<groupId>test</groupId>" +
"<artifactId>m</artifactId>" +
"<version>1</version>" +
"<parent>" +
" <groupId>test</groupId>" +
" <artifactId>project</artifactId>" +
" <version>1</version>" +
"</parent>" +
"<build>" +
" <sourceDirectory>${prop}</sourceDirectory>" +
"</build>");
updateSettingsXml("<profiles>" +
" <profile>" +
" <id>one</id>" +
" <activation>" +
" <activeByDefault>true</activeByDefault>" +
" </activation>" +
" <properties>" +
" <prop>value1</prop>" +
" </properties>" +
" </profile>" +
"</profiles>");
importProject();
List<MavenProject> roots = myProjectsTree.getRootProjects();
MavenProject parentNode = roots.get(0);
MavenProject childNode = myProjectsTree.getModules(roots.get(0)).get(0);
assertUnorderedPathsAreEqual(parentNode.getSources(), Arrays.asList(FileUtil.toSystemDependentName(getProjectPath() + "/value1")));
assertUnorderedPathsAreEqual(childNode.getSources(), Arrays.asList(FileUtil.toSystemDependentName(getProjectPath() + "/m/value1")));
updateSettingsXml("<profiles>" +
" <profile>" +
" <id>one</id>" +
" <activation>" +
" <activeByDefault>true</activeByDefault>" +
" </activation>" +
" <properties>" +
" <prop>value2</prop>" +
" </properties>" +
" </profile>" +
"</profiles>");
waitForReadingCompletion();
assertUnorderedPathsAreEqual(parentNode.getSources(), Arrays.asList(FileUtil.toSystemDependentName(getProjectPath() + "/value2")));
assertUnorderedPathsAreEqual(childNode.getSources(), Arrays.asList(FileUtil.toSystemDependentName(getProjectPath() + "/m/value2")));
deleteSettingsXml();
waitForReadingCompletion();
assertUnorderedPathsAreEqual(parentNode.getSources(), Arrays.asList(FileUtil.toSystemDependentName(getProjectPath() + "/${prop}")));
assertUnorderedPathsAreEqual(childNode.getSources(), Arrays.asList(FileUtil.toSystemDependentName(getProjectPath() + "/m/${prop}")));
updateSettingsXml("<profiles>" +
" <profile>" +
" <id>one</id>" +
" <activation>" +
" <activeByDefault>true</activeByDefault>" +
" </activation>" +
" <properties>" +
" <prop>value2</prop>" +
" </properties>" +
" </profile>" +
"</profiles>");
waitForReadingCompletion();
assertUnorderedPathsAreEqual(parentNode.getSources(), Arrays.asList(FileUtil.toSystemDependentName(getProjectPath() + "/value2")));
assertUnorderedPathsAreEqual(childNode.getSources(), Arrays.asList(FileUtil.toSystemDependentName(getProjectPath() + "/m/value2")));
}
public void testUpdatingProjectsWhenSettingsXmlLocationIsChanged() throws Exception {
createProjectPom("<groupId>test</groupId>" +
"<artifactId>project</artifactId>" +
"<version>1</version>" +
"<packaging>pom</packaging>" +
"<modules>" +
" <module>m</module>" +
"</modules>" +
"<build>" +
" <sourceDirectory>${prop}</sourceDirectory>" +
"</build>");
createModulePom("m",
"<groupId>test</groupId>" +
"<artifactId>m</artifactId>" +
"<version>1</version>" +
"<parent>" +
" <groupId>test</groupId>" +
" <artifactId>project</artifactId>" +
" <version>1</version>" +
"</parent>" +
"<build>" +
" <sourceDirectory>${prop}</sourceDirectory>" +
"</build>");
updateSettingsXml("<profiles>" +
" <profile>" +
" <id>one</id>" +
" <activation>" +
" <activeByDefault>true</activeByDefault>" +
" </activation>" +
" <properties>" +
" <prop>value1</prop>" +
" </properties>" +
" </profile>" +
"</profiles>");
importProject();
List<MavenProject> roots = myProjectsTree.getRootProjects();
MavenProject parentNode = roots.get(0);
MavenProject childNode = myProjectsTree.getModules(roots.get(0)).get(0);
assertUnorderedPathsAreEqual(parentNode.getSources(), Arrays.asList(FileUtil.toSystemDependentName(getProjectPath() + "/value1")));
assertUnorderedPathsAreEqual(childNode.getSources(), Arrays.asList(FileUtil.toSystemDependentName(getProjectPath() + "/m/value1")));
getMavenGeneralSettings().setUserSettingsFile("");
waitForReadingCompletion();
assertUnorderedPathsAreEqual(parentNode.getSources(), Arrays.asList(FileUtil.toSystemDependentName(getProjectPath() + "/${prop}")));
assertUnorderedPathsAreEqual(childNode.getSources(), Arrays.asList(FileUtil.toSystemDependentName(getProjectPath() + "/m/${prop}")));
getMavenGeneralSettings().setUserSettingsFile(new File(myDir, "settings.xml").getPath());
waitForReadingCompletion();
assertUnorderedPathsAreEqual(parentNode.getSources(), Arrays.asList(FileUtil.toSystemDependentName(getProjectPath() + "/value1")));
assertUnorderedPathsAreEqual(childNode.getSources(), Arrays.asList(FileUtil.toSystemDependentName(getProjectPath() + "/m/value1")));
}
public void testUpdatingProjectsOnSettingsXmlCreationAndDeletion() throws Exception {
deleteSettingsXml();
createProjectPom("<groupId>test</groupId>" +
"<artifactId>project</artifactId>" +
"<version>1</version>");
importProject();
assertUnorderedElementsAreEqual(myProjectsTree.getAvailableProfiles());
updateSettingsXml("<profiles>" +
" <profile>" +
" <id>one</id>" +
" </profile>" +
"</profiles>");
waitForReadingCompletion();
assertUnorderedElementsAreEqual(myProjectsTree.getAvailableProfiles(), "one");
deleteSettingsXml();
waitForReadingCompletion();
assertUnorderedElementsAreEqual(myProjectsTree.getAvailableProfiles());
}
public void testUpdatingMavenPathsWhenSettingsChanges() throws Exception {
createProjectPom("<groupId>test</groupId>" +
"<artifactId>project</artifactId>" +
"<version>1</version>");
File repo1 = new File(myDir, "localRepo1");
updateSettingsXml("<localRepository>" + repo1.getPath() + "</localRepository>");
waitForReadingCompletion();
assertEquals(repo1, getMavenGeneralSettings().getEffectiveLocalRepository());
File repo2 = new File(myDir, "localRepo2");
updateSettingsXml("<localRepository>" + repo2.getPath() + "</localRepository>");
waitForReadingCompletion();
assertEquals(repo2, getMavenGeneralSettings().getEffectiveLocalRepository());
}
public void testResolvingEnvVariableInRepositoryPath() throws Exception {
String temp = System.getenv(getEnvVar());
updateSettingsXml("<localRepository>${env." + getEnvVar() + "}/tmpRepo</localRepository>");
File repo = new File(temp + "/tmpRepo").getCanonicalFile();
assertEquals(repo.getPath(), getMavenGeneralSettings().getEffectiveLocalRepository().getPath());
importProject("<groupId>test</groupId>" +
"<artifactId>project</artifactId>" +
"<version>1</version>" +
"<dependencies>" +
" <dependency>" +
" <groupId>junit</groupId>" +
" <artifactId>junit</artifactId>" +
" <version>4.0</version>" +
" </dependency>" +
"</dependencies>");
assertModuleLibDep("project", "Maven: junit:junit:4.0",
"jar://" + FileUtil.toSystemIndependentName(repo.getPath()) + "/junit/junit/4.0/junit-4.0.jar!/");
}
public void testUpdatingProjectsOnProfilesXmlChange() throws Exception {
createProjectPom("<groupId>test</groupId>" +
"<artifactId>project</artifactId>" +
"<version>1</version>" +
"<packaging>pom</packaging>" +
"<modules>" +
" <module>m</module>" +
"</modules>" +
"<build>" +
" <sourceDirectory>${prop}</sourceDirectory>" +
"</build>");
createModulePom("m",
"<groupId>test</groupId>" +
"<artifactId>m</artifactId>" +
"<version>1</version>" +
"<parent>" +
" <groupId>test</groupId>" +
" <artifactId>project</artifactId>" +
" <version>1</version>" +
"</parent>" +
"<build>" +
" <sourceDirectory>${prop}</sourceDirectory>" +
"</build>");
createProfilesXmlOldStyle("<profile>" +
" <id>one</id>" +
" <activation>" +
" <activeByDefault>true</activeByDefault>" +
" </activation>" +
" <properties>" +
" <prop>value1</prop>" +
" </properties>" +
"</profile>");
importProject();
List<MavenProject> roots = myProjectsTree.getRootProjects();
MavenProject parentNode = roots.get(0);
MavenProject childNode = myProjectsTree.getModules(roots.get(0)).get(0);
assertUnorderedPathsAreEqual(parentNode.getSources(), Arrays.asList(FileUtil.toSystemDependentName(getProjectPath() + "/value1")));
assertUnorderedPathsAreEqual(childNode.getSources(), Arrays.asList(FileUtil.toSystemDependentName(getProjectPath() + "/m/value1")));
createProfilesXmlOldStyle("<profile>" +
" <id>one</id>" +
" <activation>" +
" <activeByDefault>true</activeByDefault>" +
" </activation>" +
" <properties>" +
" <prop>value2</prop>" +
" </properties>" +
"</profile>");
waitForReadingCompletion();
assertUnorderedPathsAreEqual(parentNode.getSources(), Arrays.asList(FileUtil.toSystemDependentName(getProjectPath() + "/value2")));
assertUnorderedPathsAreEqual(childNode.getSources(), Arrays.asList(FileUtil.toSystemDependentName(getProjectPath() + "/m/value2")));
deleteProfilesXml();
waitForReadingCompletion();
assertUnorderedPathsAreEqual(parentNode.getSources(), Arrays.asList(FileUtil.toSystemDependentName(getProjectPath() + "/${prop}")));
assertUnorderedPathsAreEqual(childNode.getSources(), Arrays.asList(FileUtil.toSystemDependentName(getProjectPath() + "/m/${prop}")));
createProfilesXmlOldStyle("<profile>" +
" <id>one</id>" +
" <activation>" +
" <activeByDefault>true</activeByDefault>" +
" </activation>" +
" <properties>" +
" <prop>value2</prop>" +
" </properties>" +
"</profile>");
waitForReadingCompletion();
assertUnorderedPathsAreEqual(parentNode.getSources(), Arrays.asList(FileUtil.toSystemDependentName(getProjectPath() + "/value2")));
assertUnorderedPathsAreEqual(childNode.getSources(), Arrays.asList(FileUtil.toSystemDependentName(getProjectPath() + "/m/value2")));
}
public void testHandlingDirectoryWithPomFileDeletion() throws Exception {
importProject("<groupId>test</groupId>" +
"<artifactId>project</artifactId>" +
"<packaging>pom</packaging>" +
"<version>1</version>");
createModulePom("dir/module", "<groupId>test</groupId>" +
"<artifactId>module</artifactId>" +
"<version>1</version>");
createProjectPom("<groupId>test</groupId>" +
"<artifactId>project</artifactId>" +
"<packaging>pom</packaging>" +
"<version>1</version>" +
"<modules>" +
" <module>dir/module</module>" +
"</modules>");
waitForReadingCompletion();
assertEquals(2, MavenProjectsManager.getInstance(myProject).getProjects().size());
final VirtualFile dir = myProjectRoot.findChild("dir");
new WriteCommandAction.Simple(myProject) {
@Override
protected void run() throws Throwable {
dir.delete(null);
}
}.execute().throwException();
waitForReadingCompletion();
assertEquals(1, MavenProjectsManager.getInstance(myProject).getProjects().size());
}
public void testSavingAndLoadingState() throws Exception {
MavenProjectsManagerState state = myProjectsManager.getState();
assertTrue(state.originalFiles.isEmpty());
assertTrue(MavenWorkspaceSettingsComponent.getInstance(myProject).getSettings().enabledProfiles.isEmpty());
assertTrue(state.ignoredFiles.isEmpty());
assertTrue(state.ignoredPathMasks.isEmpty());
VirtualFile p1 = createModulePom("project1",
"<groupId>test</groupId>" +
"<artifactId>project1</artifactId>" +
"<version>1</version>");
VirtualFile p2 = createModulePom("project2",
"<groupId>test</groupId>" +
"<artifactId>project2</artifactId>" +
"<version>1</version>" +
"<packaging>pom</packaging>" +
"<modules>" +
" <module>../project3</module>" +
"</modules>");
VirtualFile p3 = createModulePom("project3",
"<groupId>test</groupId>" +
"<artifactId>project3</artifactId>" +
"<version>1</version>");
importProjects(p1, p2);
myProjectsManager.setExplicitProfiles(new MavenExplicitProfiles(Arrays.asList("one", "two")));
myProjectsManager.setIgnoredFilesPaths(Arrays.asList(p1.getPath()));
myProjectsManager.setIgnoredFilesPatterns(Arrays.asList("*.xxx"));
state = myProjectsManager.getState();
assertUnorderedPathsAreEqual(state.originalFiles, Arrays.asList(p1.getPath(), p2.getPath()));
assertUnorderedElementsAreEqual(MavenWorkspaceSettingsComponent.getInstance(myProject).getState().enabledProfiles, "one", "two");
assertUnorderedPathsAreEqual(state.ignoredFiles, Arrays.asList(p1.getPath()));
assertUnorderedElementsAreEqual(state.ignoredPathMasks, "*.xxx");
MavenProjectsManagerState newState = new MavenProjectsManagerState();
newState.originalFiles = Arrays.asList(p1.getPath(), p3.getPath());
MavenWorkspaceSettingsComponent.getInstance(myProject).getSettings().setEnabledProfiles(Arrays.asList("three"));
newState.ignoredFiles = Collections.singleton(p1.getPath());
newState.ignoredPathMasks = Arrays.asList("*.zzz");
myProjectsManager.loadState(newState);
assertUnorderedPathsAreEqual(myProjectsManager.getProjectsTreeForTests().getManagedFilesPaths(),
Arrays.asList(p1.getPath(), p3.getPath()));
assertUnorderedElementsAreEqual(myProjectsManager.getExplicitProfiles().getEnabledProfiles(), "three");
assertUnorderedPathsAreEqual(myProjectsManager.getIgnoredFilesPaths(), Arrays.asList(p1.getPath()));
assertUnorderedElementsAreEqual(myProjectsManager.getIgnoredFilesPatterns(), "*.zzz");
waitForReadingCompletion();
assertUnorderedElementsAreEqual(myProjectsManager.getProjectsTreeForTests().getRootProjectsFiles(),
p1, p3);
}
public void testSchedulingReimportWhenPomFileIsDeleted() throws Exception {
createProjectPom("<groupId>test</groupId>" +
"<artifactId>project</artifactId>" +
"<version>1</version>" +
"<packaging>pom</packaging>" +
"<modules>" +
" <module>m</module>" +
"</modules>");
final VirtualFile m = createModulePom("m",
"<groupId>test</groupId>" +
"<artifactId>m</artifactId>" +
"<version>1</version>");
importProject();
myProjectsManager.performScheduledImportInTests(); // ensure no pending requests
assertModules("project", "m");
configConfirmationForYesAnswer();
new WriteCommandAction.Simple(myProject) {
@Override
protected void run() throws Throwable {
m.delete(this);
}
}.execute().throwException();
waitForReadingCompletion();
resolveDependenciesAndImport();
assertModules("project");
}
public void testSchedulingResolveOfDependentProjectWhenDependencyChanges() throws Exception {
createProjectPom("<groupId>test</groupId>" +
"<artifactId>project</artifactId>" +
"<packaging>pom</packaging>" +
"<version>1</version>" +
"<modules>" +
" <module>m1</module>" +
" <module>m2</module>" +
"</modules>");
createModulePom("m1", "<groupId>test</groupId>" +
"<artifactId>m1</artifactId>" +
"<version>1</version>" +
"<dependencies>" +
" <dependency>" +
" <groupId>test</groupId>" +
" <artifactId>m2</artifactId>" +
" <version>1</version>" +
" </dependency>" +
"</dependencies>");
createModulePom("m2", "<groupId>test</groupId>" +
"<artifactId>m2</artifactId>" +
"<version>1</version>");
importProject();
assertModuleModuleDeps("m1", "m2");
assertModuleLibDeps("m1");
createModulePom("m2", "<groupId>test</groupId>" +
"<artifactId>m2</artifactId>" +
"<version>1</version>" +
"<dependencies>" +
" <dependency>" +
" <groupId>junit</groupId>" +
" <artifactId>junit</artifactId>" +
" <version>4.0</version>" +
" </dependency>" +
"</dependencies>");
waitForReadingCompletion();
resolveDependenciesAndImport();
assertModuleModuleDeps("m1", "m2");
assertModuleLibDeps("m1", "Maven: junit:junit:4.0");
}
public void testSchedulingResolveOfDependentProjectWhenDependencyIsDeleted() throws Exception {
createProjectPom("<groupId>test</groupId>" +
"<artifactId>project</artifactId>" +
"<packaging>pom</packaging>" +
"<version>1</version>" +
"<modules>" +
" <module>m1</module>" +
" <module>m2</module>" +
"</modules>");
createModulePom("m1", "<groupId>test</groupId>" +
"<artifactId>m1</artifactId>" +
"<version>1</version>" +
"<dependencies>" +
" <dependency>" +
" <groupId>test</groupId>" +
" <artifactId>m2</artifactId>" +
" <version>1</version>" +
" </dependency>" +
"</dependencies>");
final VirtualFile m2 = createModulePom("m2", "<groupId>test</groupId>" +
"<artifactId>m2</artifactId>" +
"<version>1</version>" +
"<dependencies>" +
" <dependency>" +
" <groupId>junit</groupId>" +
" <artifactId>junit</artifactId>" +
" <version>4.0</version>" +
" </dependency>" +
"</dependencies>");
importProject();
assertModules("project", "m1", "m2");
assertModuleModuleDeps("m1", "m2");
assertModuleLibDeps("m1", "Maven: junit:junit:4.0");
new WriteCommandAction.Simple(myProject) {
@Override
protected void run() throws Throwable {
m2.delete(this);
}
}.execute().throwException();
configConfirmationForYesAnswer();// should update deps even if module is not removed
waitForReadingCompletion();
resolveDependenciesAndImport();
assertModules("project", "m1");
assertModuleModuleDeps("m1");
assertModuleLibDeps("m1", "Maven: test:m2:1");
}
public void testDoNotScheduleResolveOfInvalidProjectsDeleted() throws Exception {
final boolean[] called = new boolean[1];
myProjectsManager.addProjectsTreeListener(new MavenProjectsTree.ListenerAdapter() {
@Override
public void projectResolved(Pair<MavenProject, MavenProjectChanges> projectWithChanges,
NativeMavenProjectHolder nativeMavenProject) {
called[0] = true;
}
});
createProjectPom("<groupId>test</groupId>" +
"<artifactId>project</artifactId>" +
"<version>1");
importProject();
assertModules("project");
assertFalse(called[0]); // on import
createProjectPom("<groupId>test</groupId>" +
"<artifactId>project</artifactId>" +
"<version>2");
waitForReadingCompletion();
resolveDependenciesAndImport();
assertFalse(called[0]); // on update
}
public void testUpdatingFoldersAfterFoldersResolving() throws Exception {
createStdProjectFolders();
createProjectSubDirs("src1", "src2");
importProject("<groupId>test</groupId>" +
"<artifactId>project</artifactId>" +
"<version>1</version>" +
"<build>" +
" <plugins>" +
" <plugin>" +
" <groupId>org.codehaus.mojo</groupId>" +
" <artifactId>build-helper-maven-plugin</artifactId>" +
" <version>1.3</version>" +
" <executions>" +
" <execution>" +
" <id>someId</id>" +
" <phase>generate-sources</phase>" +
" <goals>" +
" <goal>add-source</goal>" +
" </goals>" +
" <configuration>" +
" <sources>" +
" <source>${basedir}/src1</source>" +
" <source>${basedir}/src2</source>" +
" </sources>" +
" </configuration>" +
" </execution>" +
" </executions>" +
" </plugin>" +
" </plugins>" +
"</build>");
assertSources("project", "src/main/java", "src1", "src2");
assertResources("project", "src/main/resources");
}
public void testForceReimport() throws Exception {
createProjectPom("<groupId>test</groupId>" +
"<artifactId>project</artifactId>" +
"<version>1</version>" +
"<dependencies>" +
" <dependency>" +
" <groupId>junit</groupId>" +
" <artifactId>junit</artifactId>" +
" <version>4.0</version>" +
" </dependency>" +
"</dependencies>");
importProject();
assertModules("project");
createProjectSubDir("src/main/java");
ApplicationManager.getApplication().runWriteAction(new Runnable() {
public void run() {
ModifiableRootModel model = ModuleRootManager.getInstance(getModule("project")).getModifiableModel();
for (OrderEntry each : model.getOrderEntries()) {
if (each instanceof LibraryOrderEntry && MavenRootModelAdapter.isMavenLibrary(((LibraryOrderEntry)each).getLibrary())) {
model.removeOrderEntry(each);
}
}
model.commit();
}
});
assertSources("project");
assertModuleLibDeps("project");
myProjectsManager.forceUpdateAllProjectsOrFindAllAvailablePomFiles();
waitForReadingCompletion();
myProjectsManager.waitForResolvingCompletion();
myProjectsManager.performScheduledImportInTests();
assertSources("project", "src/main/java");
assertModuleLibDeps("project", "Maven: junit:junit:4.0");
}
public void testScheduleReimportWhenPluginConfigurationChangesInTagName() throws Exception {
importProject("<groupId>test</groupId>" +
"<artifactId>project</artifactId>" +
"<version>1</version>" +
"<build>" +
" <plugins>" +
" <plugin>" +
" <groupId>group</groupId>" +
" <artifactId>id</artifactId>" +
" <version>1</version>" +
" <configuration>" +
" <foo>value</foo>" +
" </configuration>" +
" </plugin>" +
" </plugins>" +
"</build>");
myProjectsManager.performScheduledImportInTests();
assertFalse(myProjectsManager.hasScheduledImportsInTests());
createProjectPom("<groupId>test</groupId>" +
"<artifactId>project</artifactId>" +
"<version>1</version>" +
"<build>" +
" <plugins>" +
" <plugin>" +
" <groupId>group</groupId>" +
" <artifactId>id</artifactId>" +
" <version>1</version>" +
" <configuration>" +
" <bar>value</bar>" +
" </configuration>" +
" </plugin>" +
" </plugins>" +
"</build>");
myProjectsManager.waitForResolvingCompletion();
assertTrue(myProjectsManager.hasScheduledImportsInTests());
}
public void testScheduleReimportWhenPluginConfigurationChangesInValue() throws Exception {
importProject("<groupId>test</groupId>" +
"<artifactId>project</artifactId>" +
"<version>1</version>" +
"<build>" +
" <plugins>" +
" <plugin>" +
" <groupId>group</groupId>" +
" <artifactId>id</artifactId>" +
" <version>1</version>" +
" <configuration>" +
" <foo>value</foo>" +
" </configuration>" +
" </plugin>" +
" </plugins>" +
"</build>");
myProjectsManager.performScheduledImportInTests();
assertFalse(myProjectsManager.hasScheduledImportsInTests());
createProjectPom("<groupId>test</groupId>" +
"<artifactId>project</artifactId>" +
"<version>1</version>" +
"<build>" +
" <plugins>" +
" <plugin>" +
" <groupId>group</groupId>" +
" <artifactId>id</artifactId>" +
" <version>1</version>" +
" <configuration>" +
" <foo>value2</foo>" +
" </configuration>" +
" </plugin>" +
" </plugins>" +
"</build>");
myProjectsManager.waitForResolvingCompletion();
assertTrue(myProjectsManager.hasScheduledImportsInTests());
}
public void testIgnoringProjectsForDeletedModules() throws Exception {
createProjectPom("<groupId>test</groupId>" +
"<artifactId>project</artifactId>" +
"<version>1</version>" +
"<packaging>pom</packaging>" +
"<modules>" +
" <module>m</module>" +
"</modules>");
VirtualFile m = createModulePom("m",
"<groupId>test</groupId>" +
"<artifactId>m</artifactId>" +
"<version>1</version>");
importProject();
Module module = getModule("m");
assertNotNull(module);
assertFalse(myProjectsManager.isIgnored(myProjectsManager.findProject(m)));
ModuleManager.getInstance(myProject).disposeModule(module);
myProjectsManager.performScheduledImportInTests();
assertNull(ModuleManager.getInstance(myProject).findModuleByName("m"));
assertTrue(myProjectsManager.isIgnored(myProjectsManager.findProject(m)));
}
public void testDoNotRemoveMavenProjectsOnReparse() throws Exception {
// this pom file doesn't belong to any of the modules, this is won't be processed
// by MavenProjectProjectsManager and won't occur in its projects list.
importProject("<groupId>test</groupId>" +
"<artifactId>project</artifactId>" +
"<version>1</version>");
final StringBuilder log = new StringBuilder();
myProjectsManager.performScheduledImportInTests();
myProjectsManager.addProjectsTreeListener(new MavenProjectsTree.ListenerAdapter() {
@Override
public void projectsUpdated(List<Pair<MavenProject, MavenProjectChanges>> updated, List<MavenProject> deleted) {
for (Pair<MavenProject, MavenProjectChanges> each : updated) {
log.append("updated: " + each.first.getDisplayName() + " ");
}
for (MavenProject each : deleted) {
log.append("deleted: " + each.getDisplayName() + " ");
}
}
});
FileContentUtil.reparseFiles(myProject, myProjectsManager.getProjectsFiles(), true);
myProjectsManager.waitForReadingCompletion();
assertTrue(log.toString(), log.length() == 0);
}
}
| apache-2.0 |
zuoyebushiwo/elasticsearch1.7-study | src/main/java/org/elasticsearch/node/internal/InternalNode.java | 19568 | /*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.node.internal;
import org.apache.lucene.codecs.Codec;
import org.elasticsearch.Build;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchIllegalStateException;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionModule;
import org.elasticsearch.bulk.udp.BulkUdpModule;
import org.elasticsearch.bulk.udp.BulkUdpService;
import org.elasticsearch.cache.recycler.CacheRecycler;
import org.elasticsearch.cache.recycler.CacheRecyclerModule;
import org.elasticsearch.cache.recycler.PageCacheRecycler;
import org.elasticsearch.cache.recycler.PageCacheRecyclerModule;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.node.NodeClientModule;
import org.elasticsearch.cluster.ClusterModule;
import org.elasticsearch.cluster.ClusterNameModule;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
import org.elasticsearch.cluster.routing.RoutingService;
import org.elasticsearch.common.StopWatch;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.Lifecycle;
import org.elasticsearch.common.component.LifecycleComponent;
import org.elasticsearch.common.compress.CompressorFactory;
import org.elasticsearch.common.inject.Injector;
import org.elasticsearch.common.inject.ModulesBuilder;
import org.elasticsearch.common.io.CachedStreams;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsModule;
import org.elasticsearch.common.util.BigArraysModule;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.DiscoveryModule;
import org.elasticsearch.discovery.DiscoveryService;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.EnvironmentModule;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.env.NodeEnvironmentModule;
import org.elasticsearch.gateway.GatewayModule;
import org.elasticsearch.gateway.GatewayService;
import org.elasticsearch.gateway.local.LocalGatewayAllocator;
import org.elasticsearch.http.HttpServer;
import org.elasticsearch.http.HttpServerModule;
import org.elasticsearch.index.search.shape.ShapeModule;
import org.elasticsearch.indices.IndicesModule;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.breaker.CircuitBreakerModule;
import org.elasticsearch.indices.cache.filter.IndicesFilterCache;
import org.elasticsearch.indices.cluster.IndicesClusterStateService;
import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
import org.elasticsearch.indices.memory.IndexingMemoryController;
import org.elasticsearch.indices.store.IndicesStore;
import org.elasticsearch.indices.ttl.IndicesTTLService;
import org.elasticsearch.monitor.MonitorModule;
import org.elasticsearch.monitor.MonitorService;
import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.node.Node;
import org.elasticsearch.node.settings.NodeSettingsService;
import org.elasticsearch.percolator.PercolatorModule;
import org.elasticsearch.percolator.PercolatorService;
import org.elasticsearch.plugins.PluginsModule;
import org.elasticsearch.plugins.PluginsService;
import org.elasticsearch.repositories.RepositoriesModule;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestModule;
import org.elasticsearch.river.RiversManager;
import org.elasticsearch.river.RiversModule;
import org.elasticsearch.script.ScriptModule;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchModule;
import org.elasticsearch.search.SearchService;
import org.elasticsearch.snapshots.SnapshotsService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.threadpool.ThreadPoolModule;
import org.elasticsearch.transport.TransportModule;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.tribe.TribeModule;
import org.elasticsearch.tribe.TribeService;
import org.elasticsearch.watcher.ResourceWatcherModule;
import org.elasticsearch.watcher.ResourceWatcherService;
import java.io.IOException;
import java.util.Arrays;
import java.util.concurrent.TimeUnit;
import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
/**
*
*/
public final class InternalNode implements Node {
private static final String CLIENT_TYPE = "node";
public static final String HTTP_ENABLED = "http.enabled";
private final Lifecycle lifecycle = new Lifecycle();
private final Injector injector;
private final Settings settings;
private final Environment environment;
private final PluginsService pluginsService;
private final Client client;
public InternalNode() throws ElasticsearchException {
this(ImmutableSettings.Builder.EMPTY_SETTINGS, true);
}
public InternalNode(Settings preparedSettings, boolean loadConfigSettings) throws ElasticsearchException {
final Settings pSettings = settingsBuilder().put(preparedSettings)
.put(Client.CLIENT_TYPE_SETTING, CLIENT_TYPE).build();
Tuple<Settings, Environment> tuple = InternalSettingsPreparer.prepareSettings(pSettings, loadConfigSettings);
tuple = new Tuple<>(TribeService.processSettings(tuple.v1()), tuple.v2());
// The only place we can actually fake the version a node is running on:
Version version = pSettings.getAsVersion("tests.mock.version", Version.CURRENT);
ESLogger logger = Loggers.getLogger(Node.class, tuple.v1().get("name"));
logger.info("version[{}], pid[{}], build[{}/{}]", version, JvmInfo.jvmInfo().pid(), Build.CURRENT.hashShort(), Build.CURRENT.timestamp());
logger.info("initializing ...");
if (logger.isDebugEnabled()) {
Environment env = tuple.v2();
logger.debug("using home [{}], config [{}], data [{}], logs [{}], work [{}], plugins [{}]",
env.homeFile(), env.configFile(), Arrays.toString(env.dataFiles()), env.logsFile(),
env.workFile(), env.pluginsFile());
}
// workaround for LUCENE-6482
Codec.availableCodecs();
this.pluginsService = new PluginsService(tuple.v1(), tuple.v2());
this.settings = pluginsService.updatedSettings();
// create the environment based on the finalized (processed) view of the settings
this.environment = new Environment(this.settings());
CompressorFactory.configure(settings);
final NodeEnvironment nodeEnvironment;
try {
nodeEnvironment = new NodeEnvironment(this.settings, this.environment);
} catch (IOException ex) {
throw new ElasticsearchIllegalStateException("Failed to created node environment", ex);
}
final ThreadPool threadPool = new ThreadPool(settings);
boolean success = false;
try {
ModulesBuilder modules = new ModulesBuilder();
modules.add(new Version.Module(version));
modules.add(new CacheRecyclerModule(settings));
modules.add(new PageCacheRecyclerModule(settings));
modules.add(new CircuitBreakerModule(settings));
modules.add(new BigArraysModule(settings));
modules.add(new PluginsModule(settings, pluginsService));
modules.add(new SettingsModule(settings));
modules.add(new NodeModule(this));
modules.add(new NetworkModule());
modules.add(new ScriptModule(settings));
modules.add(new EnvironmentModule(environment));
modules.add(new NodeEnvironmentModule(nodeEnvironment));
modules.add(new ClusterNameModule(settings));
modules.add(new ThreadPoolModule(threadPool));
modules.add(new DiscoveryModule(settings));
modules.add(new ClusterModule(settings));
modules.add(new RestModule(settings));
modules.add(new TransportModule(settings));
if (settings.getAsBoolean(HTTP_ENABLED, true)) {
modules.add(new HttpServerModule(settings));
}
modules.add(new RiversModule(settings));
modules.add(new IndicesModule(settings));
modules.add(new SearchModule());
modules.add(new ActionModule(false));
modules.add(new MonitorModule(settings));
modules.add(new GatewayModule(settings));
modules.add(new NodeClientModule());
modules.add(new BulkUdpModule());
modules.add(new ShapeModule());
modules.add(new PercolatorModule());
modules.add(new ResourceWatcherModule());
modules.add(new RepositoriesModule());
modules.add(new TribeModule());
injector = modules.createInjector();
client = injector.getInstance(Client.class);
threadPool.setNodeSettingsService(injector.getInstance(NodeSettingsService.class));
success = true;
} finally {
if (!success) {
nodeEnvironment.close();
ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS);
}
}
logger.info("initialized");
}
@Override
public Settings settings() {
return this.settings;
}
@Override
public Client client() {
return client;
}
public Node start() {
if (!lifecycle.moveToStarted()) {
return this;
}
ESLogger logger = Loggers.getLogger(Node.class, settings.get("name"));
logger.info("starting ...");
// hack around dependency injection problem (for now...)
injector.getInstance(Discovery.class).setRoutingService(injector.getInstance(RoutingService.class));
for (Class<? extends LifecycleComponent> plugin : pluginsService.services()) {
injector.getInstance(plugin).start();
}
injector.getInstance(MappingUpdatedAction.class).start();
injector.getInstance(IndicesService.class).start();
injector.getInstance(IndexingMemoryController.class).start();
injector.getInstance(IndicesClusterStateService.class).start();
injector.getInstance(IndicesTTLService.class).start();
injector.getInstance(RiversManager.class).start();
injector.getInstance(SnapshotsService.class).start();
injector.getInstance(TransportService.class).start();
injector.getInstance(ClusterService.class).start();
injector.getInstance(RoutingService.class).start();
injector.getInstance(SearchService.class).start();
injector.getInstance(MonitorService.class).start();
injector.getInstance(RestController.class).start();
// TODO hack around circular dependecncies problems
injector.getInstance(LocalGatewayAllocator.class).setReallocation(injector.getInstance(ClusterService.class), injector.getInstance(RoutingService.class));
DiscoveryService discoService = injector.getInstance(DiscoveryService.class).start();
discoService.waitForInitialState();
// gateway should start after disco, so it can try and recovery from gateway on "start"
injector.getInstance(GatewayService.class).start();
if (settings.getAsBoolean("http.enabled", true)) {
injector.getInstance(HttpServer.class).start();
}
injector.getInstance(BulkUdpService.class).start();
injector.getInstance(ResourceWatcherService.class).start();
injector.getInstance(TribeService.class).start();
logger.info("started");
return this;
}
@Override
public Node stop() {
if (!lifecycle.moveToStopped()) {
return this;
}
ESLogger logger = Loggers.getLogger(Node.class, settings.get("name"));
logger.info("stopping ...");
injector.getInstance(TribeService.class).stop();
injector.getInstance(BulkUdpService.class).stop();
injector.getInstance(ResourceWatcherService.class).stop();
if (settings.getAsBoolean("http.enabled", true)) {
injector.getInstance(HttpServer.class).stop();
}
injector.getInstance(MappingUpdatedAction.class).stop();
injector.getInstance(RiversManager.class).stop();
injector.getInstance(SnapshotsService.class).stop();
// stop any changes happening as a result of cluster state changes
injector.getInstance(IndicesClusterStateService.class).stop();
// we close indices first, so operations won't be allowed on it
injector.getInstance(IndexingMemoryController.class).stop();
injector.getInstance(IndicesTTLService.class).stop();
injector.getInstance(RoutingService.class).stop();
injector.getInstance(ClusterService.class).stop();
injector.getInstance(DiscoveryService.class).stop();
injector.getInstance(MonitorService.class).stop();
injector.getInstance(GatewayService.class).stop();
injector.getInstance(SearchService.class).stop();
injector.getInstance(RestController.class).stop();
injector.getInstance(TransportService.class).stop();
for (Class<? extends LifecycleComponent> plugin : pluginsService.services()) {
injector.getInstance(plugin).stop();
}
// we should stop this last since it waits for resources to get released
// if we had scroll searchers etc or recovery going on we wait for to finish.
injector.getInstance(IndicesService.class).stop();
logger.info("stopped");
return this;
}
// During concurrent close() calls we want to make sure that all of them return after the node has completed it's shutdown cycle.
// If not, the hook that is added in Bootstrap#setup() will be useless: close() might not be executed, in case another (for example api) call
// to close() has already set some lifecycles to stopped. In this case the process will be terminated even if the first call to close() has not finished yet.
public synchronized void close() {
if (lifecycle.started()) {
stop();
}
if (!lifecycle.moveToClosed()) {
return;
}
ESLogger logger = Loggers.getLogger(Node.class, settings.get("name"));
logger.info("closing ...");
StopWatch stopWatch = new StopWatch("node_close");
stopWatch.start("tribe");
injector.getInstance(TribeService.class).close();
stopWatch.stop().start("bulk.udp");
injector.getInstance(BulkUdpService.class).close();
stopWatch.stop().start("http");
if (settings.getAsBoolean("http.enabled", true)) {
injector.getInstance(HttpServer.class).close();
}
stopWatch.stop().start("rivers");
injector.getInstance(RiversManager.class).close();
stopWatch.stop().start("snapshot_service");
injector.getInstance(SnapshotsService.class).close();
stopWatch.stop().start("client");
Releasables.close(injector.getInstance(Client.class));
stopWatch.stop().start("indices_cluster");
injector.getInstance(IndicesClusterStateService.class).close();
stopWatch.stop().start("indices");
injector.getInstance(IndicesFilterCache.class).close();
injector.getInstance(IndicesFieldDataCache.class).close();
injector.getInstance(IndexingMemoryController.class).close();
injector.getInstance(IndicesTTLService.class).close();
injector.getInstance(IndicesService.class).close();
injector.getInstance(IndicesStore.class).close();
stopWatch.stop().start("routing");
injector.getInstance(RoutingService.class).close();
stopWatch.stop().start("cluster");
injector.getInstance(ClusterService.class).close();
stopWatch.stop().start("discovery");
injector.getInstance(DiscoveryService.class).close();
stopWatch.stop().start("monitor");
injector.getInstance(MonitorService.class).close();
stopWatch.stop().start("gateway");
injector.getInstance(GatewayService.class).close();
stopWatch.stop().start("search");
injector.getInstance(SearchService.class).close();
stopWatch.stop().start("rest");
injector.getInstance(RestController.class).close();
stopWatch.stop().start("transport");
injector.getInstance(TransportService.class).close();
stopWatch.stop().start("percolator_service");
injector.getInstance(PercolatorService.class).close();
for (Class<? extends LifecycleComponent> plugin : pluginsService.services()) {
stopWatch.stop().start("plugin(" + plugin.getName() + ")");
injector.getInstance(plugin).close();
}
stopWatch.stop().start("script");
try {
injector.getInstance(ScriptService.class).close();
} catch(IOException e) {
logger.warn("ScriptService close failed", e);
}
stopWatch.stop().start("thread_pool");
// TODO this should really use ThreadPool.terminate()
injector.getInstance(ThreadPool.class).shutdown();
try {
injector.getInstance(ThreadPool.class).awaitTermination(10, TimeUnit.SECONDS);
} catch (InterruptedException e) {
// ignore
}
stopWatch.stop().start("thread_pool_force_shutdown");
try {
injector.getInstance(ThreadPool.class).shutdownNow();
} catch (Exception e) {
// ignore
}
stopWatch.stop();
if (logger.isTraceEnabled()) {
logger.trace("Close times for each service:\n{}", stopWatch.prettyPrint());
}
injector.getInstance(NodeEnvironment.class).close();
injector.getInstance(CacheRecycler.class).close();
injector.getInstance(PageCacheRecycler.class).close();
CachedStreams.clear();
logger.info("closed");
}
@Override
public boolean isClosed() {
return lifecycle.closed();
}
public Injector injector() {
return this.injector;
}
public static void main(String[] args) throws Exception {
final InternalNode node = new InternalNode();
node.start();
Runtime.getRuntime().addShutdownHook(new Thread() {
@Override
public void run() {
node.close();
}
});
}
} | apache-2.0 |
pecko/cft | org.eclipse.cft.server.tests/src/org/eclipse/cft/server/tests/core/BehaviourOperationsTest.java | 15835 | /*******************************************************************************
* Copyright (c) 2015 Pivotal Software, Inc.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* and Apache License v2.0 which accompanies this distribution.
*
* The Eclipse Public License is available at
*
* http://www.eclipse.org/legal/epl-v10.html
*
* and the Apache License v2.0 is available at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* You may elect to redistribute this code under either of these licenses.
*
* Contributors:
* Pivotal Software, Inc. - initial API and implementation
********************************************************************************/
package org.eclipse.cft.server.tests.core;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.cloudfoundry.client.lib.domain.CloudApplication;
import org.cloudfoundry.client.lib.domain.CloudApplication.AppState;
import org.eclipse.cft.server.core.internal.ApplicationAction;
import org.eclipse.cft.server.core.internal.CloudServerEvent;
import org.eclipse.cft.server.core.internal.CloudUtil;
import org.eclipse.cft.server.core.internal.application.EnvironmentVariable;
import org.eclipse.cft.server.core.internal.client.CloudFoundryApplicationModule;
import org.eclipse.cft.server.core.internal.client.CloudFoundryServerBehaviour;
import org.eclipse.cft.server.core.internal.client.DeploymentInfoWorkingCopy;
import org.eclipse.cft.server.core.internal.client.ICloudFoundryOperation;
import org.eclipse.cft.server.tests.util.CloudFoundryTestFixture;
import org.eclipse.cft.server.tests.util.WaitForApplicationToStopOp;
import org.eclipse.core.resources.IProject;
import org.eclipse.core.runtime.CoreException;
import org.eclipse.core.runtime.NullProgressMonitor;
import org.eclipse.wst.server.core.IModule;
import org.eclipse.wst.server.core.internal.Server;
/**
* Tests {@link ICloudFoundryOperation} in a target
* {@link CloudFoundryServerBehaviour} obtained through
* {@link CloudFoundryServerBehaviour#operations()} as well as refresh events
* triggered by each of the operations.
* <p/>
* This may be a long running test suite as it involves multiple application
* deployments as well as waiting for refresh operations to complete.
*
*/
public class BehaviourOperationsTest extends AbstractRefreshCloudTest {

    /** Supplies the shared fixture (target credentials, harness) used by every test in this class. */
    @Override
    protected CloudFoundryTestFixture getTestFixture() throws CoreException {
        return CloudFoundryTestFixture.getTestFixture();
    }

    /**
     * Verifies that the asynchronous instance-count update operation scales an app
     * from 1 to 2 instances and fires an EVENT_INSTANCES_UPDATED refresh event.
     */
    public void testAsynchInstanceUpdate() throws Exception {
        // Test asynchronous Application instance update and that it triggers
        // a module refresh event
        String prefix = "testAsynchInstanceUpdate";
        String expectedAppName = harness.getDefaultWebAppName(prefix);
        createWebApplicationProject();
        CloudFoundryApplicationModule appModule = deployAndWaitForDeploymentEvent(prefix);
        // freshly deployed app starts with a single instance everywhere it is reported
        assertEquals(1, appModule.getApplicationStats().getRecords().size());
        assertEquals(1, appModule.getInstanceCount());
        assertEquals(1, appModule.getInstancesInfo().getInstances().size());
        assertEquals(1, appModule.getDeploymentInfo().getInstances());
        asynchExecuteOperationWaitForRefresh(cloudServer.getBehaviour().operations().instancesUpdate(appModule, 2),
                prefix, CloudServerEvent.EVENT_INSTANCES_UPDATED);
        // Get updated module
        appModule = cloudServer.getExistingCloudModule(expectedAppName);
        // instance count of 2 must be visible in stats, module, instances info and deployment info
        assertEquals(2, appModule.getApplicationStats().getRecords().size());
        assertEquals(2, appModule.getInstanceCount());
        assertEquals(2, appModule.getInstancesInfo().getInstances().size());
        assertEquals(2, appModule.getDeploymentInfo().getInstances());
        // cross-check directly against the Cloud Foundry client view of the app
        CloudApplication actualApp = getUpdatedApplication(expectedAppName);
        assertEquals(2, actualApp.getInstances());
    }

    /**
     * Verifies that the asynchronous memory update operation changes the app's memory
     * and fires an EVENT_APPLICATION_REFRESHED event.
     */
    public void testAsynchMemoryUpdate() throws Exception {
        // Test asynchronous app memory update and that it triggers
        // a module refresh event
        String prefix = "testAsynchMemoryUpdate";
        String expectedAppName = harness.getDefaultWebAppName(prefix);
        createWebApplicationProject();
        CloudFoundryApplicationModule appModule = deployAndWaitForDeploymentEvent(prefix);
        // deliberately non-default memory value so the change is observable
        final int changedMemory = 678;
        asynchExecuteOperationWaitForRefresh(
                cloudServer.getBehaviour().operations().memoryUpdate(appModule, changedMemory), prefix,
                CloudServerEvent.EVENT_APPLICATION_REFRESHED);
        // Get updated module
        appModule = cloudServer.getExistingCloudModule(expectedAppName);
        // Verify that the same module has been updated
        assertEquals(changedMemory, appModule.getDeploymentInfo().getMemory());
        assertEquals(changedMemory, appModule.getApplication().getMemory());
        assertEquals(changedMemory, appModule.getApplication().getMemory());
    }

    /**
     * Verifies that updating environment variables asynchronously propagates the
     * values both to the deployed app and to the local deployment info.
     */
    public void testAsynchEnvVarUpdate() throws Exception {
        // Test asynchronous app memory update and that it triggers
        // a module refresh event
        String prefix = "testAsynchEnvVarUpdate";
        String expectedAppName = harness.getDefaultWebAppName(prefix);
        createWebApplicationProject();
        CloudFoundryApplicationModule appModule = deployAndWaitForDeploymentEvent(prefix);
        EnvironmentVariable variable = new EnvironmentVariable();
        variable.setVariable("JAVA_OPTS");
        variable.setValue("-Xdebug -Xrunjdwp:server=y,transport=dt_socket,address=4000,suspend=n");
        List<EnvironmentVariable> vars = new ArrayList<EnvironmentVariable>();
        vars.add(variable);
        // stage the env var change through a deployment info working copy first
        DeploymentInfoWorkingCopy cp = appModule.resolveDeploymentInfoWorkingCopy(new NullProgressMonitor());
        cp.setEnvVariables(vars);
        cp.save();
        asynchExecuteOperationWaitForRefresh(
                cloudServer.getBehaviour().operations().environmentVariablesUpdate(appModule.getLocalModule(),
                        appModule.getDeployedApplicationName(), cp.getEnvVariables()),
                prefix, CloudServerEvent.EVENT_APPLICATION_REFRESHED);
        // Get updated module
        appModule = cloudServer.getExistingCloudModule(appModule.getDeployedApplicationName());
        // compare the env vars reported by the cloud against the ones we set
        Map<String, String> actualVars = getUpdatedApplication(expectedAppName).getEnvAsMap();
        assertEquals(vars.size(), actualVars.size());
        Map<String, String> expectedAsMap = new HashMap<String, String>();
        for (EnvironmentVariable v : vars) {
            String actualValue = actualVars.get(v.getVariable());
            assertEquals(v.getValue(), actualValue);
            expectedAsMap.put(v.getVariable(), v.getValue());
        }
        // Also verify that the env vars are set in deployment info
        assertEquals(vars.size(), appModule.getDeploymentInfo().getEnvVariables().size());
        List<EnvironmentVariable> deploymentInfoVars = appModule.getDeploymentInfo().getEnvVariables();
        for (EnvironmentVariable var : deploymentInfoVars) {
            String expectedValue = expectedAsMap.get(var.getVariable());
            assertEquals(var.getValue(), expectedValue);
        }
    }

    /**
     * Verifies that asynchronously remapping an application's URL replaces the
     * original default URL in both deployment info and the cloud application.
     */
    public void testAsynchAppURLUpdate() throws Exception {
        // Test asynchronous URL update of an application and that it triggers
        // a module refresh event
        String prefix = "testAsynchAppURLUpdate";
        final String expectedAppName = harness.getDefaultWebAppName(prefix);
        createWebApplicationProject();
        CloudFoundryApplicationModule appModule = deployAndWaitForDeploymentEvent(prefix);
        String expectedURL = harness.getExpectedDefaultURL(prefix);
        assertEquals(expectedURL, appModule.getDeploymentInfo().getUris().get(0));
        // map a different URL than the default one generated at deployment
        String changedURL = harness.getExpectedDefaultURL("changedURtestCloudModuleRefreshURLUpdate");
        final List<String> expectedUrls = new ArrayList<String>();
        expectedUrls.add(changedURL);
        asynchExecuteOperationWaitForRefresh(
                cloudServer.getBehaviour().operations().mappedUrlsUpdate(expectedAppName, expectedUrls), prefix,
                CloudServerEvent.EVENT_APPLICATION_REFRESHED);
        // Get updated module
        appModule = cloudServer.getExistingCloudModule(expectedAppName);
        assertEquals(expectedUrls, appModule.getDeploymentInfo().getUris());
        assertEquals(expectedUrls, appModule.getApplication().getUris());
    }

    /**
     * Verifies that stopping a started application asynchronously results in a
     * STOPPED cloud state and a stopped server module state.
     */
    public void testAsynchStopApplication() throws Exception {
        // Test asynchronous application stop and that it triggers
        // a module refresh event
        String prefix = "testAsynchStopApplication";
        createWebApplicationProject();
        // Deploy and start the app without the refresh listener
        CloudFoundryApplicationModule appModule = deployAndWaitForAppStart(prefix);
        asynchExecuteOperationWaitForRefresh(
                cloudServer.getBehaviour().operations().applicationDeployment(appModule, ApplicationAction.STOP),
                prefix, CloudServerEvent.EVENT_APP_DEPLOYMENT_CHANGED);
        appModule = cloudServer.getExistingCloudModule(appModule.getDeployedApplicationName());
        // block until the app is actually reported as stopped before asserting
        boolean stopped = new WaitForApplicationToStopOp(cloudServer, appModule).run(new NullProgressMonitor());
        assertTrue("Expected application to be stopped", stopped);
        assertTrue("Expected application to be stopped",
                appModule.getApplication().getState().equals(AppState.STOPPED));
        assertTrue("Expected application to be stopped", appModule.getState() == Server.STATE_STOPPED);
    }

    /**
     * Verifies that starting a stopped application asynchronously transitions it to
     * STARTED and makes instance info and stats available.
     */
    public void testAsynchStartApplication() throws Exception {
        // Test asynchronous application stop and that it triggers
        // a module refresh event
        String prefix = "testAsynchStartApplication";
        createWebApplicationProject();
        // deploy in stop mode (second arg true) so the start operation is what is tested
        CloudFoundryApplicationModule appModule = deployApplication(prefix, true);
        assertTrue("Expected application to be stopped",
                appModule.getApplication().getState().equals(AppState.STOPPED));
        assertTrue("Expected application to be stopped", appModule.getState() == Server.STATE_STOPPED);
        asynchExecuteOperationWaitForRefresh(
                cloudServer.getBehaviour().operations().applicationDeployment(appModule, ApplicationAction.START),
                prefix, CloudServerEvent.EVENT_APP_DEPLOYMENT_CHANGED);
        waitForApplicationToStart(appModule.getLocalModule(), prefix);
        appModule = cloudServer.getExistingCloudModule(appModule.getDeployedApplicationName());
        assertTrue("Expected application to be started",
                appModule.getApplication().getState().equals(AppState.STARTED));
        assertTrue("Expected application to be started", appModule.getState() == Server.STATE_STARTED);
        // Verify that instances info is available
        assertEquals("Expected instances information for running app", 1,
                appModule.getInstancesInfo().getInstances().size());
        assertNotNull("Expected instances information for running app",
                appModule.getInstancesInfo().getInstances().get(0).getSince());
        assertEquals("Expected instance stats for running app", 1, appModule.getApplicationStats().getRecords().size());
    }

    /**
     * Verifies that the RESTART action started on a stopped application brings it
     * to STARTED with instance info and stats available.
     */
    public void testAsychRestartApplication() throws Exception {
        String prefix = "testAsychRestartApplication";
        createWebApplicationProject();
        // deploy in stop mode so RESTART performs the (re)start
        CloudFoundryApplicationModule appModule = deployApplication(prefix, true);
        assertTrue("Expected application to be stopped",
                appModule.getApplication().getState().equals(AppState.STOPPED));
        assertTrue("Expected application to be stopped", appModule.getState() == Server.STATE_STOPPED);
        asynchExecuteOperationWaitForRefresh(
                cloudServer.getBehaviour().operations().applicationDeployment(appModule, ApplicationAction.RESTART),
                prefix, CloudServerEvent.EVENT_APP_DEPLOYMENT_CHANGED);
        waitForApplicationToStart(appModule.getLocalModule(), prefix);
        appModule = cloudServer.getExistingCloudModule(appModule.getDeployedApplicationName());
        assertTrue("Expected application to be started",
                appModule.getApplication().getState().equals(AppState.STARTED));
        assertTrue("Expected application to be started", appModule.getState() == Server.STATE_STARTED);
        // Verify that instances info is available
        assertEquals("Expected instances information for running app", 1,
                appModule.getInstancesInfo().getInstances().size());
        assertNotNull("Expected instances information for running app",
                appModule.getInstancesInfo().getInstances().get(0).getSince());
        assertEquals("Expected instance stats for running app", 1, appModule.getApplicationStats().getRecords().size());
    }

    /**
     * Verifies that UPDATE_RESTART (republish and restart) on a stopped application
     * brings it to STARTED with instance info and stats available.
     */
    public void testAsynchUpdateRestartApplication() throws Exception {
        String prefix = "testAsynchUpdateRestartApplication";
        createWebApplicationProject();
        // deploy in stop mode so UPDATE_RESTART performs the publish + start
        CloudFoundryApplicationModule appModule = deployApplication(prefix, true);
        assertTrue("Expected application to be stopped",
                appModule.getApplication().getState().equals(AppState.STOPPED));
        assertTrue("Expected application to be stopped", appModule.getState() == Server.STATE_STOPPED);
        asynchExecuteOperationWaitForRefresh(cloudServer.getBehaviour().operations().applicationDeployment(appModule,
                ApplicationAction.UPDATE_RESTART), prefix, CloudServerEvent.EVENT_APP_DEPLOYMENT_CHANGED);
        waitForApplicationToStart(appModule.getLocalModule(), prefix);
        appModule = cloudServer.getExistingCloudModule(appModule.getDeployedApplicationName());
        assertTrue("Expected application to be started",
                appModule.getApplication().getState().equals(AppState.STARTED));
        assertTrue("Expected application to be started", appModule.getState() == Server.STATE_STARTED);
        // Verify that instances info is available
        assertEquals("Expected instances information for running app", 1,
                appModule.getInstancesInfo().getInstances().size());
        assertNotNull("Expected instances information for running app",
                appModule.getInstancesInfo().getInstances().get(0).getSince());
        assertEquals("Expected instance stats for running app", 1, appModule.getApplicationStats().getRecords().size());
    }

    /**
     * Verifies that PUSH with a fixture configured for stop mode deploys the
     * application but leaves it STOPPED.
     */
    public void testAsynchPushApplicationStopMode() throws Exception {
        String prefix = "testAsynchPushApplicationStopMode";
        String expectedAppName = harness.getDefaultWebAppName(prefix);
        IProject project = createWebApplicationProject();
        // third arg true = deploy stopped
        getTestFixture().configureForApplicationDeployment(expectedAppName, CloudUtil.DEFAULT_MEMORY, true);
        IModule module = getModule(project.getName());
        cloudServer.getBehaviour().operations().applicationDeployment(new IModule[] { module }, ApplicationAction.PUSH)
                .run(new NullProgressMonitor());
        CloudFoundryApplicationModule appModule = cloudServer.getExistingCloudModule(expectedAppName);
        assertTrue("Expected application to be stopped",
                appModule.getApplication().getState().equals(AppState.STOPPED));
        assertTrue("Expected application to be stopped", appModule.getState() == Server.STATE_STOPPED);
    }

    /**
     * Verifies that PUSH with a fixture configured for start mode deploys the
     * application and brings it to STARTED with instance info and stats available.
     */
    public void testAsynchPushApplicationStartMode() throws Exception {
        String prefix = "testAsynchPushApplicationStartMode";
        String expectedAppName = harness.getDefaultWebAppName(prefix);
        IProject project = createWebApplicationProject();
        // third arg false = deploy started
        getTestFixture().configureForApplicationDeployment(expectedAppName, CloudUtil.DEFAULT_MEMORY, false);
        IModule module = getModule(project.getName());
        cloudServer.getBehaviour().operations().applicationDeployment(new IModule[] { module }, ApplicationAction.PUSH)
                .run(new NullProgressMonitor());
        CloudFoundryApplicationModule appModule = cloudServer.getExistingCloudModule(expectedAppName);
        waitForApplicationToStart(appModule.getLocalModule(), prefix);
        appModule = cloudServer.getExistingCloudModule(appModule.getDeployedApplicationName());
        assertTrue("Expected application to be started",
                appModule.getApplication().getState().equals(AppState.STARTED));
        assertTrue("Expected application to be started", appModule.getState() == Server.STATE_STARTED);
        // Verify that instances info is available
        assertEquals("Expected instances information for running app", 1,
                appModule.getInstancesInfo().getInstances().size());
        assertNotNull("Expected instances information for running app",
                appModule.getInstancesInfo().getInstances().get(0).getSince());
        assertEquals("Expected instance stats for running app", 1, appModule.getApplicationStats().getRecords().size());
    }
}
| apache-2.0 |
deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-tests/src/test/java/org/nd4j/linalg/api/iterator/NDIndexIteratorTest.java | 1675 | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
package org.nd4j.linalg.api.iterator;
import lombok.val;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.nd4j.linalg.BaseNd4jTest;
import org.nd4j.linalg.api.iter.NdIndexIterator;
import org.nd4j.linalg.factory.Nd4jBackend;
import static org.junit.Assert.assertArrayEquals;
/**
* @author Adam Gibson
*/
@RunWith(Parameterized.class)
public class NDIndexIteratorTest extends BaseNd4jTest {

    public NDIndexIteratorTest(Nd4jBackend backend) {
        super(backend);
    }

    /**
     * Verifies that {@link NdIndexIterator} over a 2x2 shape yields the four
     * index tuples in row-major order: [0,0], [0,1], [1,0], [1,1].
     */
    @Test
    public void testIterate() {
        final NdIndexIterator shapeIter = new NdIndexIterator(2, 2);
        final long[][] expectedIndexes = {{0, 0}, {0, 1}, {1, 0}, {1, 1}};
        for (final long[] expected : expectedIndexes) {
            assertArrayEquals(expected, shapeIter.next());
        }
    }

    @Override
    public char ordering() {
        return 'f';
    }
}
| apache-2.0 |
kevinearls/camel | platforms/spring-boot/components-starter/camel-aws-starter/src/main/java/org/apache/camel/component/aws/ddbstream/springboot/DdbStreamComponentAutoConfiguration.java | 6203 | /**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.aws.ddbstream.springboot;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.annotation.Generated;
import org.apache.camel.CamelContext;
import org.apache.camel.component.aws.ddbstream.DdbStreamComponent;
import org.apache.camel.spi.ComponentCustomizer;
import org.apache.camel.spi.HasId;
import org.apache.camel.spring.boot.CamelAutoConfiguration;
import org.apache.camel.spring.boot.ComponentConfigurationProperties;
import org.apache.camel.spring.boot.util.CamelPropertiesHelper;
import org.apache.camel.spring.boot.util.ConditionalOnCamelContextAndAutoConfigurationBeans;
import org.apache.camel.spring.boot.util.GroupCondition;
import org.apache.camel.spring.boot.util.HierarchicalPropertiesEvaluator;
import org.apache.camel.support.IntrospectionSupport;
import org.apache.camel.util.ObjectHelper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.AutoConfigureAfter;
import org.springframework.boot.autoconfigure.condition.ConditionalOnBean;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.ApplicationContext;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Conditional;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Lazy;
/**
 * Generated by camel-package-maven-plugin - do not edit this file!
 */
// NOTE(review): this class is regenerated by the build; any hand edit
// (including these comments) will be lost on the next generation run.
@Generated("org.apache.camel.maven.packaging.SpringBootAutoConfigurationMojo")
@Configuration
@Conditional({ConditionalOnCamelContextAndAutoConfigurationBeans.class,
        DdbStreamComponentAutoConfiguration.GroupConditions.class})
@AutoConfigureAfter(CamelAutoConfiguration.class)
@EnableConfigurationProperties({ComponentConfigurationProperties.class,
        DdbStreamComponentConfiguration.class})
public class DdbStreamComponentAutoConfiguration {
    private static final Logger LOGGER = LoggerFactory
            .getLogger(DdbStreamComponentAutoConfiguration.class);
    @Autowired
    private ApplicationContext applicationContext;
    @Autowired
    private CamelContext camelContext;
    @Autowired
    private DdbStreamComponentConfiguration configuration;
    // Optional list of user-supplied customizers; null when none are registered,
    // hence the ObjectHelper.isNotEmpty guard below.
    @Autowired(required = false)
    private List<ComponentCustomizer<DdbStreamComponent>> customizers;
    // Gates this auto-configuration on the "camel.component" /
    // "camel.component.aws-ddbstream" property groups.
    static class GroupConditions extends GroupCondition {
        public GroupConditions() {
            super("camel.component", "camel.component.aws-ddbstream");
        }
    }
    /**
     * Creates and registers the {@code aws-ddbstream} component unless the
     * application already defines one. The component is populated from
     * {@link DdbStreamComponentConfiguration}, nested configuration holders are
     * unwrapped via reflection, and any enabled customizers are applied last.
     *
     * @return the fully configured component bean
     * @throws Exception if property introspection or assignment fails
     */
    @Lazy
    @Bean(name = "aws-ddbstream-component")
    @ConditionalOnMissingBean(DdbStreamComponent.class)
    public DdbStreamComponent configureDdbStreamComponent() throws Exception {
        DdbStreamComponent component = new DdbStreamComponent();
        component.setCamelContext(camelContext);
        Map<String, Object> parameters = new HashMap<>();
        IntrospectionSupport.getProperties(configuration, parameters, null,
                false);
        for (Map.Entry<String, Object> entry : parameters.entrySet()) {
            Object value = entry.getValue();
            Class<?> paramClass = value.getClass();
            // Generated *NestedConfiguration holders wrap the real Camel type;
            // replace the holder with an instance of the class named by its
            // static CAMEL_NESTED_CLASS field, copying the properties across.
            if (paramClass.getName().endsWith("NestedConfiguration")) {
                Class nestedClass = null;
                try {
                    nestedClass = (Class) paramClass.getDeclaredField(
                            "CAMEL_NESTED_CLASS").get(null);
                    HashMap<String, Object> nestedParameters = new HashMap<>();
                    IntrospectionSupport.getProperties(value, nestedParameters,
                            null, false);
                    Object nestedProperty = nestedClass.newInstance();
                    CamelPropertiesHelper.setCamelProperties(camelContext,
                            nestedProperty, nestedParameters, false);
                    entry.setValue(nestedProperty);
                } catch (NoSuchFieldException e) {
                    // Holder without CAMEL_NESTED_CLASS: deliberately left as-is.
                }
            }
        }
        CamelPropertiesHelper.setCamelProperties(camelContext, component,
                parameters, false);
        if (ObjectHelper.isNotEmpty(customizers)) {
            for (ComponentCustomizer<DdbStreamComponent> customizer : customizers) {
                // HasId customizers are additionally filtered by their id;
                // anonymous ones only by the component-level property groups.
                boolean useCustomizer = (customizer instanceof HasId)
                        ? HierarchicalPropertiesEvaluator.evaluate(
                                applicationContext.getEnvironment(),
                                "camel.component.customizer",
                                "camel.component.aws-ddbstream.customizer",
                                ((HasId) customizer).getId())
                        : HierarchicalPropertiesEvaluator.evaluate(
                                applicationContext.getEnvironment(),
                                "camel.component.customizer",
                                "camel.component.aws-ddbstream.customizer");
                if (useCustomizer) {
                    LOGGER.debug("Configure component {}, with customizer {}",
                            component, customizer);
                    customizer.customize(component);
                }
            }
        }
        return component;
    }
}
davidzchen/error-prone | core/src/test/java/com/google/errorprone/refaster/testdata/input/StaticFieldTemplateExample.java | 1001 | /*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package com.google.errorprone.refaster.testdata;
import java.util.Collections;
import java.util.List;
/**
 * Test data for {@code StaticFieldTemplate}.
 *
 * <p>NOTE(review): this file lives under {@code testdata/input/} and is paired
 * with an expected-output file; the exact code shape (including the raw
 * {@code Collections.EMPTY_LIST} reference) is the fixture being refactored,
 * so do not "modernize" it here without updating the expected output to match.
 *
 * @author mdempsky@google.com (Matthew Dempsky)
 */
public class StaticFieldTemplateExample {
  public void foo() {
    @SuppressWarnings("unchecked")
    List<Integer> list = Collections.EMPTY_LIST;
    System.out.println(list);
  }
}
| apache-2.0 |
punkhorn/camel-upstream | core/camel-core/src/test/java/org/apache/camel/processor/RecipientListNoCacheTest.java | 1993 | /**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.processor;
import org.apache.camel.ContextTestSupport;
import org.apache.camel.builder.RouteBuilder;
import org.apache.camel.component.mock.MockEndpoint;
import org.junit.Test;
public class RecipientListNoCacheTest extends ContextTestSupport {

    /**
     * Sends two messages through a recipient list whose producer cache is
     * disabled (cacheSize == -1) and verifies all three mock endpoints still
     * receive both bodies in order.
     */
    @Test
    public void testNoCache() throws Exception {
        for (String uri : new String[] {"mock:x", "mock:y", "mock:z"}) {
            getMockEndpoint(uri).expectedBodiesReceived("foo", "bar");
        }
        sendBody("foo");
        sendBody("bar");
        assertMockEndpointsSatisfied();
    }

    // Routes the payload to all three mock endpoints via the header-driven list.
    protected void sendBody(String payload) {
        template.sendBodyAndHeader("direct:a", payload, "recipientListHeader",
            "mock:x,mock:y,mock:z");
    }

    protected RouteBuilder createRouteBuilder() {
        return new RouteBuilder() {
            public void configure() {
                // cacheSize(-1) turns the producer cache off entirely.
                from("direct:a")
                    .recipientList(header("recipientListHeader").tokenize(","))
                    .cacheSize(-1);
            }
        };
    }
}
| apache-2.0 |
spring-cloud-task-app-starters/composed-task-runner | spring-cloud-starter-task-composedtaskrunner/src/test/java/org/springframework/cloud/task/app/composedtaskrunner/ComposedTaskRunnerStepFactoryTests.java | 3544 | /*
* Copyright 2016-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.task.app.composedtaskrunner;
import javax.sql.DataSource;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.batch.core.Step;
import org.springframework.batch.core.StepExecutionListener;
import org.springframework.batch.core.configuration.annotation.StepBuilderFactory;
import org.springframework.batch.core.repository.JobRepository;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.mock.mockito.MockBean;
import org.springframework.cloud.dataflow.rest.client.TaskOperations;
import org.springframework.cloud.task.app.composedtaskrunner.properties.ComposedTaskProperties;
import org.springframework.cloud.task.configuration.TaskConfigurer;
import org.springframework.cloud.task.configuration.TaskProperties;
import org.springframework.cloud.task.repository.TaskExplorer;
import org.springframework.cloud.task.repository.TaskRepository;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringRunner;
import org.springframework.transaction.PlatformTransactionManager;
import static org.mockito.Mockito.mock;
/**
 * Verifies that {@link ComposedTaskRunnerStepFactory} produces a step carrying
 * the configured name with an unlimited start limit.
 *
 * @author Glenn Renfro
 */
@RunWith(SpringRunner.class)
@ContextConfiguration(classes={ComposedTaskRunnerStepFactoryTests.StepFactoryConfiguration.class})
public class ComposedTaskRunnerStepFactoryTests {
	// Factory under test, built by StepFactoryConfiguration with the name "FOOBAR".
	@Autowired
	ComposedTaskRunnerStepFactory stepFactory;
	@Test
	public void testStep() throws Exception{
		Step step = stepFactory.getObject();
		// Name comes from the "FOOBAR" passed to the factory below; the start
		// limit is expected to be unlimited (Integer.MAX_VALUE).
		Assert.assertEquals("FOOBAR", step.getName());
		Assert.assertEquals(Integer.MAX_VALUE, step.getStartLimit());
	}
	// Minimal test context: mocks for the collaborators the factory touches,
	// plus the factory itself.
	@Configuration
	public static class StepFactoryConfiguration {
		@MockBean
		public StepExecutionListener composedTaskStepExecutionListener;
		@MockBean
		public TaskOperations taskOperations;
		@Bean
		public TaskProperties taskProperties() {
			return new TaskProperties();
		}
		@Bean
		public StepBuilderFactory steps(){
			return new StepBuilderFactory(mock(JobRepository.class), mock(PlatformTransactionManager.class));
		}
		// Stub TaskConfigurer: only getTaskExplorer()/getTaskDataSource() return
		// mocks; the repository and transaction manager are intentionally null
		// because the test never exercises them.
		@Bean
		public TaskConfigurer taskConfigurer() {
			return new TaskConfigurer() {
				@Override
				public TaskRepository getTaskRepository() {
					return null;
				}
				@Override
				public PlatformTransactionManager getTransactionManager() {
					return null;
				}
				@Override
				public TaskExplorer getTaskExplorer() {
					return mock(TaskExplorer.class);
				}
				@Override
				public DataSource getTaskDataSource() {
					return mock(DataSource.class);
				}
			};
		}
		@Bean
		public ComposedTaskRunnerStepFactory stepFactory(TaskProperties taskProperties) {
			return new ComposedTaskRunnerStepFactory(new ComposedTaskProperties(), "FOOBAR");
		}
	}
}
| apache-2.0 |
gocd/gocd | config/config-server/src/test/java/com/thoughtworks/go/config/registry/ConfigElementImplementationRegistrarTest.java | 6789 | /*
* Copyright 2022 ThoughtWorks, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.thoughtworks.go.config.registry;
import com.thoughtworks.go.config.*;
import com.thoughtworks.go.config.materials.PackageMaterialConfig;
import com.thoughtworks.go.config.materials.PluggableSCMMaterialConfig;
import com.thoughtworks.go.config.materials.dependency.DependencyMaterialConfig;
import com.thoughtworks.go.config.materials.git.GitMaterialConfig;
import com.thoughtworks.go.config.materials.mercurial.HgMaterialConfig;
import com.thoughtworks.go.config.materials.perforce.P4MaterialConfig;
import com.thoughtworks.go.config.materials.svn.SvnMaterialConfig;
import com.thoughtworks.go.config.materials.tfs.TfsMaterialConfig;
import com.thoughtworks.go.config.pluggabletask.PluggableTask;
import com.thoughtworks.go.domain.Task;
import com.thoughtworks.go.domain.config.Admin;
import com.thoughtworks.go.domain.config.Configuration;
import com.thoughtworks.go.domain.config.PluginConfiguration;
import com.thoughtworks.go.domain.materials.MaterialConfig;
import com.thoughtworks.go.plugin.access.pluggabletask.PluggableTaskConfigStore;
import com.thoughtworks.go.plugin.access.pluggabletask.TaskPreference;
import com.thoughtworks.go.plugin.api.task.TaskView;
import com.thoughtworks.go.plugins.PluginExtensions;
import com.thoughtworks.go.plugins.presentation.PluggableViewModel;
import com.thoughtworks.go.presentation.PluggableTaskViewModel;
import com.thoughtworks.go.presentation.TaskViewModel;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import java.util.ArrayList;
import java.util.List;
import static org.hamcrest.Matchers.is;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
// Checks that ConfigElementImplementationRegistrar registers the expected
// default implementations (tasks, materials, artifacts, admins) and wires
// view models for built-in and pluggable tasks. The assertions below compare
// against ordered lists, so registration order is part of the contract.
@ExtendWith(MockitoExtension.class)
public class ConfigElementImplementationRegistrarTest {
    @Mock
    private PluginExtensions pluginExtns;
    private ConfigElementImplementationRegistry registry;
    private ConfigElementImplementationRegistrar registrar;
    @BeforeEach
    public void setUp() {
        // No plugin-contributed config tags: the registry starts empty and is
        // populated solely by registrar.initialize().
        when(pluginExtns.configTagImplementations()).thenReturn(new ArrayList<>());
        registry = new ConfigElementImplementationRegistry(pluginExtns);
        registrar = new ConfigElementImplementationRegistrar(registry);
        registrar.initialize();
    }
    @Test
    public void testShouldProvideTheDefaultTaskConfigMappingsOnlyForBuiltInTasks() {
        List<Class<? extends Task>> tasks = new ArrayList<>();
        tasks.add(AntTask.class);
        tasks.add(NantTask.class);
        tasks.add(ExecTask.class);
        tasks.add(RakeTask.class);
        tasks.add(FetchTask.class);
        tasks.add(PluggableTask.class);
        tasks.add(FetchPluggableArtifactTask.class);
        assertThat(registry.implementersOf(Task.class), is(tasks));
    }
    @Test
    public void testShouldProvideTheDefaultMaterialConfigMappings() {
        List<Class<? extends MaterialConfig>> materials = new ArrayList<>();
        materials.add(SvnMaterialConfig.class);
        materials.add(HgMaterialConfig.class);
        materials.add(GitMaterialConfig.class);
        materials.add(DependencyMaterialConfig.class);
        materials.add(P4MaterialConfig.class);
        materials.add(TfsMaterialConfig.class);
        materials.add(PackageMaterialConfig.class);
        materials.add(PluggableSCMMaterialConfig.class);
        assertThat(registry.implementersOf(MaterialConfig.class), is(materials));
    }
    @Test
    public void testShouldProvideTheDefaultArtifactsConfigMappings() {
        List<Class<? extends ArtifactTypeConfig>> artifacts = new ArrayList<>();
        artifacts.add(TestArtifactConfig.class);
        artifacts.add(BuildArtifactConfig.class);
        artifacts.add(PluggableArtifactConfig.class);
        assertThat(registry.implementersOf(ArtifactTypeConfig.class), is(artifacts));
    }
    @Test
    public void testShouldProvideTheDefaultAdminConfigMappings() {
        List<Class<? extends Admin>> admin = new ArrayList<>();
        admin.add(AdminUser.class);
        admin.add(AdminRole.class);
        assertThat(registry.implementersOf(Admin.class), is(admin));
    }
    @Test
    public void shouldRegisterViewEnginesForAllTasks() {
        // Each built-in task must resolve to a TaskViewModel pointing at its
        // admin/tasks/<type>/<action> template (checked for "new" and "edit").
        assertReturnsAppropriateViewModelForInbuiltTasks(registry, new AntTask(), "ant");
        assertReturnsAppropriateViewModelForInbuiltTasks(registry, new ExecTask(), "exec");
        assertReturnsAppropriateViewModelForInbuiltTasks(registry, new FetchTask(), "fetch");
        assertReturnsAppropriateViewModelForInbuiltTasks(registry, new RakeTask(), "rake");
        assertReturnsAppropriateViewModelForInbuiltTasks(registry, new NantTask(), "nant");
    }
    @Test
    public void shouldRegisterViewEngineForPluggableTask() {
        // A plugin task's view model is backed by the view registered in the
        // PluggableTaskConfigStore for its plugin id.
        TaskPreference taskPreference = mock(TaskPreference.class);
        TaskView view = mock(TaskView.class);
        when(taskPreference.getView()).thenReturn(view);
        when(view.template()).thenReturn("plugin-template-value");
        when(view.displayValue()).thenReturn("Plugin display value");
        PluggableTaskConfigStore.store().setPreferenceFor("plugin1", taskPreference);
        PluggableTask pluggableTask = new PluggableTask(new PluginConfiguration("plugin1", "2"), new Configuration());
        PluggableViewModel<PluggableTask> pluggableTaskViewModel = registry.getViewModelFor(pluggableTask, "new");
        assertEquals(PluggableTaskViewModel.class, pluggableTaskViewModel.getClass());
        assertThat(pluggableTaskViewModel.getModel(), is(pluggableTask));
    }
    // Asserts the registry yields the expected TaskViewModel for both the
    // "new" and "edit" actions of a built-in task type.
    private void assertReturnsAppropriateViewModelForInbuiltTasks(ConfigElementImplementationRegistry registry, Task task, final String taskType) {
        for (String actionName : new String[]{"new", "edit"}) {
            PluggableViewModel viewModelFor = registry.getViewModelFor(task, actionName);
            assertThat(viewModelFor, is(new TaskViewModel(task, String.format("admin/tasks/%s/%s", taskType, actionName))));
        }
    }
}
| apache-2.0 |
Soya93/Extract-Refactoring | platform/platform-api/src/com/intellij/util/Alarm.java | 13591 | /*
* Copyright 2000-2016 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.util;
import com.intellij.openapi.Disposable;
import com.intellij.openapi.application.Application;
import com.intellij.openapi.application.ApplicationActivationListener;
import com.intellij.openapi.application.ApplicationManager;
import com.intellij.openapi.application.ModalityState;
import com.intellij.openapi.diagnostic.Logger;
import com.intellij.openapi.util.Disposer;
import com.intellij.openapi.util.Pair;
import com.intellij.openapi.wm.IdeFrame;
import com.intellij.util.concurrency.AppExecutorUtil;
import com.intellij.util.concurrency.EdtExecutorService;
import com.intellij.util.concurrency.QueueProcessor;
import com.intellij.util.messages.MessageBus;
import com.intellij.util.messages.MessageBusConnection;
import com.intellij.util.ui.EdtInvocationManager;
import com.intellij.util.ui.UIUtil;
import com.intellij.util.ui.update.Activatable;
import com.intellij.util.ui.update.UiNotifyConnector;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.jetbrains.annotations.TestOnly;
import javax.swing.*;
import java.awt.*;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.*;
/**
* Allows to schedule Runnable instances (requests) to be executed after a specific time interval on a specific thread.
* Use {@link #addRequest} methods to schedule the requests.
* Two requests scheduled with the same delay are executed sequentially, one after the other.
* {@link #cancelAllRequests()} and {@link #cancelRequest(Runnable)} allow to cancel already scheduled requests.
*/
public class Alarm implements Disposable {
  private static final Logger LOG = Logger.getInstance("#com.intellij.util.Alarm");
  // Set once in dispose(); volatile so scheduled tasks observe it without taking LOCK.
  private volatile boolean myDisposed;
  // Requests already handed to the executor.
  private final List<Request> myRequests = new SmartList<>(); // guarded by LOCK
  // Requests parked until the activation component becomes showing (see setActivationComponent).
  private final List<Request> myPendingRequests = new SmartList<>(); // guarded by LOCK
  private final ScheduledExecutorService myExecutorService;
  // Single monitor protecting myRequests, myPendingRequests and Request state.
  private final Object LOCK = new Object();
  final ThreadToUse myThreadToUse;
  private JComponent myActivationComponent;
  @Override
  public void dispose() {
    if (!myDisposed) {
      myDisposed = true;
      cancelAllRequests();
      // The EDT executor is shared; only a privately created executor is shut down here.
      if (myThreadToUse != ThreadToUse.SWING_THREAD) {
        myExecutorService.shutdownNow();
      }
    }
  }
  private void checkDisposed() {
    LOG.assertTrue(!myDisposed, "Already disposed");
  }
  public enum ThreadToUse {
    /**
     * Run request in Swing EventDispatchThread. This is the default.
     * NB: <i>Requests shouldn't take long to avoid UI freezes.</i>
     */
    SWING_THREAD,
    /**
     * @deprecated Use {@link #POOLED_THREAD} instead
     */
    @Deprecated
    SHARED_THREAD,
    /**
     * Run requests in one of application pooled threads.
     *
     * @see Application#executeOnPooledThread(Callable)
     */
    POOLED_THREAD,
    /**
     * @deprecated Use {@link #POOLED_THREAD} instead
     */
    @Deprecated
    OWN_THREAD
  }
  /**
   * Creates alarm that works in Swing thread
   */
  public Alarm() {
    this(ThreadToUse.SWING_THREAD);
  }
  /** Creates a Swing-thread alarm tied to the lifetime of {@code parentDisposable}. */
  public Alarm(@NotNull Disposable parentDisposable) {
    this(ThreadToUse.SWING_THREAD, parentDisposable);
  }
  // Pooled/own-thread alarms need a parent Disposable to shut their executor down,
  // hence the assertion below.
  public Alarm(@NotNull ThreadToUse threadToUse) {
    this(threadToUse, null);
    LOG.assertTrue(threadToUse != ThreadToUse.POOLED_THREAD && threadToUse != ThreadToUse.OWN_THREAD,
                   "You must provide parent Disposable for ThreadToUse.POOLED_THREAD and ThreadToUse.OWN_THREAD Alarm");
  }
  public Alarm(@NotNull ThreadToUse threadToUse, @Nullable Disposable parentDisposable) {
    myThreadToUse = threadToUse;
    myExecutorService = threadToUse == ThreadToUse.SWING_THREAD ?
                        // pass straight to EDT
                        EdtExecutorService.getScheduledExecutorInstance() :
                        // or pass to app pooled thread.
                        // have to restrict the number of running tasks because otherwise the (implicit) contract of
                        // "addRequests with the same delay are executed in order" will be broken
                        AppExecutorUtil.createBoundedScheduledExecutorService(1);
    if (parentDisposable != null) {
      Disposer.register(parentDisposable, this);
    }
  }
  /**
   * Schedules {@code request} after {@code delay} ms; when {@code runWithActiveFrameOnly}
   * is true and the application is currently inactive, scheduling is deferred until the
   * application next becomes active.
   */
  public void addRequest(@NotNull final Runnable request, final int delay, boolean runWithActiveFrameOnly) {
    if (runWithActiveFrameOnly && !ApplicationManager.getApplication().isActive()) {
      final MessageBus bus = ApplicationManager.getApplication().getMessageBus();
      final MessageBusConnection connection = bus.connect(this);
      connection.subscribe(ApplicationActivationListener.TOPIC, new ApplicationActivationListener.Adapter() {
        @Override
        public void applicationActivated(IdeFrame ideFrame) {
          // One-shot: disconnect before re-scheduling so the listener fires only once.
          connection.disconnect();
          addRequest(request, delay);
        }
      });
    }
    else {
      addRequest(request, delay);
    }
  }
  // Modality only applies to Swing-thread alarms; returns null for pooled threads
  // or when no Application is available (e.g. in bare unit tests).
  private ModalityState getModalityState() {
    if (myThreadToUse != ThreadToUse.SWING_THREAD) return null;
    Application application = ApplicationManager.getApplication();
    if (application == null) return null;
    return application.getCurrentModalityState();
  }
  public void addRequest(@NotNull Runnable request, long delayMillis) {
    _addRequest(request, delayMillis, getModalityState());
  }
  public void addRequest(@NotNull Runnable request, int delayMillis) {
    _addRequest(request, delayMillis, getModalityState());
  }
  /** Schedules using the modality state of the activation component (must be set first). */
  public void addComponentRequest(@NotNull Runnable request, int delay) {
    assert myActivationComponent != null;
    _addRequest(request, delay, ModalityState.stateForComponent(myActivationComponent));
  }
  public void addComponentRequest(@NotNull Runnable request, long delayMillis) {
    assert myActivationComponent != null;
    _addRequest(request, delayMillis, ModalityState.stateForComponent(myActivationComponent));
  }
  // Explicit-modality overloads are only meaningful for Swing-thread alarms.
  public void addRequest(@NotNull Runnable request, int delayMillis, @Nullable final ModalityState modalityState) {
    LOG.assertTrue(myThreadToUse == ThreadToUse.SWING_THREAD);
    _addRequest(request, delayMillis, modalityState);
  }
  public void addRequest(@NotNull Runnable request, long delayMillis, @Nullable final ModalityState modalityState) {
    LOG.assertTrue(myThreadToUse == ThreadToUse.SWING_THREAD);
    _addRequest(request, delayMillis, modalityState);
  }
  // Core scheduling path: either hands the request to the executor immediately,
  // or parks it in myPendingRequests while the activation component is hidden.
  void _addRequest(@NotNull Runnable request, long delayMillis, @Nullable ModalityState modalityState) {
    synchronized (LOCK) {
      checkDisposed();
      final Request requestToSchedule = new Request(request, modalityState, delayMillis);
      if (myActivationComponent == null || myActivationComponent.isShowing()) {
        _add(requestToSchedule);
      }
      else if (!myPendingRequests.contains(requestToSchedule)) {
        myPendingRequests.add(requestToSchedule);
      }
    }
  }
  // must be called under LOCK
  private void _add(@NotNull Request requestToSchedule) {
    requestToSchedule.schedule();
    myRequests.add(requestToSchedule);
  }
  // Promotes everything parked in myPendingRequests to the executor; invoked when
  // the activation component becomes showing.
  private void flushPending() {
    synchronized (LOCK) {
      for (Request each : myPendingRequests) {
        _add(each);
      }
      myPendingRequests.clear();
    }
  }
  /** Cancels every scheduled or pending request wrapping {@code request}. Always returns true. */
  public boolean cancelRequest(@NotNull Runnable request) {
    synchronized (LOCK) {
      cancelRequest(request, myRequests);
      cancelRequest(request, myPendingRequests);
      return true;
    }
  }
  // Iterates backwards so list.remove(i) does not skip elements.
  private void cancelRequest(@NotNull Runnable request, @NotNull List<Request> list) {
    for (int i = list.size()-1; i>=0; i--) {
      Request r = list.get(i);
      if (r.myTask == request) {
        r.cancel();
        list.remove(i);
      }
    }
  }
  /** Cancels all requests; returns the number of (previously scheduled) requests cancelled. */
  public int cancelAllRequests() {
    synchronized (LOCK) {
      int count = cancelAllRequests(myRequests);
      // Pending requests are cancelled too, but not included in the returned count.
      cancelAllRequests(myPendingRequests);
      return count;
    }
  }
  private int cancelAllRequests(@NotNull List<Request> list) {
    int count = 0;
    for (Request request : list) {
      count++;
      request.cancel();
    }
    list.clear();
    return count;
  }
  // Cancels all scheduled requests and runs their tasks synchronously on the
  // calling thread. The task is first detached under LOCK, then re-attached and
  // run outside the lock to avoid running alien code while holding LOCK.
  @TestOnly
  public void flush() {
    List<Pair<Request, Runnable>> requests;
    synchronized (LOCK) {
      if (myRequests.isEmpty()) {
        return;
      }
      requests = new SmartList<>();
      for (Request request : myRequests) {
        Runnable existingTask = request.cancel();
        if (existingTask != null) {
          requests.add(Pair.create(request, existingTask));
        }
      }
      myRequests.clear();
    }
    for (Pair<Request, Runnable> request : requests) {
      synchronized (LOCK) {
        request.first.myTask = request.second;
      }
      request.first.run();
    }
    UIUtil.dispatchAllInvocationEvents();
  }
  // Blocks until every currently scheduled request's future completes, or the
  // per-future timeout elapses. Only the request snapshot taken at entry is awaited.
  @TestOnly
  void waitForAllExecuted(long timeout, @NotNull TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException {
    List<Request> requests;
    synchronized (LOCK) {
      requests = new ArrayList<>(myRequests);
    }
    for (Request request : requests) {
      Future<?> future;
      synchronized (LOCK) {
        future = request.myFuture;
      }
      if (future != null) {
        future.get(timeout, unit);
      }
    }
  }
  public int getActiveRequestCount() {
    synchronized (LOCK) {
      return myRequests.size();
    }
  }
  public boolean isEmpty() {
    synchronized (LOCK) {
      return myRequests.isEmpty();
    }
  }
  protected boolean isEdt() {
    return isEventDispatchThread();
  }
  public static boolean isEventDispatchThread() {
    final Application app = ApplicationManager.getApplication();
    return app != null && app.isDispatchThread() || EventQueue.isDispatchThread();
  }
  // One scheduled unit of work. myTask doubles as the "not yet executed/cancelled"
  // flag: it is nulled out (under LOCK) exactly once, either by execution or by cancel().
  private class Request implements Runnable {
    private Runnable myTask; // guarded by LOCK
    private final ModalityState myModalityState;
    private Future<?> myFuture; // guarded by LOCK
    private final long myDelay;
    private Request(@NotNull final Runnable task, @Nullable ModalityState modalityState, long delayMillis) {
      synchronized (LOCK) {
        myTask = task;
        myModalityState = modalityState;
        myDelay = delayMillis;
      }
    }
    // Invoked by the executor when the delay elapses; dispatches the task to the
    // right thread (directly, via EDT, or via Application.invokeLater for modality).
    @Override
    public void run() {
      try {
        if (myDisposed) {
          return;
        }
        synchronized (LOCK) {
          // Cancelled between scheduling and execution.
          if (myTask == null) {
            return;
          }
        }
        final Runnable scheduledTask = new Runnable() {
          @Override
          public void run() {
            final Runnable task;
            synchronized (LOCK) {
              // Claim the task atomically; a concurrent cancel() leaves task == null.
              task = myTask;
              myTask = null;
              myRequests.remove(Request.this);
              myFuture = null;
            }
            if (task == null) return;
            if (myThreadToUse == ThreadToUse.SWING_THREAD && !isEdt()) {
              //noinspection SSBasedInspection
              EdtInvocationManager.getInstance().invokeLater(() -> {
                if (!myDisposed) {
                  QueueProcessor.runSafely(task);
                }
              });
            }
            else {
              QueueProcessor.runSafely(task);
            }
          }
          @Override
          public String toString() {
            return "ScheduledTask "+Request.this;
          }
        };
        if (myModalityState == null) {
          scheduledTask.run();
        }
        else {
          final Application app = ApplicationManager.getApplication();
          if (app == null) {
            //noinspection SSBasedInspection
            SwingUtilities.invokeLater(scheduledTask);
          }
          else {
            app.invokeLater(scheduledTask, myModalityState);
          }
        }
      }
      catch (Throwable e) {
        LOG.error(e);
      }
    }
    // must be called under LOCK
    private void schedule() {
      myFuture = myExecutorService.schedule(this, myDelay, TimeUnit.MILLISECONDS);
    }
    /**
     * @return task if not yet executed
     */
    @Nullable
    private Runnable cancel() {
      synchronized (LOCK) {
        Future<?> future = myFuture;
        if (future != null) {
          // false: do not interrupt if the task is already running.
          future.cancel(false);
          myFuture = null;
        }
        Runnable task = myTask;
        myTask = null;
        return task;
      }
    }
    @Override
    public String toString() {
      synchronized (LOCK) {
        Runnable task = myTask;
        return super.toString() + (task != null ? ": "+task : "");
      }
    }
  }
  /**
   * Ties request execution to {@code component}: while it is not showing, new
   * requests are parked and flushed only once the component becomes visible.
   */
  @NotNull
  public Alarm setActivationComponent(@NotNull final JComponent component) {
    myActivationComponent = component;
    //noinspection ResultOfObjectAllocationIgnored
    new UiNotifyConnector(component, new Activatable() {
      @Override
      public void showNotify() {
        flushPending();
      }
      @Override
      public void hideNotify() {
      }
    });
    return this;
  }
  public boolean isDisposed() {
    return myDisposed;
  }
}
| apache-2.0 |
michael-simons/biking2 | src/test/java/ac/simons/biking2/statistics/highcharts/HighchartsNgConfigTest.java | 2103 | /*
* Copyright 2014-2019 michael-simons.eu.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ac.simons.biking2.statistics.highcharts;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.CoreMatchers.nullValue;
import static org.hamcrest.MatcherAssert.assertThat;
import org.junit.jupiter.api.Test;
/**
 * Unit tests for the Highcharts configuration builder.
 *
 * @author Michael J. Simons
 *
 * @since 2014-02-11
 */
class HighchartsNgConfigTest {

    /**
     * Builds a config with two series and checks the running max-Y value
     * as well as the order and contents of the collected series.
     */
    @Test
    void testBuilder() {
        final HighchartsNgConfig.Builder builder = HighchartsNgConfig.define();
        assertThat(builder.computeCurrentMaxYValue(), is(equalTo(0)));
        final Collection<Series<?>> allSeries = builder
                .series().withData(1.0, 2).build()
                .series().withData(3.0, 4).build()
                .build()
                .getSeries();
        assertThat(builder.computeCurrentMaxYValue(), is(equalTo(4)));
        assertThat(allSeries.size(), is(equalTo(2)));
        final List<Series> ordered = new ArrayList<>(allSeries);
        assertThat(ordered.get(0).getData(), is(equalTo(Arrays.asList(1.0, 2))));
        assertThat(ordered.get(1).getData(), is(equalTo(Arrays.asList(3.0, 4))));
    }

    @Test
    void testJsonCreator() {
        final HighchartsNgConfig config = new HighchartsNgConfig(null, null);
        // Make sure no user data can be injected
        assertThat(config.getUserData(), is(nullValue()));
    }
}
| apache-2.0 |
Contrast-Security-OSS/cassandra-migration | src/main/java/com/contrastsecurity/cassandra/migration/action/Initialize.java | 499 | package com.contrastsecurity.cassandra.migration.action;
import com.contrastsecurity.cassandra.migration.config.Keyspace;
import com.contrastsecurity.cassandra.migration.dao.SchemaVersionDAO;
import com.datastax.driver.core.Session;
public class Initialize {

    /**
     * Ensures the schema-version tracking tables exist in the given keyspace,
     * creating them when absent.
     *
     * @param session active Cassandra session
     * @param keyspace keyspace holding the migration metadata
     * @param migrationVersionTableName name of the version-tracking table
     */
    public void run(Session session, Keyspace keyspace, String migrationVersionTableName) {
        new SchemaVersionDAO(session, keyspace, migrationVersionTableName).createTablesIfNotExist();
    }
}
| apache-2.0 |
googlearchive/instantbuy-sample-java | src/main/java/com/google/wallet/online/jwt/Pay.java | 1952 | /**
* Copyright 2013 Google Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.wallet.online.jwt;
/**
 * Immutable-after-build bean representing a Pay request.
 *
 * <p>Field names are significant: Gson maps JSON properties to these fields
 * directly, so {@code estimatedTotalPrice} and {@code currencyCode} must not
 * be renamed.
 */
public class Pay {

    private String estimatedTotalPrice;

    private String currencyCode;

    /** No-arg constructor required by Gson for JSON -&gt; Java conversion. */
    public Pay() {
    }

    // Used by the builder only.
    private Pay(String estimatedTotalPrice, String currencyCode) {
        this.estimatedTotalPrice = estimatedTotalPrice;
        this.currencyCode = currencyCode;
    }

    /** @return the estimated total price, or {@code null} if unset */
    public String getEstimatedTotalPrice() {
        return estimatedTotalPrice;
    }

    /** @return the ISO currency code, or {@code null} if unset */
    public String getCurrencyCode() {
        return currencyCode;
    }

    /**
     * Creates a new builder.
     * @return A new builder to create the Pay object.
     */
    public static Builder newBuilder() {
        return new Builder();
    }

    /**
     * Fluent helper for assembling a {@link Pay} instance.
     */
    public static class Builder {

        private String estimatedTotalPrice;

        private String currencyCode;

        private Builder() {
        }

        /** Sets the estimated total price; returns this builder for chaining. */
        public Builder setEstimatedTotalPrice(String estimatedTotalPrice) {
            this.estimatedTotalPrice = estimatedTotalPrice;
            return this;
        }

        /** Sets the currency code; returns this builder for chaining. */
        public Builder setCurrencyCode(String currencyCode) {
            this.currencyCode = currencyCode;
            return this;
        }

        /** @return a {@link Pay} carrying the values set on this builder */
        public Pay build() {
            return new Pay(estimatedTotalPrice, currencyCode);
        }
    }
}
| apache-2.0 |
cloudera/hcatalog | hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/PigHCatUtil.java | 19779 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.hcatalog.pig;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.MetaStoreUtils;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hcatalog.common.HCatConstants;
import org.apache.hcatalog.common.HCatException;
import org.apache.hcatalog.common.HCatUtil;
import org.apache.hcatalog.data.HCatRecord;
import org.apache.hcatalog.data.Pair;
import org.apache.hcatalog.data.schema.HCatFieldSchema;
import org.apache.hcatalog.data.schema.HCatFieldSchema.Type;
import org.apache.hcatalog.data.schema.HCatSchema;
import org.apache.pig.LoadPushDown.RequiredField;
import org.apache.pig.PigException;
import org.apache.pig.ResourceSchema;
import org.apache.pig.ResourceSchema.ResourceFieldSchema;
import org.apache.pig.data.DataBag;
import org.apache.pig.data.DataByteArray;
import org.apache.pig.data.DataType;
import org.apache.pig.data.DefaultDataBag;
import org.apache.pig.data.Tuple;
import org.apache.pig.data.TupleFactory;
import org.apache.pig.impl.logicalLayer.schema.Schema;
import org.apache.pig.impl.util.UDFContext;
import org.apache.pig.impl.util.Utils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Shared utilities for the HCatalog/Pig adapter: Hive metastore access,
 * HCatalog-schema to Pig-schema translation, and HCatRecord to Pig Tuple
 * conversion. Package-private; used by the HCat loader/storer classes.
 */
class PigHCatUtil {

  private static final Logger LOG = LoggerFactory.getLogger(PigHCatUtil.class);

  // Error code attached to every PigException raised by this adapter.
  static final int PIG_EXCEPTION_CODE = 1115; // http://wiki.apache.org/pig/PigErrorHandlingFunctionalSpecification#Error_codes
  // NOTE(review): not referenced anywhere in this class — candidate for removal.
  private static final String DEFAULT_DB = MetaStoreUtils.DEFAULT_DATABASE_NAME;

  // Cache of (location, server uri) -> metastore Table, so repeated loads of the
  // same table skip the metastore round trip. Not synchronized — assumes
  // single-threaded use per instance.
  private final Map<Pair<String, String>, Table> hcatTableCache =
    new HashMap<Pair<String, String>, Table>();

  private static final TupleFactory tupFac = TupleFactory.getInstance();

  // True when the Pig runtime on the classpath understands DataType.BOOLEAN
  // (Pig 0.10+); probed once in the static initializer below.
  private static boolean pigHasBooleanSupport = false;

  /**
   * Determine if the current Pig version supports boolean columns. This works around a
   * dependency conflict preventing HCatalog from requiring a version of Pig with boolean
   * field support and should be removed once HCATALOG-466 has been resolved.
   */
  static {
    // DETAILS:
    //
    // PIG-1429 added support for boolean fields, which shipped in 0.10.0;
    // this version of Pig depends on antlr 3.4.
    //
    // HCatalog depends heavily on Hive, which at this time uses antlr 3.0.1.
    //
    // antlr 3.0.1 and 3.4 are incompatible, so Pig 0.10.0 and Hive cannot be depended on in the
    // same project. Pig 0.8.0 did not use antlr for its parser and can coexist with Hive,
    // so that Pig version is depended on by HCatalog at this time.
    try {
      // Probe by parsing a boolean field: on Pig versions without boolean
      // support this either throws or yields a non-BOOLEAN type.
      Schema schema = Utils.getSchemaFromString("myBooleanField: boolean");
      pigHasBooleanSupport = (schema.getField("myBooleanField").type == DataType.BOOLEAN);
    } catch (Throwable e) {
      // pass — deliberately broad: any failure simply means "no boolean support"
    }
    if (!pigHasBooleanSupport) {
      LOG.info("This version of Pig does not support boolean fields. To enable "
        + "boolean-to-integer conversion, set the "
        + HCatConstants.HCAT_DATA_CONVERT_BOOLEAN_TO_INTEGER
        + "=true configuration parameter.");
    }
  }

  /**
   * Splits a load-statement location of the form {@code <db>.<table>} or
   * {@code <table>} into its database/table parts.
   *
   * @param location the location string from the Pig load statement
   * @return pair of (database name, table name)
   * @throws IOException wrapped as a PigException with a user-facing message
   *         when the location cannot be parsed
   */
  static public Pair<String, String> getDBTableNames(String location) throws IOException {
    // the location string will be of the form:
    // <database name>.<table name> - parse it and
    // communicate the information to HCatInputFormat
    try {
      return HCatUtil.getDbAndTableName(location);
    } catch (IOException e) {
      String locationErrMsg = "The input location in load statement " +
        "should be of the form " +
        "<databasename>.<table name> or <table name>. Got " + location;
      throw new PigException(locationErrMsg, PIG_EXCEPTION_CODE);
    }
  }

  /** @return the metastore Thrift URI(s) from the job configuration, or null if unset. */
  static public String getHCatServerUri(Job job) {
    return job.getConfiguration().get(HiveConf.ConfVars.METASTOREURIS.varname);
  }

  /** @return the metastore Kerberos principal from the job configuration, or null if unset. */
  static public String getHCatServerPrincipal(Job job) {
    return job.getConfiguration().get(HCatConstants.HCAT_METASTORE_PRINCIPAL);
  }

  /**
   * Builds a HiveMetaStoreClient for the given server. When {@code serverUri}
   * is null the client runs against a local (embedded) metastore; when the
   * Kerberos principal is non-null, SASL is enabled.
   *
   * @param clazz class whose classloader/config resources seed the HiveConf
   * @throws Exception if the client cannot be instantiated
   */
  private static HiveMetaStoreClient getHiveMetaClient(String serverUri,
                          String serverKerberosPrincipal, Class<?> clazz) throws Exception {
    HiveConf hiveConf = new HiveConf(clazz);

    if (serverUri != null) {
      hiveConf.set("hive.metastore.local", "false");
      hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, serverUri.trim());
    }

    if (serverKerberosPrincipal != null) {
      hiveConf.setBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL, true);
      hiveConf.setVar(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL, serverKerberosPrincipal);
    }

    try {
      return HCatUtil.getHiveClient(hiveConf);
    } catch (Exception e) {
      throw new Exception("Could not instantiate a HiveMetaStoreClient connecting to server uri:[" + serverUri + "]", e);
    }
  }

  /**
   * Projects the full table schema (stashed in the UDF context under
   * HCAT_TABLE_SCHEMA) down to just the requested fields, by index.
   *
   * @return the pruned schema, or null when {@code fields} is null
   */
  HCatSchema getHCatSchema(List<RequiredField> fields, String signature, Class<?> classForUDFCLookup) throws IOException {
    if (fields == null) {
      return null;
    }

    Properties props = UDFContext.getUDFContext().getUDFProperties(
      classForUDFCLookup, new String[]{signature});
    HCatSchema hcatTableSchema = (HCatSchema) props.get(HCatConstants.HCAT_TABLE_SCHEMA);

    ArrayList<HCatFieldSchema> fcols = new ArrayList<HCatFieldSchema>();
    for (RequiredField rf : fields) {
      fcols.add(hcatTableSchema.getFields().get(rf.getIndex()));
    }
    return new HCatSchema(fcols);
  }

  /**
   * Fetches the metastore Table for a location, consulting (and populating)
   * {@link #hcatTableCache} keyed on (location, server uri).
   *
   * @throws PigException when the table does not exist (friendlier frontend message)
   * @throws IOException  for any other metastore failure
   */
  public Table getTable(String location, String hcatServerUri, String hcatServerPrincipal) throws IOException {
    Pair<String, String> loc_server = new Pair<String, String>(location, hcatServerUri);
    Table hcatTable = hcatTableCache.get(loc_server);
    if (hcatTable != null) {
      return hcatTable;
    }

    Pair<String, String> dbTablePair = PigHCatUtil.getDBTableNames(location);
    String dbName = dbTablePair.first;
    String tableName = dbTablePair.second;
    Table table = null;
    HiveMetaStoreClient client = null;
    try {
      client = getHiveMetaClient(hcatServerUri, hcatServerPrincipal, PigHCatUtil.class);
      table = HCatUtil.getTable(client, dbName, tableName);
    } catch (NoSuchObjectException nsoe) {
      throw new PigException("Table not found : " + nsoe.getMessage(), PIG_EXCEPTION_CODE); // prettier error messages to frontend
    } catch (Exception e) {
      throw new IOException(e);
    } finally {
      HCatUtil.closeHiveClientQuietly(client);
    }
    hcatTableCache.put(loc_server, table);
    return table;
  }

  /**
   * Translates an entire HCatalog schema into the equivalent Pig
   * ResourceSchema, field by field.
   */
  public static ResourceSchema getResourceSchema(HCatSchema hcatSchema) throws IOException {

    List<ResourceFieldSchema> rfSchemaList = new ArrayList<ResourceFieldSchema>();
    for (HCatFieldSchema hfs : hcatSchema.getFields()) {
      ResourceFieldSchema rfSchema;
      rfSchema = getResourceSchemaFromFieldSchema(hfs);
      rfSchemaList.add(rfSchema);
    }
    ResourceSchema rSchema = new ResourceSchema();
    rSchema.setFields(rfSchemaList.toArray(new ResourceFieldSchema[0]));
    return rSchema;

  }

  /**
   * Translates one HCatalog field into a Pig ResourceFieldSchema. STRUCT and
   * ARRAY fields recurse into a tuple/bag sub-schema respectively; all other
   * types carry no sub-schema.
   */
  private static ResourceFieldSchema getResourceSchemaFromFieldSchema(HCatFieldSchema hfs)
    throws IOException {
    ResourceFieldSchema rfSchema;
    // if we are dealing with a bag or tuple column - need to worry about subschema
    if (hfs.getType() == Type.STRUCT) {
      rfSchema = new ResourceFieldSchema()
        .setName(hfs.getName())
        .setDescription(hfs.getComment())
        .setType(getPigType(hfs))
        .setSchema(getTupleSubSchema(hfs));
    } else if (hfs.getType() == Type.ARRAY) {
      rfSchema = new ResourceFieldSchema()
        .setName(hfs.getName())
        .setDescription(hfs.getComment())
        .setType(getPigType(hfs))
        .setSchema(getBagSubSchema(hfs));
    } else {
      rfSchema = new ResourceFieldSchema()
        .setName(hfs.getName())
        .setDescription(hfs.getComment())
        .setType(getPigType(hfs))
        .setSchema(null); // no munging inner-schemas
    }
    return rfSchema;
  }

  /**
   * Builds the bag sub-schema for an HCatalog ARRAY field. Inner tuple/field
   * names default to HCatConstants values but can be overridden via client
   * system properties (with FIELDNAME substituted by the field's name).
   */
  protected static ResourceSchema getBagSubSchema(HCatFieldSchema hfs) throws IOException {
    // there are two cases - array<Type> and array<struct<...>>
    // in either case the element type of the array is represented in a
    // tuple field schema in the bag's field schema - the second case (struct)
    // more naturally translates to the tuple - in the first case (array<Type>)
    // we simulate the tuple by putting the single field in a tuple
    Properties props = UDFContext.getUDFContext().getClientSystemProps();
    String innerTupleName = HCatConstants.HCAT_PIG_INNER_TUPLE_NAME_DEFAULT;
    if (props != null && props.containsKey(HCatConstants.HCAT_PIG_INNER_TUPLE_NAME)) {
      innerTupleName = props.getProperty(HCatConstants.HCAT_PIG_INNER_TUPLE_NAME)
        .replaceAll("FIELDNAME", hfs.getName());
    }
    String innerFieldName = HCatConstants.HCAT_PIG_INNER_FIELD_NAME_DEFAULT;
    if (props != null && props.containsKey(HCatConstants.HCAT_PIG_INNER_FIELD_NAME)) {
      innerFieldName = props.getProperty(HCatConstants.HCAT_PIG_INNER_FIELD_NAME)
        .replaceAll("FIELDNAME", hfs.getName());
    }

    ResourceFieldSchema[] bagSubFieldSchemas = new ResourceFieldSchema[1];
    bagSubFieldSchemas[0] = new ResourceFieldSchema().setName(innerTupleName)
      .setDescription("The tuple in the bag")
      .setType(DataType.TUPLE);

    HCatFieldSchema arrayElementFieldSchema = hfs.getArrayElementSchema().get(0);
    if (arrayElementFieldSchema.getType() == Type.STRUCT) {
      // array<struct<...>> — the struct maps directly onto the inner tuple
      bagSubFieldSchemas[0].setSchema(getTupleSubSchema(arrayElementFieldSchema));
    } else if (arrayElementFieldSchema.getType() == Type.ARRAY) {
      // array<array<...>> — nest the inner bag's schema inside the tuple
      ResourceSchema s = new ResourceSchema();
      List<ResourceFieldSchema> lrfs = Arrays.asList(getResourceSchemaFromFieldSchema(arrayElementFieldSchema));
      s.setFields(lrfs.toArray(new ResourceFieldSchema[0]));
      bagSubFieldSchemas[0].setSchema(s);
    } else {
      // array<primitive> — simulate a tuple containing the single element
      ResourceFieldSchema[] innerTupleFieldSchemas = new ResourceFieldSchema[1];
      innerTupleFieldSchemas[0] = new ResourceFieldSchema().setName(innerFieldName)
        .setDescription("The inner field in the tuple in the bag")
        .setType(getPigType(arrayElementFieldSchema))
        .setSchema(null); // the element type is not a tuple - so no subschema
      bagSubFieldSchemas[0].setSchema(new ResourceSchema().setFields(innerTupleFieldSchemas));
    }
    ResourceSchema s = new ResourceSchema().setFields(bagSubFieldSchemas);
    return s;
  }

  /** Builds the tuple sub-schema for an HCatalog STRUCT field. */
  private static ResourceSchema getTupleSubSchema(HCatFieldSchema hfs) throws IOException {
    // for each struct subfield, create equivalent ResourceFieldSchema
    ResourceSchema s = new ResourceSchema();
    List<ResourceFieldSchema> lrfs = new ArrayList<ResourceFieldSchema>();
    for (HCatFieldSchema subField : hfs.getStructSubSchema().getFields()) {
      lrfs.add(getResourceSchemaFromFieldSchema(subField));
    }
    s.setFields(lrfs.toArray(new ResourceFieldSchema[0]));
    return s;
  }

  /**
   * @param hfs the field schema of the column
   * @return corresponding pig type
   * @throws IOException
   */
  static public byte getPigType(HCatFieldSchema hfs) throws IOException {
    return getPigType(hfs.getType());
  }

  /**
   * Maps an HCatalog type to the corresponding Pig DataType constant.
   * BOOLEAN maps only when the runtime Pig supports it (see static block);
   * otherwise — like any unmapped type — a PigException is thrown.
   */
  static public byte getPigType(Type type) throws IOException {
    if (type == Type.STRING) {
      return DataType.CHARARRAY;
    }

    if ((type == Type.INT) || (type == Type.SMALLINT) || (type == Type.TINYINT)) {
      return DataType.INTEGER;
    }

    if (type == Type.ARRAY) {
      return DataType.BAG;
    }

    if (type == Type.STRUCT) {
      return DataType.TUPLE;
    }

    if (type == Type.MAP) {
      return DataType.MAP;
    }

    if (type == Type.BIGINT) {
      return DataType.LONG;
    }

    if (type == Type.FLOAT) {
      return DataType.FLOAT;
    }

    if (type == Type.DOUBLE) {
      return DataType.DOUBLE;
    }

    if (type == Type.BINARY) {
      return DataType.BYTEARRAY;
    }

    if (type == Type.BOOLEAN && pigHasBooleanSupport) {
      return DataType.BOOLEAN;
    }

    throw new PigException("HCatalog column type '" + type.toString()
      + "' is not supported in Pig as a column type", PIG_EXCEPTION_CODE);
  }

  /**
   * Converts a whole HCatRecord into a Pig Tuple using the given schema.
   * Returns null for a null record.
   */
  public static Tuple transformToTuple(HCatRecord hr, HCatSchema hs) throws Exception {
    if (hr == null) {
      return null;
    }
    return transformToTuple(hr.getAll(), hs);
  }

  /**
   * Converts a single HCatalog value to its Pig representation:
   * BINARY -&gt; DataByteArray, STRUCT -&gt; Tuple, ARRAY -&gt; DataBag,
   * MAP -&gt; Map&lt;String,Object&gt;; everything else passes through unchanged.
   * Null inputs yield null (BINARY handles it explicitly; the recursive
   * helpers each return null for null input).
   */
  @SuppressWarnings("unchecked")
  public static Object extractPigObject(Object o, HCatFieldSchema hfs) throws Exception {
    Object result;
    Type itemType = hfs.getType();
    switch (itemType) {
    case BINARY:
      result = (o == null) ? null : new DataByteArray((byte[]) o);
      break;
    case STRUCT:
      result = transformToTuple((List<Object>) o, hfs);
      break;
    case ARRAY:
      result = transformToBag((List<? extends Object>) o, hfs);
      break;
    case MAP:
      result = transformToPigMap((Map<Object, Object>) o, hfs);
      break;
    default:
      result = o;
      break;
    }
    return result;
  }

  // STRUCT-field overload: delegates to the schema-based conversion, wrapping
  // failures with a clearer message when the field was not actually a STRUCT.
  private static Tuple transformToTuple(List<? extends Object> objList, HCatFieldSchema hfs) throws Exception {
    try {
      return transformToTuple(objList, hfs.getStructSubSchema());
    } catch (Exception e) {
      if (hfs.getType() != Type.STRUCT) {
        throw new Exception("Expected Struct type, got " + hfs.getType(), e);
      } else {
        throw e;
      }
    }
  }

  // Converts a list of raw values into a Tuple, converting each element
  // according to the corresponding sub-field schema. Null list -> null.
  private static Tuple transformToTuple(List<? extends Object> objList, HCatSchema hs) throws Exception {
    if (objList == null) {
      return null;
    }
    Tuple t = tupFac.newTuple(objList.size());
    List<HCatFieldSchema> subFields = hs.getFields();
    for (int i = 0; i < subFields.size(); i++) {
      t.set(i, extractPigObject(objList.get(i), subFields.get(i)));
    }
    return t;
  }

  // Converts an HCat map into a Pig map. Keys are stringified because Pig map
  // keys must be Strings; values are converted via the map-value schema.
  private static Map<String, Object> transformToPigMap(Map<Object, Object> map, HCatFieldSchema hfs) throws Exception {
    if (map == null) {
      return null;
    }

    Map<String, Object> result = new HashMap<String, Object>();
    for (Entry<Object, Object> entry : map.entrySet()) {
      // since map key for Pig has to be Strings
      result.put(entry.getKey().toString(), extractPigObject(entry.getValue(), hfs.getMapValueSchema().get(0)));
    }
    return result;
  }

  // Converts an HCat array into a Pig bag. Pig bags always contain tuples,
  // so non-struct elements are wrapped in a single-field tuple.
  @SuppressWarnings("unchecked")
  private static DataBag transformToBag(List<? extends Object> list, HCatFieldSchema hfs) throws Exception {
    if (list == null) {
      return null;
    }

    HCatFieldSchema elementSubFieldSchema = hfs.getArrayElementSchema().getFields().get(0);
    DataBag db = new DefaultDataBag();
    for (Object o : list) {
      Tuple tuple;
      if (elementSubFieldSchema.getType() == Type.STRUCT) {
        tuple = transformToTuple((List<Object>) o, elementSubFieldSchema);
      } else {
        // bags always contain tuples
        tuple = tupFac.newTuple(extractPigObject(o, elementSubFieldSchema));
      }
      db.add(tuple);
    }
    return db;
  }

  // Validates every field of a schema against Pig's type rules (recursive).
  private static void validateHCatSchemaFollowsPigRules(HCatSchema tblSchema) throws PigException {
    for (HCatFieldSchema hcatField : tblSchema.getFields()) {
      validateHcatFieldFollowsPigRules(hcatField);
    }
  }

  // Rejects field types Pig cannot represent: BOOLEAN without runtime support
  // fails outright; ARRAY/STRUCT recurse; MAP recurses into its value schema
  // and merely logs when the key type is non-String (it will be stringified).
  private static void validateHcatFieldFollowsPigRules(HCatFieldSchema hcatField) throws PigException {
    try {
      Type hType = hcatField.getType();
      switch (hType) {
      case BOOLEAN:
        if (!pigHasBooleanSupport) {
          throw new PigException("Incompatible type found in HCat table schema: "
            + hcatField, PigHCatUtil.PIG_EXCEPTION_CODE);
        }
        break;
      case ARRAY:
        validateHCatSchemaFollowsPigRules(hcatField.getArrayElementSchema());
        break;
      case STRUCT:
        validateHCatSchemaFollowsPigRules(hcatField.getStructSubSchema());
        break;
      case MAP:
        // key is only string
        if (hcatField.getMapKeyType() != Type.STRING) {
          LOG.info("Converting non-String key of map " + hcatField.getName() + " from "
            + hcatField.getMapKeyType() + " to String.");
        }
        validateHCatSchemaFollowsPigRules(hcatField.getMapValueSchema());
        break;
      }
    } catch (HCatException e) {
      throw new PigException("Incompatible type found in hcat table schema: " + hcatField, PigHCatUtil.PIG_EXCEPTION_CODE, e);
    }
  }

  /** Public entry point for validating a full table schema against Pig's rules. */
  public static void validateHCatTableSchemaFollowsPigRules(HCatSchema hcatTableSchema) throws IOException {
    validateHCatSchemaFollowsPigRules(hcatTableSchema);
  }

  /** Copies one property from saved UDF properties back into the job config, if present. */
  public static void getConfigFromUDFProperties(Properties p, Configuration config, String propName) {
    if (p.getProperty(propName) != null) {
      config.set(propName, p.getProperty(propName));
    }
  }

  /** Saves one property from the job config into the UDF properties, if present. */
  public static void saveConfigIntoUDFProperties(Properties p, Configuration config, String propName) {
    if (config.get(propName) != null) {
      p.setProperty(propName, config.get(propName));
    }
  }

}
| apache-2.0 |
streamlio/heron | heron/instance/src/java/com/twitter/heron/instance/OutgoingTupleCollection.java | 7843 | // Copyright 2016 Twitter. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.twitter.heron.instance;
import java.io.Serializable;
import com.google.protobuf.ByteString;
import com.google.protobuf.Message;
import com.twitter.heron.api.generated.TopologyAPI;
import com.twitter.heron.api.serializer.IPluggableSerializer;
import com.twitter.heron.api.state.State;
import com.twitter.heron.common.basics.ByteAmount;
import com.twitter.heron.common.basics.Communicator;
import com.twitter.heron.common.basics.SingletonRegistry;
import com.twitter.heron.common.config.SystemConfig;
import com.twitter.heron.common.utils.misc.PhysicalPlanHelper;
import com.twitter.heron.common.utils.misc.SerializeDeSerializeHelper;
import com.twitter.heron.proto.ckptmgr.CheckpointManager;
import com.twitter.heron.proto.system.HeronTuples;
/**
* Implements OutgoingTupleCollection will be able to handle some basic methods for send out tuples
* 1. initNewControlTuple or initNewDataTuple
* 2. addDataTuple, addAckTuple and addFailTuple
* 3. flushRemaining tuples and sent out the tuples
* <p>
* In fact, when talking about to send out tuples, we mean we push them to the out queues.
*/
public class OutgoingTupleCollection {
  protected PhysicalPlanHelper helper;
  // We have just one outQueue responsible for both control tuples and data tuples
  private final Communicator<Message> outQueue;

  // Maximum data tuple size in bytes we can put in one HeronTupleSet
  private final ByteAmount maxDataTupleSize;
  // Max number of data tuples per HeronDataTupleSet before a flush is forced
  private final int dataTupleSetCapacity;
  // Max number of acks/fails per HeronControlTupleSet before a flush is forced
  private final int controlTupleSetCapacity;

  // Serializer used only for instance state in sendOutState()
  private final IPluggableSerializer serializer;

  // In-progress tuple sets; null when nothing is buffered of that kind
  private HeronTuples.HeronDataTupleSet.Builder currentDataTuple;
  private HeronTuples.HeronControlTupleSet.Builder currentControlTuple;

  // Total data emitted in bytes for the entire life
  private long totalDataEmittedInBytes;

  // Current size in bytes for data types to pack into the HeronTupleSet
  private long currentDataTupleSizeInBytes;

  public OutgoingTupleCollection(
      PhysicalPlanHelper helper,
      Communicator<Message> outQueue) {
    this.outQueue = outQueue;
    this.helper = helper;
    SystemConfig systemConfig =
        (SystemConfig) SingletonRegistry.INSTANCE.getSingleton(SystemConfig.HERON_SYSTEM_CONFIG);

    this.serializer =
        SerializeDeSerializeHelper.getSerializer(helper.getTopologyContext().getTopologyConfig());

    // Initialize the values in constructor
    this.totalDataEmittedInBytes = 0;
    this.currentDataTupleSizeInBytes = 0;

    // Read the config values
    this.dataTupleSetCapacity = systemConfig.getInstanceSetDataTupleCapacity();
    this.maxDataTupleSize = systemConfig.getInstanceSetDataTupleSize();
    this.controlTupleSetCapacity = systemConfig.getInstanceSetControlTupleCapacity();
  }

  // Pushes any partially-filled data/control tuple sets onto the out queue.
  public void sendOutTuples() {
    flushRemaining();
  }

  /**
   * Send out the instance's state with corresponding checkpointId
   * @param state instance's state
   * @param checkpointId the checkpointId
   */
  public void sendOutState(State<Serializable, Serializable> state,
                           String checkpointId) {
    // flush all the current data before sending the state
    flushRemaining();

    // Serialize the state
    byte[] serializedState = serializer.serialize(state);

    // Construct the instance state checkpoint
    CheckpointManager.InstanceStateCheckpoint instanceState =
        CheckpointManager.InstanceStateCheckpoint.newBuilder()
            .setCheckpointId(checkpointId)
            .setState(ByteString.copyFrom(serializedState))
            .build();

    CheckpointManager.StoreInstanceStateCheckpoint storeRequest =
        CheckpointManager.StoreInstanceStateCheckpoint.newBuilder()
            .setState(instanceState)
            .build();

    // Put the checkpoint to out stream queue
    outQueue.offer(storeRequest);
  }

  // Buffers one data tuple. A new set is started (flushing the old one) when
  // the stream changes or the current set hits its count or byte-size cap.
  public void addDataTuple(
      String streamId,
      HeronTuples.HeronDataTuple.Builder newTuple,
      long tupleSizeInBytes) {
    if (currentDataTuple == null
        || !currentDataTuple.getStream().getId().equals(streamId)
        || currentDataTuple.getTuplesCount() >= dataTupleSetCapacity
        || currentDataTupleSizeInBytes >= maxDataTupleSize.asBytes()) {
      initNewDataTuple(streamId);
    }
    currentDataTuple.addTuples(newTuple);

    currentDataTupleSizeInBytes += tupleSizeInBytes;
    totalDataEmittedInBytes += tupleSizeInBytes;
  }

  // Buffers one ack. Acks and fails never share a control tuple set (a set
  // holding fails is flushed first), and a full set triggers a flush too.
  public void addAckTuple(HeronTuples.AckTuple.Builder newTuple, long tupleSizeInBytes) {
    if (currentControlTuple == null
        || currentControlTuple.getFailsCount() > 0
        || currentControlTuple.getAcksCount() >= controlTupleSetCapacity) {
      initNewControlTuple();
    }
    currentControlTuple.addAcks(newTuple);

    // Add the size of data in bytes ready to send out
    totalDataEmittedInBytes += tupleSizeInBytes;
  }

  // Buffers one fail; mirror image of addAckTuple (a set holding acks is
  // flushed before fails are added).
  public void addFailTuple(HeronTuples.AckTuple.Builder newTuple, long tupleSizeInBytes) {
    if (currentControlTuple == null
        || currentControlTuple.getAcksCount() > 0
        || currentControlTuple.getFailsCount() >= controlTupleSetCapacity) {
      initNewControlTuple();
    }
    currentControlTuple.addFails(newTuple);

    // Add the size of data in bytes ready to send out
    totalDataEmittedInBytes += tupleSizeInBytes;
  }

  // Flushes whatever is buffered and starts a fresh data tuple set bound to
  // the given stream (component name comes from the physical plan helper).
  private void initNewDataTuple(String streamId) {
    flushRemaining();

    // Reset the set for data tuple
    currentDataTupleSizeInBytes = 0;

    TopologyAPI.StreamId.Builder sbldr = TopologyAPI.StreamId.newBuilder();
    sbldr.setId(streamId);
    sbldr.setComponentName(helper.getMyComponent());
    currentDataTuple = HeronTuples.HeronDataTupleSet.newBuilder();
    currentDataTuple.setStream(sbldr);
  }

  // Flushes whatever is buffered and starts a fresh control tuple set.
  private void initNewControlTuple() {
    flushRemaining();
    currentControlTuple = HeronTuples.HeronControlTupleSet.newBuilder();
  }

  // Wraps any in-progress data/control sets into HeronTupleSet messages
  // (tagged with this task's id) and offers them to the out queue.
  private void flushRemaining() {
    if (currentDataTuple != null) {
      HeronTuples.HeronTupleSet.Builder bldr = HeronTuples.HeronTupleSet.newBuilder();
      bldr.setSrcTaskId(helper.getMyTaskId());
      bldr.setData(currentDataTuple);

      pushTupleToQueue(bldr, outQueue);

      currentDataTuple = null;
    }
    if (currentControlTuple != null) {
      HeronTuples.HeronTupleSet.Builder bldr = HeronTuples.HeronTupleSet.newBuilder();
      bldr.setSrcTaskId(helper.getMyTaskId());
      bldr.setControl(currentControlTuple);

      pushTupleToQueue(bldr, outQueue);

      currentControlTuple = null;
    }
  }

  private void pushTupleToQueue(HeronTuples.HeronTupleSet.Builder bldr,
                                Communicator<Message> out) {
    // The Communicator has un-bounded capacity so the offer will always be successful
    out.offer(bldr.build());
  }

  // Return true we could offer item to outQueue
  public boolean isOutQueuesAvailable() {
    return outQueue.size() < outQueue.getExpectedAvailableCapacity();
  }

  // Lifetime total of bytes buffered through this collection (data + control).
  public long getTotalDataEmittedInBytes() {
    return totalDataEmittedInBytes;
  }

  // Clean the internal state of OutgoingTupleCollection
  public void clear() {
    currentControlTuple = null;
    currentDataTuple = null;

    outQueue.clear();
  }

  // Swaps in an updated physical plan (e.g. after a topology update).
  public void updatePhysicalPlanHelper(PhysicalPlanHelper physicalPlanHelper) {
    this.helper = physicalPlanHelper;
  }
}
| apache-2.0 |
ServiceComb/java-chassis | foundations/foundation-protobuf/src/test/java/org/apache/servicecomb/foundation/protobuf/performance/cases/Mixed.java | 6639 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.servicecomb.foundation.protobuf.performance.cases;
import java.util.Arrays;
import java.util.LinkedHashMap;
import org.apache.servicecomb.foundation.protobuf.internal.model.ProtobufRoot;
import org.apache.servicecomb.foundation.protobuf.internal.model.ProtobufRoot.User;
import org.apache.servicecomb.foundation.protobuf.performance.TestBase;
import org.apache.servicecomb.foundation.test.scaffolding.model.Color;
/**
 * Performance-test fixture exercising every supported field kind at once:
 * scalars (primitive and boxed), string, enum, nested message, maps, packed
 * repeated primitives, and repeated strings/messages.
 *
 * <p>The constructor populates the POJO model ({@code pojoRoot}, inherited
 * from TestBase) and the protobuf builder ({@code builder}) with mirrored
 * values so the two representations describe the same logical payload.
 */
public class Mixed extends TestBase {
  public Mixed() {
    // ---- POJO side: scalar primitives ----
    pojoRoot.setInt32(10000);
    pojoRoot.setInt64(20000L);
    pojoRoot.setUint32(30000);
    pojoRoot.setUint64(40000L);
    pojoRoot.setSint32(50000);
    pojoRoot.setSint64(60000L);
    pojoRoot.setFixed32(70000);
    pojoRoot.setFixed64(80000L);
    pojoRoot.setSfixed32(90000);
    pojoRoot.setSfixed64(100000L);
    pojoRoot.setFloatValue((float) 10000);
    pojoRoot.setDoubleValue(20000.0);
    pojoRoot.setBool(true);

    // ---- POJO side: boxed ("obj") variants of the same scalars ----
    pojoRoot.setObjInt32(10000);
    pojoRoot.setObjInt64(20000L);
    pojoRoot.setObjUint32(30000);
    pojoRoot.setObjUint64(40000L);
    pojoRoot.setObjSint32(50000);
    pojoRoot.setObjSint64(60000L);
    pojoRoot.setObjFixed32(70000);
    pojoRoot.setObjFixed64(80000L);
    pojoRoot.setObjSfixed32(90000);
    pojoRoot.setObjSfixed64(100000L);
    pojoRoot.setObjFloatValue((float) 10000);
    pojoRoot.setObjDoubleValue(20000.0);
    pojoRoot.setObjBool(true);

    // ---- POJO side: string, enum, nested message ----
    pojoRoot.setString("string value");
    pojoRoot.setColor(Color.BLUE);
    pojoRoot.setUser(new org.apache.servicecomb.foundation.protobuf.internal.model.User("name1"));

    // ---- POJO side: string->string and string->message maps ----
    pojoRoot.setSsMap(new LinkedHashMap<>());
    pojoRoot.getSsMap().put("k1", "v1");
    pojoRoot.getSsMap().put("k2", "v2");

    pojoRoot.setSpMap(new LinkedHashMap<>());
    pojoRoot.getSpMap().put("u1", new org.apache.servicecomb.foundation.protobuf.internal.model.User().name("name1"));
    pojoRoot.getSpMap().put("u2", new org.apache.servicecomb.foundation.protobuf.internal.model.User().name("name2"));

    // ---- POJO side: packed repeated primitives, repeated strings/messages ----
    pojoRoot.setInt32sPacked(Arrays.asList(10000, 20000, 30000));
    pojoRoot.setInt64sPacked(Arrays.asList(10000L, 20000L, 30000L));
    pojoRoot.setUint32sPacked(Arrays.asList(10000, 20000, 30000));
    pojoRoot.setUint64sPacked(Arrays.asList(10000L, 20000L, 30000L));
    pojoRoot.setSint32sPacked(Arrays.asList(10000, 20000, 30000));
    pojoRoot.setSint64sPacked(Arrays.asList(10000L, 20000L, 30000L));
    pojoRoot.setFixed32sPacked(Arrays.asList(10000, 20000, 30000));
    pojoRoot.setFixed64sPacked(Arrays.asList(10000L, 20000L, 30000L));
    pojoRoot.setSfixed32sPacked(Arrays.asList(10000, 20000, 30000));
    pojoRoot.setSfixed64sPacked(Arrays.asList(10000L, 20000L, 30000L));
    pojoRoot.setFloatsPacked(Arrays.asList((float) 10000, (float) 20000, (float) 30000));
    pojoRoot.setDoublesPacked(Arrays.asList(10000.0, 20000.0, 30000.0));
    pojoRoot.setBoolsPacked(Arrays.asList(true, false));
    pojoRoot.setColorsPacked(Arrays.asList(Color.RED, Color.BLUE));
    pojoRoot.setStrings(Arrays.asList("string value1", "string value2"));
    pojoRoot.setUsers(Arrays.asList(
        new org.apache.servicecomb.foundation.protobuf.internal.model.User().name("name1"),
        new org.apache.servicecomb.foundation.protobuf.internal.model.User().name("name2"),
        new org.apache.servicecomb.foundation.protobuf.internal.model.User().name("name3"),
        new org.apache.servicecomb.foundation.protobuf.internal.model.User().name("name4")));

    // ---- Protobuf builder side: same payload mirrored field-for-field.
    // Note setColorValue(2) corresponds to Color.BLUE above. ----
    builder.setInt32(10000)
        .setInt64(20000L)
        .setUint32(30000)
        .setUint64(40000L)
        .setSint32(50000)
        .setSint64(60000L)
        .setFixed32(70000)
        .setFixed64(80000L)
        .setSfixed32(90000)
        .setSfixed64(100000L)
        .setFloatValue((float) 10000)
        .setDoubleValue(20000.0)
        .setBool(true)
        .setObjInt32(10000)
        .setObjInt64(20000L)
        .setObjUint32(30000)
        .setObjUint64(40000L)
        .setObjSint32(50000)
        .setObjSint64(60000L)
        .setObjFixed32(70000)
        .setObjFixed64(80000L)
        .setObjSfixed32(90000)
        .setObjSfixed64(100000L)
        .setObjFloatValue((float) 10000)
        .setObjDoubleValue(20000.0)
        .setObjBool(true)
        .setString("string value")
        .setColorValue(2)
        .setUser(User.newBuilder().setName("name1").build())
        .putSsMap("k1", "v1")
        .putSsMap("k2", "v2")
        .putSpMap("u1", User.newBuilder().setName("name1").build())
        .putSpMap("u2", User.newBuilder().setName("name2").build())
        .addAllInt32SPacked(Arrays.asList(10000, 20000, 30000))
        .addAllInt64SPacked(Arrays.asList(10000L, 20000L, 30000L))
        .addAllUint32SPacked(Arrays.asList(10000, 20000, 30000))
        .addAllUint64SPacked(Arrays.asList(10000L, 20000L, 30000L))
        .addAllSint32SPacked(Arrays.asList(10000, 20000, 30000))
        .addAllSint64SPacked(Arrays.asList(10000L, 20000L, 30000L))
        .addAllFixed32SPacked(Arrays.asList(10000, 20000, 30000))
        .addAllFixed64SPacked(Arrays.asList(10000L, 20000L, 30000L))
        .addAllSfixed32SPacked(Arrays.asList(10000, 20000, 30000))
        .addAllSfixed64SPacked(Arrays.asList(10000L, 20000L, 30000L))
        .addAllFloatsPacked(Arrays.asList((float) 10000, (float) 20000, (float) 30000))
        .addAllDoublesPacked(Arrays.asList(10000.0, 20000.0, 30000.0))
        .addAllBoolsPacked(Arrays.asList(true, false))
        .addAllColorsPacked(Arrays.asList(ProtobufRoot.Color.RED, ProtobufRoot.Color.BLUE))
        .addStrings("string value1")
        .addStrings("string value2")
        .addUsers(User.newBuilder().setName("name1").build())
        .addUsers(User.newBuilder().setName("name2").build())
        .addUsers(User.newBuilder().setName("name3").build())
        .addUsers(User.newBuilder().setName("name4").build());
  }
}
| apache-2.0 |
mayonghui2112/helloWorld | sourceCode/testMaven/onjava8/src/main/java/strings/Hex.java | 1391 | // strings/Hex.java
// (c)2017 MindView LLC: see Copyright.txt
// We make no guarantees that this code is fit for any purpose.
// Visit http://OnJava8.com for more book information.
// {java onjava.Hex}
package strings;
import java.io.*;
import java.nio.file.*;
/**
 * Renders a byte array as a classic hex dump: 16 bytes per row, each row
 * prefixed with its starting offset as a five-digit hex address.
 */
public class Hex {

    /**
     * Formats {@code data} as rows of 16 space-separated hex bytes.
     * A trailing newline is always appended (so a full final row is
     * followed by a blank line — matching the historical output).
     *
     * @param data bytes to render; an empty array yields just "\n"
     * @return the formatted dump
     */
    public static String format(byte[] data) {
        StringBuilder dump = new StringBuilder();
        for (int i = 0; i < data.length; i++) {
            if (i % 16 == 0) {
                // Row header: offset of the first byte in this row.
                dump.append(String.format("%05X: ", i));
            }
            dump.append(String.format("%02X ", data[i]));
            if ((i + 1) % 16 == 0) {
                dump.append("\n");
            }
        }
        dump.append("\n");
        return dump.toString();
    }

    /**
     * Dumps the file named by the first argument, or this class's own
     * compiled bytecode when no argument is given.
     */
    public static void main(String[] args) throws Exception {
        Path target = (args.length == 0)
            // Test by displaying this class file:
            ? Paths.get("build/classes/main/onjava/Hex.class")
            : Paths.get(args[0]);
        System.out.println(format(Files.readAllBytes(target)));
    }
}
/* Output: (First 6 Lines)
00000: CA FE BA BE 00 00 00 34 00 61 0A 00 05 00 31 07
00010: 00 32 0A 00 02 00 31 08 00 33 07 00 34 0A 00 35
00020: 00 36 0A 00 0F 00 37 0A 00 02 00 38 08 00 39 0A
00030: 00 3A 00 3B 08 00 3C 0A 00 02 00 3D 09 00 3E 00
00040: 3F 08 00 40 07 00 41 0A 00 42 00 43 0A 00 44 00
00050: 45 0A 00 14 00 46 0A 00 47 00 48 07 00 49 01 00
...
*/
| apache-2.0 |
ullgren/camel | core/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/AtomixMessagingComponentBuilderFactory.java | 15769 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.builder.component.dsl;
import javax.annotation.Generated;
import org.apache.camel.Component;
import org.apache.camel.builder.component.AbstractComponentBuilder;
import org.apache.camel.builder.component.ComponentBuilder;
import org.apache.camel.component.atomix.client.messaging.AtomixMessagingComponent;
/**
* The atomix-messaging component is used to access Atomix's group messaging.
*
* Generated by camel-package-maven-plugin - do not edit this file!
*/
@Generated("org.apache.camel.maven.packaging.ComponentDslMojo")
public interface AtomixMessagingComponentBuilderFactory {
    /**
     * Atomix Messaging (camel-atomix)
     * The atomix-messaging component is used to access Atomix's group
     * messaging.
     * 
     * Category: clustering
     * Since: 2.20
     * Maven coordinates: org.apache.camel:camel-atomix
     */
    static AtomixMessagingComponentBuilder atomixMessaging() {
        return new AtomixMessagingComponentBuilderImpl();
    }
    /**
     * Builder for the Atomix Messaging component.
     */
    interface AtomixMessagingComponentBuilder
            extends
                ComponentBuilder<AtomixMessagingComponent> {
        /**
         * The Atomix instance to use.
         * 
         * The option is a: <code>io.atomix.Atomix</code> type.
         * 
         * Group: common
         */
        default AtomixMessagingComponentBuilder atomix(io.atomix.Atomix atomix) {
            doSetProperty("atomix", atomix);
            return this;
        }
        /**
         * The broadcast type.
         * 
         * The option is a:
         * <code>org.apache.camel.component.atomix.client.messaging.AtomixMessaging.BroadcastType</code> type.
         * 
         * Default: ALL
         * Group: common
         */
        default AtomixMessagingComponentBuilder broadcastType(
                org.apache.camel.component.atomix.client.messaging.AtomixMessaging.BroadcastType broadcastType) {
            doSetProperty("broadcastType", broadcastType);
            return this;
        }
        /**
         * The messaging channel name.
         * 
         * The option is a: <code>java.lang.String</code> type.
         * 
         * Group: common
         */
        default AtomixMessagingComponentBuilder channelName(
                java.lang.String channelName) {
            doSetProperty("channelName", channelName);
            return this;
        }
        /**
         * The shared component configuration.
         * 
         * The option is a:
         * <code>org.apache.camel.component.atomix.client.messaging.AtomixMessagingConfiguration</code> type.
         * 
         * Group: common
         */
        default AtomixMessagingComponentBuilder configuration(
                org.apache.camel.component.atomix.client.messaging.AtomixMessagingConfiguration configuration) {
            doSetProperty("configuration", configuration);
            return this;
        }
        /**
         * The path to the AtomixClient configuration.
         * 
         * The option is a: <code>java.lang.String</code> type.
         * 
         * Group: common
         */
        default AtomixMessagingComponentBuilder configurationUri(
                java.lang.String configurationUri) {
            doSetProperty("configurationUri", configurationUri);
            return this;
        }
        /**
         * The default action.
         * 
         * The option is a:
         * <code>org.apache.camel.component.atomix.client.messaging.AtomixMessaging.Action</code> type.
         * 
         * Default: DIRECT
         * Group: common
         */
        default AtomixMessagingComponentBuilder defaultAction(
                org.apache.camel.component.atomix.client.messaging.AtomixMessaging.Action defaultAction) {
            doSetProperty("defaultAction", defaultAction);
            return this;
        }
        /**
         * The Atomix Group member name.
         * 
         * The option is a: <code>java.lang.String</code> type.
         * 
         * Group: common
         */
        default AtomixMessagingComponentBuilder memberName(
                java.lang.String memberName) {
            doSetProperty("memberName", memberName);
            return this;
        }
        /**
         * The nodes the AtomixClient should connect to.
         * 
         * The option is a:
         * <code>java.util.List<io.atomix.catalyst.transport.Address></code>
         * type.
         * 
         * Group: common
         */
        default AtomixMessagingComponentBuilder nodes(
                java.util.List<io.atomix.catalyst.transport.Address> nodes) {
            doSetProperty("nodes", nodes);
            return this;
        }
        /**
         * The header that wil carry the result.
         * 
         * The option is a: <code>java.lang.String</code> type.
         * 
         * Group: common
         */
        default AtomixMessagingComponentBuilder resultHeader(
                java.lang.String resultHeader) {
            doSetProperty("resultHeader", resultHeader);
            return this;
        }
        /**
         * The class name (fqn) of the Atomix transport.
         * 
         * The option is a: <code>java.lang.String</code> type.
         * 
         * Default: io.atomix.catalyst.transport.netty.NettyTransport
         * Group: common
         */
        default AtomixMessagingComponentBuilder transportClassName(
                java.lang.String transportClassName) {
            doSetProperty("transportClassName", transportClassName);
            return this;
        }
        /**
         * Allows for bridging the consumer to the Camel routing Error Handler,
         * which mean any exceptions occurred while the consumer is trying to
         * pickup incoming messages, or the likes, will now be processed as a
         * message and handled by the routing Error Handler. By default the
         * consumer will use the org.apache.camel.spi.ExceptionHandler to deal
         * with exceptions, that will be logged at WARN or ERROR level and
         * ignored.
         * 
         * The option is a: <code>boolean</code> type.
         * 
         * Default: false
         * Group: consumer
         */
        default AtomixMessagingComponentBuilder bridgeErrorHandler(
                boolean bridgeErrorHandler) {
            doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
            return this;
        }
        /**
         * Whether the producer should be started lazy (on the first message).
         * By starting lazy you can use this to allow CamelContext and routes to
         * startup in situations where a producer may otherwise fail during
         * starting and cause the route to fail being started. By deferring this
         * startup to be lazy then the startup failure can be handled during
         * routing messages via Camel's routing error handlers. Beware that when
         * the first message is processed then creating and starting the
         * producer may take a little time and prolong the total processing time
         * of the processing.
         * 
         * The option is a: <code>boolean</code> type.
         * 
         * Default: false
         * Group: producer
         */
        default AtomixMessagingComponentBuilder lazyStartProducer(
                boolean lazyStartProducer) {
            doSetProperty("lazyStartProducer", lazyStartProducer);
            return this;
        }
        /**
         * Whether the component should use basic property binding (Camel 2.x)
         * or the newer property binding with additional capabilities.
         * 
         * The option is a: <code>boolean</code> type.
         * 
         * Default: false
         * Group: advanced
         */
        default AtomixMessagingComponentBuilder basicPropertyBinding(
                boolean basicPropertyBinding) {
            doSetProperty("basicPropertyBinding", basicPropertyBinding);
            return this;
        }
        /**
         * The cluster wide default resource configuration.
         * 
         * The option is a: <code>java.util.Properties</code> type.
         * 
         * Group: advanced
         */
        default AtomixMessagingComponentBuilder defaultResourceConfig(
                java.util.Properties defaultResourceConfig) {
            doSetProperty("defaultResourceConfig", defaultResourceConfig);
            return this;
        }
        /**
         * The local default resource options.
         * 
         * The option is a: <code>java.util.Properties</code> type.
         * 
         * Group: advanced
         */
        default AtomixMessagingComponentBuilder defaultResourceOptions(
                java.util.Properties defaultResourceOptions) {
            doSetProperty("defaultResourceOptions", defaultResourceOptions);
            return this;
        }
        /**
         * Sets if the local member should join groups as PersistentMember or
         * not. If set to ephemeral the local member will receive an auto
         * generated ID thus the local one is ignored.
         * 
         * The option is a: <code>boolean</code> type.
         * 
         * Default: false
         * Group: advanced
         */
        default AtomixMessagingComponentBuilder ephemeral(boolean ephemeral) {
            doSetProperty("ephemeral", ephemeral);
            return this;
        }
        /**
         * The read consistency level.
         * 
         * The option is a: <code>io.atomix.resource.ReadConsistency</code>
         * type.
         * 
         * Group: advanced
         */
        default AtomixMessagingComponentBuilder readConsistency(
                io.atomix.resource.ReadConsistency readConsistency) {
            doSetProperty("readConsistency", readConsistency);
            return this;
        }
        /**
         * Cluster wide resources configuration.
         * 
         * The option is a: <code>java.util.Map<java.lang.String,
         * java.util.Properties></code> type.
         * 
         * Group: advanced
         */
        default AtomixMessagingComponentBuilder resourceConfigs(
                java.util.Map<java.lang.String, java.util.Properties> resourceConfigs) {
            doSetProperty("resourceConfigs", resourceConfigs);
            return this;
        }
        /**
         * Local resources configurations.
         * 
         * The option is a: <code>java.util.Map<java.lang.String,
         * java.util.Properties></code> type.
         * 
         * Group: advanced
         */
        default AtomixMessagingComponentBuilder resourceOptions(
                java.util.Map<java.lang.String, java.util.Properties> resourceOptions) {
            doSetProperty("resourceOptions", resourceOptions);
            return this;
        }
    }
    // Generated builder implementation: instantiates the component and applies
    // each named option via the switch below. Do not edit by hand.
    class AtomixMessagingComponentBuilderImpl
            extends
                AbstractComponentBuilder<AtomixMessagingComponent>
            implements
                AtomixMessagingComponentBuilder {
        @Override
        protected AtomixMessagingComponent buildConcreteComponent() {
            return new AtomixMessagingComponent();
        }
        // Lazily creates the shared configuration so configuration-level option
        // setters work even before an explicit configuration has been assigned.
        private org.apache.camel.component.atomix.client.messaging.AtomixMessagingConfiguration getOrCreateConfiguration(
                org.apache.camel.component.atomix.client.messaging.AtomixMessagingComponent component) {
            if (component.getConfiguration() == null) {
                component.setConfiguration(new org.apache.camel.component.atomix.client.messaging.AtomixMessagingConfiguration());
            }
            return component.getConfiguration();
        }
        @Override
        protected boolean setPropertyOnComponent(
                Component component,
                String name,
                Object value) {
            // Generated name-based dispatch: one case per configurable option;
            // returns false for unknown option names.
            switch (name) {
            case "atomix": getOrCreateConfiguration((AtomixMessagingComponent) component).setAtomix((io.atomix.Atomix) value); return true;
            case "broadcastType": getOrCreateConfiguration((AtomixMessagingComponent) component).setBroadcastType((org.apache.camel.component.atomix.client.messaging.AtomixMessaging.BroadcastType) value); return true;
            case "channelName": getOrCreateConfiguration((AtomixMessagingComponent) component).setChannelName((java.lang.String) value); return true;
            case "configuration": ((AtomixMessagingComponent) component).setConfiguration((org.apache.camel.component.atomix.client.messaging.AtomixMessagingConfiguration) value); return true;
            case "configurationUri": ((AtomixMessagingComponent) component).setConfigurationUri((java.lang.String) value); return true;
            case "defaultAction": getOrCreateConfiguration((AtomixMessagingComponent) component).setDefaultAction((org.apache.camel.component.atomix.client.messaging.AtomixMessaging.Action) value); return true;
            case "memberName": getOrCreateConfiguration((AtomixMessagingComponent) component).setMemberName((java.lang.String) value); return true;
            case "nodes": ((AtomixMessagingComponent) component).setNodes((java.util.List) value); return true;
            case "resultHeader": getOrCreateConfiguration((AtomixMessagingComponent) component).setResultHeader((java.lang.String) value); return true;
            case "transportClassName": getOrCreateConfiguration((AtomixMessagingComponent) component).setTransportClassName((java.lang.String) value); return true;
            case "bridgeErrorHandler": ((AtomixMessagingComponent) component).setBridgeErrorHandler((boolean) value); return true;
            case "lazyStartProducer": ((AtomixMessagingComponent) component).setLazyStartProducer((boolean) value); return true;
            case "basicPropertyBinding": ((AtomixMessagingComponent) component).setBasicPropertyBinding((boolean) value); return true;
            case "defaultResourceConfig": getOrCreateConfiguration((AtomixMessagingComponent) component).setDefaultResourceConfig((java.util.Properties) value); return true;
            case "defaultResourceOptions": getOrCreateConfiguration((AtomixMessagingComponent) component).setDefaultResourceOptions((java.util.Properties) value); return true;
            case "ephemeral": getOrCreateConfiguration((AtomixMessagingComponent) component).setEphemeral((boolean) value); return true;
            case "readConsistency": getOrCreateConfiguration((AtomixMessagingComponent) component).setReadConsistency((io.atomix.resource.ReadConsistency) value); return true;
            case "resourceConfigs": getOrCreateConfiguration((AtomixMessagingComponent) component).setResourceConfigs((java.util.Map) value); return true;
            case "resourceOptions": getOrCreateConfiguration((AtomixMessagingComponent) component).setResourceOptions((java.util.Map) value); return true;
            default: return false;
            }
        }
    }
}
davidvgalbraith/elasticsearch | core/src/main/java/org/elasticsearch/gateway/PriorityComparator.java | 3304 | /*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.gateway;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.settings.Settings;
import java.util.Comparator;
/**
* A comparator that compares ShardRouting based on it's indexes priority (index.priority),
* it's creation date (index.creation_date), or eventually by it's index name in reverse order.
* We try to recover first shards from an index with the highest priority, if that's the same
* we try to compare the timestamp the index is created and pick the newer first (time-based indices,
* here the newer indices matter more). If even that is the same, we compare the index name which is useful
* if the date is baked into the index name. ie logstash-2015.05.03.
*/
public abstract class PriorityComparator implements Comparator<ShardRouting> {

    @Override
    public final int compare(ShardRouting o1, ShardRouting o2) {
        final String o1Index = o1.index();
        final String o2Index = o2.index();
        int cmp = 0;
        if (o1Index.equals(o2Index) == false) {
            final Settings settingsO1 = getIndexSettings(o1Index);
            final Settings settingsO2 = getIndexSettings(o2Index);
            // Arguments are deliberately reversed: higher priority, newer
            // creation date and lexicographically later index name sort first.
            cmp = Long.compare(priority(settingsO2), priority(settingsO1));
            if (cmp == 0) {
                cmp = Long.compare(timeCreated(settingsO2), timeCreated(settingsO1));
                if (cmp == 0) {
                    cmp = o2Index.compareTo(o1Index);
                }
            }
        }
        // Shards of the same index compare equal (cmp stays 0).
        return cmp;
    }

    /** Reads the index priority setting ({@code index.priority}) from the given settings. */
    private int priority(Settings settings) {
        return IndexMetaData.INDEX_PRIORITY_SETTING.get(settings);
    }

    /** Reads the index creation timestamp, or {@code -1} when not recorded. */
    private long timeCreated(Settings settings) {
        // -1L: use the uppercase suffix; the original lowercase 'l' is easily
        // misread as the digit '1'.
        return settings.getAsLong(IndexMetaData.SETTING_CREATION_DATE, -1L);
    }

    /** Resolves the {@link Settings} for the given index name. */
    protected abstract Settings getIndexSettings(String index);

    /**
     * Returns a PriorityComparator that uses the RoutingAllocation index metadata to access the index setting per index.
     */
    public static PriorityComparator getAllocationComparator(final RoutingAllocation allocation) {
        return new PriorityComparator() {
            @Override
            protected Settings getIndexSettings(String index) {
                IndexMetaData indexMetaData = allocation.metaData().index(index);
                return indexMetaData.getSettings();
            }
        };
    }
}
| apache-2.0 |
carpedm20/pinpoint | profiler/src/test/java/com/navercorp/pinpoint/profiler/modifier/db/interceptor/BindValueUtilsTest.java | 3927 | /*
* Copyright 2014 NAVER Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.navercorp.pinpoint.profiler.modifier.db.interceptor;
import org.junit.Assert;
import org.junit.Test;
import com.navercorp.pinpoint.profiler.modifier.db.interceptor.BindValueUtils;
public class BindValueUtilsTest {
@Test
public void testBindValueToString() throws Exception {
String[] bindValue = {"a", "b"};
String result = BindValueUtils.bindValueToString(bindValue);
Assert.assertEquals("a, b", result);
}
@Test
public void testBindValueToString_limit1() throws Exception {
String[] bindValue = {"a", "b"};
String result = BindValueUtils.bindValueToString(bindValue, 0);
Assert.assertEquals("...(2)", result);
}
@Test
public void testBindValueToString_limit2() throws Exception {
String[] bindValue = {"a", "b"};
String result = BindValueUtils.bindValueToString(bindValue, 1);
Assert.assertEquals("a, ...(2)", result);
}
@Test
public void testBindValueToString_limit3() throws Exception {
String[] bindValue = {"abc", "b"};
String result = BindValueUtils.bindValueToString(bindValue, 1);
Assert.assertEquals("a...(3), ...(2)", result);
}
@Test
public void testBindValueToString_limit4() throws Exception {
String[] bindValue = {"abc", "b", "c"};
String result = BindValueUtils.bindValueToString(bindValue, 1);
Assert.assertEquals("a...(3), ...(3)", result);
}
@Test
public void testBindValueToString_limit5() throws Exception {
String[] bindValue = {"abc", "b", "c"};
String result = BindValueUtils.bindValueToString(bindValue, 1024);
Assert.assertEquals("abc, b, c", result);
}
@Test
public void testBindValueToString_limit6() throws Exception {
String[] bindValue = {"a", "b", "1234567891012"};
// limit is smaller than 3rd arg.
String result = BindValueUtils.bindValueToString(bindValue, 10);
Assert.assertEquals("a, b, 1234567891...(13)", result);
}
@Test
public void testBindValueToString_limit7() throws Exception {
String[] bindValue = {"a", "12345678901", "c"};
// limit is smaller than 2nd arg.
String result = BindValueUtils.bindValueToString(bindValue, 10);
Assert.assertEquals("a, 1234567890...(11), ...(3)", result);
}
@Test
public void testBindValueToString_null() throws Exception {
String result = BindValueUtils.bindValueToString(null, 10);
Assert.assertEquals("", result);
}
@Test
public void testBindValueToString_native() throws Exception {
String[] bindValue = {"a", "b"};
String result = BindValueUtils.bindValueToString(bindValue, -1);
Assert.assertEquals("...(2)", result);
}
@Test
public void testBindValueToString_singleLargeString() throws Exception {
String[] bindValue = {"123456"};
String result = BindValueUtils.bindValueToString(bindValue, 5);
Assert.assertEquals("12345...(6)", result);
}
@Test
public void testBindValueToString_twoLargeString() throws Exception {
String[] bindValue = {"123456", "123456"};
String result = BindValueUtils.bindValueToString(bindValue, 5);
Assert.assertEquals("12345...(6), ...(2)", result);
}
} | apache-2.0 |
denvey/discuz_Android | libs/ZUtilsExtWidget/src/com/kit/widget/calendar/DayStyle.java | 2203 | package com.kit.widget.calendar;
import java.util.*;
// Static color and label lookups for rendering a calendar grid.
public class DayStyle {

	// Weekday labels indexed by the java.util.Calendar day-of-week constants
	// (SUNDAY = 1 .. SATURDAY = 7); slots outside that range stay null.
	private static final String[] WEEK_DAY_NAMES = buildWeekDayNames();

	// Builds the header label table. Sized 10 as in the original so every
	// Calendar day constant fits with room to spare.
	private static String[] buildWeekDayNames() {
		String[] names = new String[10];
		int[] days = {
				Calendar.SUNDAY, Calendar.MONDAY, Calendar.TUESDAY, Calendar.WEDNESDAY,
				Calendar.THURSDAY, Calendar.FRIDAY, Calendar.SATURDAY
		};
		String[] labels = {"周日", "周一", "周二", "周三", "周四", "周五", "周六"};
		for (int i = 0; i < days.length; i++) {
			names[days[i]] = labels[i];
		}
		return names;
	}

	/** Returns the header label for the given Calendar day-of-week constant. */
	public static String getWeekDayName(int iDay) {
		return WEEK_DAY_NAMES[iDay];
	}

	/** Header-row background color (identical whether or not a holiday). */
	public static int getColorFrameHeader(boolean bHoliday) {
		return 0x80FF8C00;
	}

	/** Header-row text color. */
	public static int getColorTextHeader(boolean bHoliday) {
		return bHoliday ? 0xffd0d0d0 : 0xffcccccc;
	}

	/** Day-number text color; today/marked cells take precedence over holiday. */
	public static int getColorText(boolean bHoliday, boolean bToday,
			boolean bMark) {
		if (bToday || bMark) {
			return 0xffffffff;
		}
		return bHoliday ? 0xfff0f0f0 : 0xff4682B4;
	}

	/** Day-cell background; precedence: today, then marked, then holiday. */
	public static int getColorBkg(boolean bHoliday, boolean bToday,
			boolean bMark) {
		if (bToday) {
			return 0xff88bb88;
		}
		if (bMark) {
			return 0xffff4500;
		}
		return bHoliday ? 0x99FFF5EE : 0x90ffffff;
	}

	/**
	 * Maps a zero-based column index to a Calendar day-of-week constant,
	 * honoring the configured first day of the week. Returns -1 when the
	 * first day is neither MONDAY nor SUNDAY.
	 */
	public static int getWeekDay(int index, int iFirstDayOfWeek) {
		int iWeekDay = -1;
		if (iFirstDayOfWeek == Calendar.MONDAY) {
			iWeekDay = index + Calendar.MONDAY;
			if (iWeekDay > Calendar.SATURDAY) {
				iWeekDay = Calendar.SUNDAY; // wrap the trailing Sunday column
			}
		}
		if (iFirstDayOfWeek == Calendar.SUNDAY) {
			iWeekDay = index + Calendar.SUNDAY;
		}
		return iWeekDay;
	}
}
| apache-2.0 |
sindicate/solidstack | src/solidstack/script/objects/Assoc.java | 914 | /*--
* Copyright 2012 René M. de Bloois
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package solidstack.script.objects;
/**
 * A simple immutable label/value pair (an association). Neither half is
 * validated; null is accepted for both.
 */
public class Assoc
{
	// Assigned once at construction and never mutated (final guards this).
	private final Object label;
	private final Object value;

	/**
	 * @param label The label half of the association (may be null).
	 * @param value The value half of the association (may be null).
	 */
	public Assoc( Object label, Object value )
	{
		this.label = label;
		this.value = value;
	}

	/** @return The label half of the association. */
	public Object getLabel()
	{
		return this.label;
	}

	/** @return The value half of the association. */
	public Object getValue()
	{
		return this.value;
	}
}
| apache-2.0 |
ProxyBuilder/proxybuilder | testusage/src/main/java/org/rapidpm/proxybuilder/proxy/generated/v012/MyClass.java | 1452 | /**
* Copyright © 2013 Sven Ruppert (sven.ruppert@gmail.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
Copyright © 2013 Sven Ruppert (sven.ruppert@gmail.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package org.rapidpm.proxybuilder.proxy.generated.v012;
import org.rapidpm.proxybuilder.proxy.generated.annotations.StaticMetricsProxy;
// Sample class for the proxy-builder test-usage module. The annotation marks
// it for annotation processing; NOTE(review): presumably a metrics-collecting
// proxy is generated for it — confirm against the @StaticMetricsProxy processor.
@StaticMetricsProxy
public class MyClass {
  // No-op body: only the (proxied) invocation itself is of interest here.
  // NOTE(review): doWork() is final — verify the generated proxy delegates
  // rather than subclassing, since a subclass could not override this.
  public final void doWork() {
  }
}
| apache-2.0 |
rogozinds/GridTree | gridtree/src/test/java/org/vaadin/gridtree/ContainerTest.java | 2584 | package org.vaadin.gridtree;
import java.util.Arrays;
import java.util.List;
import com.vaadin.data.Item;
import com.vaadin.data.util.HierarchicalContainer;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
/** Unit tests for {@code GridTreeContainer} expand/collapse behavior. */
public class ContainerTest {
	// Container under test; rebuilt from scratch before every test by createContainer().
	GridTreeContainer tc=null;
	/**
	 * Create container with such structure:
	 * -0
	 * -01
	 * -010
	 * -011
	 * -02
	 */
	@Before
	public void createContainer() {
		final HierarchicalContainer container=new HierarchicalContainer();
		container.addContainerProperty("id", String.class, "");
		final Item item0=container.addItem("0");
		item0.getItemProperty("id").setValue("id1");
		final Item item01=container.addItem("01");
		item01.getItemProperty("id").setValue("id2");
		final Item item02=container.addItem("02");
		item02.getItemProperty("id").setValue("id3");
		final Item item010=container.addItem("010");
		item010.getItemProperty("id").setValue("id4");
		final Item item011=container.addItem("011");
		item011.getItemProperty("id").setValue("id5");
		// Hierarchy: "01" and "02" under root "0"; "010" and "011" under "01".
		container.setParent("01", "0");
		container.setParent("02", "0");
		container.setParent("010", "01");
		container.setParent("011", "01");
		tc= new GridTreeContainer(container);
		//By default all items are collapsed, so only the root row is visible
		Assert.assertEquals(1, tc.size());
	}
	// Expanding the root reveals its two direct children (but not grandchildren).
	@Test
	public void testExpand() {
		tc.toogleCollapse("0");
		Assert.assertEquals(3,tc.size());
	}
	@Test
	public void testToogleCollapse() {
		tc.toogleCollapse("0");
		Assert.assertEquals(3, tc.size());
		tc.toogleCollapse("01");
		//All 5 items are shown
		Assert.assertEquals(5, tc.size());
		tc.toogleCollapse("0");
		//collapsing the top parent row should also collapse all its children
		Assert.assertEquals(1, tc.size());
	}
	@Test
	public void testToogleCollapseTwoTimes() {
		tc.toogleCollapse("0");
		tc.toogleCollapse("01");
		tc.toogleCollapse("0");
		tc.toogleCollapse("0");
		tc.toogleCollapse("01");
		//after collapsing and re-expanding both levels, all 5 items are visible again
		Assert.assertEquals(5, tc.size());
	}
	//Test that toogleCollapse returns the ids of the items whose state changed
	@Test
	public void testToogleCollapseReturnValue() {
		tc.toogleCollapse("0");
		tc.toogleCollapse("01");
		final List<Object> changedItems=tc.toogleCollapse("0");
		// Collapsing "0" also collapses the expanded child "01".
		final List<Object> expectedItemsChanged=(List<Object>)(List<?>) Arrays.asList("0","01");
		Assert.assertEquals("Items changed",expectedItemsChanged,changedItems);
	}
	// Depth in the tree: root is level 0, children 1, grandchildren 2.
	@Test
	public void testGetLevel() {
		Assert.assertEquals(0,tc.getLevel("0"));
		Assert.assertEquals(1,tc.getLevel("01"));
		Assert.assertEquals(2,tc.getLevel("010"));
	}
}
| apache-2.0 |
adessaigne/camel | core/camel-core/src/test/java/org/apache/camel/converter/JaxpTest.java | 4097 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.converter;
import java.io.InputStream;
import java.io.StringReader;
import javax.xml.transform.Source;
import javax.xml.transform.dom.DOMSource;
import javax.xml.transform.stream.StreamSource;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.apache.camel.TypeConverter;
import org.apache.camel.impl.converter.DefaultTypeConverter;
import org.apache.camel.impl.engine.DefaultClassResolver;
import org.apache.camel.impl.engine.DefaultFactoryFinderResolver;
import org.apache.camel.impl.engine.DefaultPackageScanClassResolver;
import org.apache.camel.support.service.ServiceHelper;
import org.apache.camel.util.ReflectionInjector;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.junit.jupiter.api.Assertions.*;
// Exercises the default type converter's JAXP (XML) conversions:
// String <-> Document, String -> Source, StreamSource -> DOMSource,
// Node -> Source -> InputStream.
public class JaxpTest {
    private static final Logger LOG = LoggerFactory.getLogger(JaxpTest.class);
    // Standalone converter wired with default resolvers (no CamelContext).
    // NOTE(review): the trailing 'false' flag's meaning is not visible here —
    // confirm against the DefaultTypeConverter constructor signature.
    protected TypeConverter converter = new DefaultTypeConverter(
            new DefaultPackageScanClassResolver(), new ReflectionInjector(),
            new DefaultFactoryFinderResolver().resolveDefaultFactoryFinder(new DefaultClassResolver()), false);
    @BeforeEach
    public void setUp() throws Exception {
        // Start the converter service before each test.
        ServiceHelper.startService(converter);
    }
    // Round-trip: XML String -> DOM Document -> String.
    @Test
    public void testConvertToDocument() throws Exception {
        Document document
                = converter.convertTo(Document.class, "<?xml version=\"1.0\" encoding=\"UTF-8\"?><hello>world!</hello>");
        assertNotNull(document);
        LOG.debug("Found document: " + document);
        // lets now convert back again
        String text = converter.convertTo(String.class, document);
        // The preamble changes a little under Java 1.6 it adds a
        // standalone="no" attribute.
        assertTrue(text.endsWith("<hello>world!</hello>"), "Converted to String: " + text);
    }
    @Test
    public void testConvertToSource() throws Exception {
        Source source = converter.convertTo(Source.class, "<hello>world!</hello>");
        assertNotNull(source);
        LOG.debug("Found document: " + source);
    }
    @Test
    public void testStreamSourceToDomSource() throws Exception {
        StreamSource streamSource = new StreamSource(new StringReader("<hello>world!</hello>"));
        DOMSource domSource = converter.convertTo(DOMSource.class, streamSource);
        assertNotNull(domSource, "Could not convert to a DOMSource!");
        LOG.debug("Found document: " + domSource);
    }
    // Chained conversion: DOM Element -> Source -> InputStream -> String.
    @Test
    public void testNodeToSourceThenToInputStream() throws Exception {
        Document document = converter.convertTo(Document.class, "<?xml version=\"1.0\"?><hello>world!</hello>");
        Element element = document.getDocumentElement();
        Source source = converter.convertTo(Source.class, element);
        assertNotNull(source, "Could not convert from Node to Source!");
        LOG.debug("Found source: " + source);
        InputStream in = converter.convertTo(InputStream.class, source);
        assertNotNull(in, "Could not convert from Source to InputStream!");
        String actualText = IOConverter.toString(in, null);
        assertEquals("<hello>world!</hello>", actualText, "Text");
    }
}
| apache-2.0 |
jenmalloy/enmasse | service-broker/src/main/java/io/enmasse/osb/api/ServiceType.java | 1395 | /*
* Copyright 2018, EnMasse authors.
* License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
*/
package io.enmasse.osb.api;
import java.util.Optional;
import java.util.UUID;
/**
 * The service types exposed by the broker, each pairing a fixed service id
 * (UUID) and service name with the address type it provisions.
 */
public enum ServiceType {
    // TODO: These are tied to the 'standard' address space
    ANYCAST("ac6348d6-eeea-43e5-9b97-5ed18da5dcaf", "enmasse-anycast", "anycast"),
    MULTICAST("7739ea7d-8de4-4fe8-8297-90f703904587", "enmasse-multicast", "multicast"),
    QUEUE("7739ea7d-8de4-4fe8-8297-90f703904589", "enmasse-queue", "queue"),
    TOPIC("7739ea7d-8de4-4fe8-8297-90f703904590", "enmasse-topic", "topic");

    // Final: enum constants are shared singletons, so their state must never
    // be mutated after construction.
    private final UUID uuid;
    private final String serviceName;
    private final String addressType;

    ServiceType(String uuid, String serviceName, String addressType) {
        this.uuid = UUID.fromString(uuid);
        this.serviceName = serviceName;
        this.addressType = addressType;
    }

    /**
     * Looks up the service type with the given service id.
     *
     * @param uuid the service id to resolve
     * @return the matching type, or an empty Optional if none matches
     */
    public static Optional<ServiceType> valueOf(UUID uuid) {
        for (ServiceType serviceType : values()) {
            if (serviceType.uuid().equals(uuid)) {
                return Optional.of(serviceType);
            }
        }
        return Optional.empty();
    }

    /** @return the fixed id of this service */
    public UUID uuid() {
        return uuid;
    }

    /** @return the service name (e.g. "enmasse-queue") */
    public String serviceName() {
        return serviceName;
    }

    /** @return the address type this service provisions (e.g. "queue") */
    public String addressType() {
        return addressType;
    }
}
| apache-2.0 |
matthewmcnew/jbosh | src/main/java/org/igniterealtime/jbosh/Attributes.java | 2902 | /*
* Copyright 2009 Mike Cumings
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.igniterealtime.jbosh;
import javax.xml.XMLConstants;
/**
 * Class containing constants for attribute definitions used by the
 * XEP-0124 specification. We shouldn't need to expose these outside
 * our package, since nobody else should be needing to worry about
 * them.
 */
final class Attributes {

    /**
     * Private constructor to prevent construction of library class.
     */
    private Attributes() {
        super();
    }

    // Qualified names for the attributes of the BOSH wrapper element, each
    // created in the BOSH namespace via BodyQName.createBOSH(...). The constant
    // name mirrors the attribute's local name as defined by XEP-0124.
    static final BodyQName ACCEPT = BodyQName.createBOSH("accept");
    static final BodyQName AUTHID = BodyQName.createBOSH("authid");
    static final BodyQName ACK = BodyQName.createBOSH("ack");
    static final BodyQName CHARSETS = BodyQName.createBOSH("charsets");
    static final BodyQName CONDITION = BodyQName.createBOSH("condition");
    static final BodyQName CONTENT = BodyQName.createBOSH("content");
    static final BodyQName FROM = BodyQName.createBOSH("from");
    static final BodyQName HOLD = BodyQName.createBOSH("hold");
    static final BodyQName INACTIVITY = BodyQName.createBOSH("inactivity");
    static final BodyQName KEY = BodyQName.createBOSH("key");
    static final BodyQName MAXPAUSE = BodyQName.createBOSH("maxpause");
    static final BodyQName NEWKEY = BodyQName.createBOSH("newkey");
    static final BodyQName PAUSE = BodyQName.createBOSH("pause");
    static final BodyQName POLLING = BodyQName.createBOSH("polling");
    static final BodyQName REPORT = BodyQName.createBOSH("report");
    static final BodyQName REQUESTS = BodyQName.createBOSH("requests");
    static final BodyQName RID = BodyQName.createBOSH("rid");
    static final BodyQName ROUTE = BodyQName.createBOSH("route");
    static final BodyQName SECURE = BodyQName.createBOSH("secure");
    static final BodyQName SID = BodyQName.createBOSH("sid");
    static final BodyQName STREAM = BodyQName.createBOSH("stream");
    static final BodyQName TIME = BodyQName.createBOSH("time");
    static final BodyQName TO = BodyQName.createBOSH("to");
    static final BodyQName TYPE = BodyQName.createBOSH("type");
    static final BodyQName VER = BodyQName.createBOSH("ver");
    static final BodyQName WAIT = BodyQName.createBOSH("wait");

    // xml:lang lives in the XML core namespace rather than the BOSH namespace,
    // so it is built with an explicit URI and prefix.
    static final BodyQName XML_LANG =
            BodyQName.createWithPrefix(XMLConstants.XML_NS_URI, "lang", "xml");
}
| apache-2.0 |
rangadi/beam | runners/google-cloud-dataflow-java/worker/src/test/java/org/apache/beam/runners/dataflow/worker/IntrinsicMapTaskExecutorFactoryTest.java | 31729 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.beam.runners.dataflow.worker;
import static org.apache.beam.runners.dataflow.util.Structs.addString;
import static org.apache.beam.runners.dataflow.worker.DataflowOutputCounter.getElementCounterName;
import static org.apache.beam.runners.dataflow.worker.DataflowOutputCounter.getMeanByteCounterName;
import static org.apache.beam.runners.dataflow.worker.DataflowOutputCounter.getObjectCounterName;
import static org.apache.beam.runners.dataflow.worker.counters.CounterName.named;
import static org.apache.beam.sdk.util.SerializableUtils.serializeToByteArray;
import static org.apache.beam.sdk.util.StringUtils.byteArrayToJsonString;
import static org.hamcrest.Matchers.hasItems;
import static org.hamcrest.Matchers.instanceOf;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertThat;
import static org.mockito.Matchers.anyBoolean;
import static org.mockito.Matchers.anyLong;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.verifyNoMoreInteractions;
import static org.mockito.Mockito.when;
import com.google.api.services.dataflow.model.FlattenInstruction;
import com.google.api.services.dataflow.model.InstructionInput;
import com.google.api.services.dataflow.model.InstructionOutput;
import com.google.api.services.dataflow.model.MapTask;
import com.google.api.services.dataflow.model.MultiOutputInfo;
import com.google.api.services.dataflow.model.ParDoInstruction;
import com.google.api.services.dataflow.model.ParallelInstruction;
import com.google.api.services.dataflow.model.PartialGroupByKeyInstruction;
import com.google.api.services.dataflow.model.ReadInstruction;
import com.google.api.services.dataflow.model.Source;
import com.google.api.services.dataflow.model.WriteInstruction;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
import com.google.common.graph.MutableNetwork;
import com.google.common.graph.Network;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.function.Function;
import javax.annotation.Nullable;
import org.apache.beam.runners.dataflow.util.CloudObject;
import org.apache.beam.runners.dataflow.util.CloudObjects;
import org.apache.beam.runners.dataflow.util.PropertyNames;
import org.apache.beam.runners.dataflow.worker.apiary.FixMultiOutputInfosOnParDoInstructions;
import org.apache.beam.runners.dataflow.worker.counters.Counter;
import org.apache.beam.runners.dataflow.worker.counters.Counter.CounterUpdateExtractor;
import org.apache.beam.runners.dataflow.worker.counters.CounterFactory.CounterMean;
import org.apache.beam.runners.dataflow.worker.counters.CounterSet;
import org.apache.beam.runners.dataflow.worker.fn.IdGenerator;
import org.apache.beam.runners.dataflow.worker.graph.Edges.Edge;
import org.apache.beam.runners.dataflow.worker.graph.Edges.MultiOutputInfoEdge;
import org.apache.beam.runners.dataflow.worker.graph.MapTaskToNetworkFunction;
import org.apache.beam.runners.dataflow.worker.graph.Nodes.ExecutionLocation;
import org.apache.beam.runners.dataflow.worker.graph.Nodes.InstructionOutputNode;
import org.apache.beam.runners.dataflow.worker.graph.Nodes.Node;
import org.apache.beam.runners.dataflow.worker.graph.Nodes.OperationNode;
import org.apache.beam.runners.dataflow.worker.graph.Nodes.ParallelInstructionNode;
import org.apache.beam.runners.dataflow.worker.util.common.worker.FlattenOperation;
import org.apache.beam.runners.dataflow.worker.util.common.worker.Operation;
import org.apache.beam.runners.dataflow.worker.util.common.worker.ParDoOperation;
import org.apache.beam.runners.dataflow.worker.util.common.worker.ReadOperation;
import org.apache.beam.runners.dataflow.worker.util.common.worker.Sink;
import org.apache.beam.runners.dataflow.worker.util.common.worker.WriteOperation;
import org.apache.beam.sdk.coders.BigEndianIntegerCoder;
import org.apache.beam.sdk.coders.Coder;
import org.apache.beam.sdk.coders.CoderRegistry;
import org.apache.beam.sdk.coders.IterableCoder;
import org.apache.beam.sdk.coders.KvCoder;
import org.apache.beam.sdk.coders.StringUtf8Coder;
import org.apache.beam.sdk.options.PipelineOptions;
import org.apache.beam.sdk.options.PipelineOptionsFactory;
import org.apache.beam.sdk.transforms.DoFn;
import org.apache.beam.sdk.transforms.Sum;
import org.apache.beam.sdk.transforms.windowing.IntervalWindow.IntervalWindowCoder;
import org.apache.beam.sdk.util.AppliedCombineFn;
import org.apache.beam.sdk.util.DoFnInfo;
import org.apache.beam.sdk.util.SerializableUtils;
import org.apache.beam.sdk.util.StringUtils;
import org.apache.beam.sdk.util.Transport;
import org.apache.beam.sdk.util.WindowedValue;
import org.apache.beam.sdk.util.WindowedValue.FullWindowedValueCoder;
import org.apache.beam.sdk.values.TupleTag;
import org.apache.beam.sdk.values.WindowingStrategy;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.MockitoAnnotations;
/**
 * Tests for {@link IntrinsicMapTaskExecutorFactory}.
 *
 * <p>Each test builds an apiary {@link MapTask} (or a single {@link ParallelInstruction}
 * node), runs it through the factory, and asserts on the resulting operation graph
 * and the counters recorded into {@link #counterSet}.
 */
@RunWith(JUnit4.class)
public class IntrinsicMapTaskExecutorFactoryTest {
  private static final String STAGE = "test";

  // Translates an apiary MapTask into the Node/Edge network form the factory
  // consumes, after normalizing ParDo multi-output infos.
  private static final Function<MapTask, MutableNetwork<Node, Edge>> mapTaskToNetwork =
      new FixMultiOutputInfosOnParDoInstructions(IdGenerator::generate)
          .andThen(new MapTaskToNetworkFunction());

  // Cloud encoding of WindowedValue<String> used for instruction inputs/outputs below.
  private static final CloudObject windowedStringCoder =
      CloudObjects.asCloudObject(WindowedValue.getValueOnlyCoder(StringUtf8Coder.of()));

  private IntrinsicMapTaskExecutorFactory mapTaskExecutorFactory;
  private PipelineOptions options;
  private ReaderRegistry readerRegistry;
  private SinkRegistry sinkRegistry;
  @Mock private Network<Node, Edge> network;
  @Mock private CounterUpdateExtractor<?> updateExtractor;
  private final CounterSet counterSet = new CounterSet();

  /** Builds the factory under test plus registries extended with the test reader/sink factories. */
  @Before
  public void setUp() {
    MockitoAnnotations.initMocks(this);
    mapTaskExecutorFactory = IntrinsicMapTaskExecutorFactory.defaultFactory();
    options = PipelineOptionsFactory.create();
    readerRegistry =
        ReaderRegistry.defaultRegistry()
            .register(
                ReaderFactoryTest.TestReaderFactory.class.getName(),
                new ReaderFactoryTest.TestReaderFactory())
            .register(
                ReaderFactoryTest.SingletonTestReaderFactory.class.getName(),
                new ReaderFactoryTest.SingletonTestReaderFactory());
    sinkRegistry =
        SinkRegistry.defaultRegistry()
            .register(TestSinkFactory.class.getName(), new TestSinkFactory());
  }

  /**
   * End-to-end wiring test: a Read -> (ParDo, ParDo) -> Flatten -> Write map task
   * produces the expected operation types, receiver wiring, and output counters.
   */
  @Test
  public void testCreateMapTaskExecutor() throws Exception {
    List<ParallelInstruction> instructions =
        Arrays.asList(
            createReadInstruction("Read"),
            createParDoInstruction(0, 0, "DoFn1"),
            createParDoInstruction(0, 0, "DoFnWithContext"),
            createFlattenInstruction(1, 0, 2, 0, "Flatten"),
            createWriteInstruction(3, 0, "Write"));
    MapTask mapTask = new MapTask();
    mapTask.setStageName(STAGE);
    mapTask.setSystemName("systemName");
    mapTask.setInstructions(instructions);
    mapTask.setFactory(Transport.getJsonFactory());
    try (DataflowMapTaskExecutor executor =
        mapTaskExecutorFactory.create(
            null /* beamFnControlClientHandler */,
            null /* beamFnDataService */,
            null, /* dataApiServiceDescriptor */
            null /* beamFnStateService */,
            mapTaskToNetwork.apply(mapTask),
            options,
            STAGE,
            readerRegistry,
            sinkRegistry,
            BatchModeExecutionContext.forTesting(options, counterSet, "testStage"),
            counterSet,
            IdGenerator::generate)) {
      // Safe covariant cast not expressible without rawtypes.
      @SuppressWarnings({"rawtypes", "unchecked"})
      List<Object> operations = (List) executor.operations;
      assertThat(
          operations,
          hasItems(
              instanceOf(ReadOperation.class),
              instanceOf(ParDoOperation.class),
              instanceOf(ParDoOperation.class),
              instanceOf(FlattenOperation.class),
              instanceOf(WriteOperation.class)));
      // Verify that the inputs are attached.
      ReadOperation readOperation =
          Iterables.getOnlyElement(Iterables.filter(operations, ReadOperation.class));
      // The read output fans out to both ParDo operations.
      assertEquals(2, readOperation.receivers[0].getReceiverCount());
      FlattenOperation flattenOperation =
          Iterables.getOnlyElement(Iterables.filter(operations, FlattenOperation.class));
      // Both ParDos feed the flatten; the flatten feeds the write.
      for (ParDoOperation operation : Iterables.filter(operations, ParDoOperation.class)) {
        assertSame(flattenOperation, operation.receivers[0].getOnlyReceiver());
      }
      WriteOperation writeOperation =
          Iterables.getOnlyElement(Iterables.filter(operations, WriteOperation.class));
      assertSame(writeOperation, flattenOperation.receivers[0].getOnlyReceiver());
    }
    @SuppressWarnings("unchecked")
    Counter<Long, ?> otherMsecCounter =
        (Counter<Long, ?>) counterSet.getExistingCounter("test-other-msecs");
    // "other" state only got created upon MapTaskExecutor.execute().
    assertNull(otherMsecCounter);
    counterSet.extractUpdates(false, updateExtractor);
    verifyOutputCounters(
        updateExtractor,
        "read_output_name",
        "DoFn1_output",
        "DoFnWithContext_output",
        "flatten_output_name");
    verify(updateExtractor).longSum(eq(named("Read-ByteCount")), anyBoolean(), anyLong());
    verify(updateExtractor).longSum(eq(named("Write-ByteCount")), anyBoolean(), anyLong());
    verifyNoMoreInteractions(updateExtractor);
  }

  /**
   * Asserts that element-count, object-count, and mean-byte counters were extracted
   * for every named output.
   */
  private static void verifyOutputCounters(
      CounterUpdateExtractor<?> updateExtractor, String... outputNames) {
    for (String outputName : outputNames) {
      verify(updateExtractor)
          .longSum(eq(named(getElementCounterName(outputName))), anyBoolean(), anyLong());
      verify(updateExtractor)
          .longSum(eq(named(getObjectCounterName(outputName))), anyBoolean(), anyLong());
      verify(updateExtractor)
          .longMean(
              eq(named(getMeanByteCounterName(outputName))),
              anyBoolean(),
              Mockito.<CounterMean<Long>>any());
    }
  }

  /** Executing a map task registers a step context for each ParDo system name. */
  @Test
  public void testExecutionContextPlumbing() throws Exception {
    List<ParallelInstruction> instructions =
        Arrays.asList(
            createReadInstruction("Read", ReaderFactoryTest.SingletonTestReaderFactory.class),
            createParDoInstruction(0, 0, "DoFn1", "DoFnUserName"),
            createParDoInstruction(1, 0, "DoFnWithContext", "DoFnWithContextUserName"));
    MapTask mapTask = new MapTask();
    mapTask.setStageName(STAGE);
    mapTask.setInstructions(instructions);
    mapTask.setFactory(Transport.getJsonFactory());
    BatchModeExecutionContext context =
        BatchModeExecutionContext.forTesting(options, counterSet, "testStage");
    try (DataflowMapTaskExecutor executor =
        mapTaskExecutorFactory.create(
            null /* beamFnControlClientHandler */,
            null /* beamFnDataService */,
            null, /* dataApiServiceDescriptor */
            null /* beamFnStateService */,
            mapTaskToNetwork.apply(mapTask),
            options,
            STAGE,
            readerRegistry,
            sinkRegistry,
            context,
            counterSet,
            IdGenerator::generate)) {
      executor.execute();
    }
    List<String> stepNames = new ArrayList<>();
    for (BatchModeExecutionContext.StepContext stepContext : context.getAllStepContexts()) {
      stepNames.add(stepContext.getNameContext().systemName());
    }
    assertThat(stepNames, hasItems("DoFn1", "DoFnWithContext"));
  }

  /** Builds a read instruction backed by the default {@code TestReaderFactory}. */
  static ParallelInstruction createReadInstruction(String name) {
    return createReadInstruction(name, ReaderFactoryTest.TestReaderFactory.class);
  }

  /** Builds a read instruction whose source spec names the given reader factory class. */
  static ParallelInstruction createReadInstruction(
      String name, Class<? extends ReaderFactory> readerFactoryClass) {
    CloudObject spec = CloudObject.forClass(readerFactoryClass);
    Source cloudSource = new Source();
    cloudSource.setSpec(spec);
    cloudSource.setCodec(windowedStringCoder);
    ReadInstruction readInstruction = new ReadInstruction();
    readInstruction.setSource(cloudSource);
    InstructionOutput output = new InstructionOutput();
    output.setName("read_output_name");
    output.setCodec(windowedStringCoder);
    output.setOriginalName("originalName");
    output.setSystemName("systemName");
    ParallelInstruction instruction = new ParallelInstruction();
    instruction.setSystemName(name);
    instruction.setOriginalName(name + "OriginalName");
    instruction.setRead(readInstruction);
    instruction.setOutputs(Arrays.asList(output));
    return instruction;
  }

  /** A single read instruction becomes an unstarted {@link ReadOperation} with one receiver. */
  @Test
  public void testCreateReadOperation() throws Exception {
    ParallelInstructionNode instructionNode =
        ParallelInstructionNode.create(createReadInstruction("Read"), ExecutionLocation.UNKNOWN);
    // Stub the network so the instruction node has exactly one downstream output receiver.
    when(network.successors(instructionNode))
        .thenReturn(
            ImmutableSet.<Node>of(
                IntrinsicMapTaskExecutorFactory.createOutputReceiversTransform(STAGE, counterSet)
                    .apply(
                        InstructionOutputNode.create(
                            instructionNode.getParallelInstruction().getOutputs().get(0)))));
    when(network.outDegree(instructionNode)).thenReturn(1);
    Node operationNode =
        mapTaskExecutorFactory
            .createOperationTransformForParallelInstructionNodes(
                STAGE,
                network,
                PipelineOptionsFactory.create(),
                readerRegistry,
                sinkRegistry,
                BatchModeExecutionContext.forTesting(options, counterSet, "testStage"))
            .apply(instructionNode);
    assertThat(operationNode, instanceOf(OperationNode.class));
    assertThat(((OperationNode) operationNode).getOperation(), instanceOf(ReadOperation.class));
    ReadOperation readOperation = (ReadOperation) ((OperationNode) operationNode).getOperation();
    assertEquals(1, readOperation.receivers.length);
    assertEquals(0, readOperation.receivers[0].getReceiverCount());
    assertEquals(Operation.InitializationState.UNSTARTED, readOperation.initializationState);
    assertThat(readOperation.reader, instanceOf(ReaderFactoryTest.TestReader.class));
    counterSet.extractUpdates(false, updateExtractor);
    verifyOutputCounters(updateExtractor, "read_output_name");
    verify(updateExtractor).longSum(eq(named("Read-ByteCount")), anyBoolean(), anyLong());
    verifyNoMoreInteractions(updateExtractor);
  }

  /** Builds a write instruction consuming the given producer output, sinking to {@link TestSink}. */
  static ParallelInstruction createWriteInstruction(
      int producerIndex, int producerOutputNum, String systemName) {
    InstructionInput cloudInput = new InstructionInput();
    cloudInput.setProducerInstructionIndex(producerIndex);
    cloudInput.setOutputNum(producerOutputNum);
    CloudObject spec =
        CloudObject.forClass(IntrinsicMapTaskExecutorFactoryTest.TestSinkFactory.class);
    com.google.api.services.dataflow.model.Sink cloudSink =
        new com.google.api.services.dataflow.model.Sink();
    cloudSink.setSpec(spec);
    cloudSink.setCodec(windowedStringCoder);
    WriteInstruction writeInstruction = new WriteInstruction();
    writeInstruction.setInput(cloudInput);
    writeInstruction.setSink(cloudSink);
    ParallelInstruction instruction = new ParallelInstruction();
    instruction.setWrite(writeInstruction);
    instruction.setSystemName(systemName);
    instruction.setOriginalName(systemName + "OriginalName");
    return instruction;
  }

  /** A write instruction becomes an unstarted {@link WriteOperation} wrapping a {@link TestSink}. */
  @SuppressWarnings("unchecked")
  @Test
  public void testCreateWriteOperation() throws Exception {
    int producerIndex = 1;
    int producerOutputNum = 2;
    ParallelInstructionNode instructionNode =
        ParallelInstructionNode.create(
            createWriteInstruction(producerIndex, producerOutputNum, "WriteOperation"),
            ExecutionLocation.UNKNOWN);
    Node operationNode =
        mapTaskExecutorFactory
            .createOperationTransformForParallelInstructionNodes(
                STAGE,
                network,
                options,
                readerRegistry,
                sinkRegistry,
                BatchModeExecutionContext.forTesting(options, counterSet, "testStage"))
            .apply(instructionNode);
    assertThat(operationNode, instanceOf(OperationNode.class));
    assertThat(((OperationNode) operationNode).getOperation(), instanceOf(WriteOperation.class));
    WriteOperation writeOperation = (WriteOperation) ((OperationNode) operationNode).getOperation();
    assertEquals(0, writeOperation.receivers.length);
    assertEquals(Operation.InitializationState.UNSTARTED, writeOperation.initializationState);
    assertThat(writeOperation.sink, instanceOf(SizeReportingSinkWrapper.class));
    assertThat(
        ((SizeReportingSinkWrapper<?>) writeOperation.sink).getUnderlyingSink(),
        instanceOf(TestSink.class));
    counterSet.extractUpdates(false, updateExtractor);
    verify(updateExtractor).longSum(eq(named("WriteOperation-ByteCount")), anyBoolean(), anyLong());
    verifyNoMoreInteractions(updateExtractor);
  }

  /** Identity DoFn used by the ParDo instructions built in this test. */
  static class TestDoFn extends DoFn<String, String> {
    @ProcessElement
    public void processElement(ProcessContext c) {
      c.output(c.element());
    }
  }

  /** A sink whose writer discards input and reports a fixed byte size, for testing. */
  static class TestSink extends Sink<Integer> {
    @Override
    public SinkWriter<Integer> writer() {
      return new TestSinkWriter();
    }

    /** A sink writer that drops its input values, for testing. */
    static class TestSinkWriter implements SinkWriter<Integer> {
      @Override
      public long add(Integer outputElem) {
        return 4;
      }

      @Override
      public void close() {}

      @Override
      public void abort() throws IOException {
        close();
      }
    }
  }

  /** Factory registered in {@link #setUp()} so write instructions resolve to {@link TestSink}. */
  static class TestSinkFactory implements SinkFactory {
    @Override
    public TestSink create(
        CloudObject o,
        @Nullable Coder<?> coder,
        @Nullable PipelineOptions options,
        @Nullable DataflowExecutionContext executionContext,
        DataflowOperationContext operationContext) {
      return new TestSink();
    }
  }

  /** Builds a ParDo instruction with an empty user name. */
  static ParallelInstruction createParDoInstruction(
      int producerIndex, int producerOutputNum, String systemName) {
    return createParDoInstruction(producerIndex, producerOutputNum, systemName, "");
  }

  /** Builds a ParDo instruction around a serialized {@link TestDoFn} with one main output. */
  static ParallelInstruction createParDoInstruction(
      int producerIndex, int producerOutputNum, String systemName, String userName) {
    InstructionInput cloudInput = new InstructionInput();
    cloudInput.setProducerInstructionIndex(producerIndex);
    cloudInput.setOutputNum(producerOutputNum);
    TestDoFn fn = new TestDoFn();
    String serializedFn =
        StringUtils.byteArrayToJsonString(
            SerializableUtils.serializeToByteArray(
                DoFnInfo.forFn(
                    fn,
                    WindowingStrategy.globalDefault(),
                    null /* side input views */,
                    null /* input coder */,
                    new TupleTag<>(PropertyNames.OUTPUT) /* main output id */)));
    CloudObject cloudUserFn = CloudObject.forClassName("DoFn");
    addString(cloudUserFn, PropertyNames.SERIALIZED_FN, serializedFn);
    MultiOutputInfo mainOutputTag = new MultiOutputInfo();
    mainOutputTag.setTag("1");
    ParDoInstruction parDoInstruction = new ParDoInstruction();
    parDoInstruction.setInput(cloudInput);
    parDoInstruction.setNumOutputs(1);
    parDoInstruction.setMultiOutputInfos(ImmutableList.of(mainOutputTag));
    parDoInstruction.setUserFn(cloudUserFn);
    InstructionOutput output = new InstructionOutput();
    output.setName(systemName + "_output");
    output.setCodec(windowedStringCoder);
    output.setOriginalName("originalName");
    output.setSystemName("systemName");
    ParallelInstruction instruction = new ParallelInstruction();
    instruction.setParDo(parDoInstruction);
    instruction.setOutputs(Arrays.asList(output));
    instruction.setSystemName(systemName);
    instruction.setOriginalName(systemName + "OriginalName");
    instruction.setName(userName);
    return instruction;
  }

  /** A ParDo instruction becomes an unstarted {@link ParDoOperation} with one receiver. */
  @Test
  public void testCreateParDoOperation() throws Exception {
    int producerIndex = 1;
    int producerOutputNum = 2;
    BatchModeExecutionContext context =
        BatchModeExecutionContext.forTesting(options, counterSet, "testStage");
    ParallelInstructionNode instructionNode =
        ParallelInstructionNode.create(
            createParDoInstruction(producerIndex, producerOutputNum, "DoFn"),
            ExecutionLocation.UNKNOWN);
    Node outputReceiverNode =
        IntrinsicMapTaskExecutorFactory.createOutputReceiversTransform(STAGE, counterSet)
            .apply(
                InstructionOutputNode.create(
                    instructionNode.getParallelInstruction().getOutputs().get(0)));
    // ParDo translation also consults the edge to find the output's MultiOutputInfo tag.
    when(network.successors(instructionNode)).thenReturn(ImmutableSet.of(outputReceiverNode));
    when(network.outDegree(instructionNode)).thenReturn(1);
    when(network.edgesConnecting(instructionNode, outputReceiverNode))
        .thenReturn(
            ImmutableSet.<Edge>of(
                MultiOutputInfoEdge.create(
                    instructionNode
                        .getParallelInstruction()
                        .getParDo()
                        .getMultiOutputInfos()
                        .get(0))));
    Node operationNode =
        mapTaskExecutorFactory
            .createOperationTransformForParallelInstructionNodes(
                STAGE, network, options, readerRegistry, sinkRegistry, context)
            .apply(instructionNode);
    assertThat(operationNode, instanceOf(OperationNode.class));
    assertThat(((OperationNode) operationNode).getOperation(), instanceOf(ParDoOperation.class));
    ParDoOperation parDoOperation = (ParDoOperation) ((OperationNode) operationNode).getOperation();
    assertEquals(1, parDoOperation.receivers.length);
    assertEquals(0, parDoOperation.receivers[0].getReceiverCount());
    assertEquals(Operation.InitializationState.UNSTARTED, parDoOperation.initializationState);
  }

  /** Builds a partial-group-by-key instruction over KV&lt;String, Integer&gt; input. */
  static ParallelInstruction createPartialGroupByKeyInstruction(
      int producerIndex, int producerOutputNum) {
    InstructionInput cloudInput = new InstructionInput();
    cloudInput.setProducerInstructionIndex(producerIndex);
    cloudInput.setOutputNum(producerOutputNum);
    PartialGroupByKeyInstruction pgbkInstruction = new PartialGroupByKeyInstruction();
    pgbkInstruction.setInput(cloudInput);
    pgbkInstruction.setInputElementCodec(
        CloudObjects.asCloudObject(
            FullWindowedValueCoder.of(
                KvCoder.of(StringUtf8Coder.of(), BigEndianIntegerCoder.of()),
                IntervalWindowCoder.of())));
    InstructionOutput output = new InstructionOutput();
    output.setName("pgbk_output_name");
    output.setCodec(
        CloudObjects.asCloudObject(
            KvCoder.of(StringUtf8Coder.of(), IterableCoder.of(BigEndianIntegerCoder.of()))));
    output.setOriginalName("originalName");
    output.setSystemName("systemName");
    ParallelInstruction instruction = new ParallelInstruction();
    instruction.setOriginalName("pgbk_original_name");
    instruction.setSystemName("pgbk_system_name");
    instruction.setPartialGroupByKey(pgbkInstruction);
    instruction.setOutputs(Arrays.asList(output));
    return instruction;
  }

  /** A PGBK instruction is translated into a (ParDo-based) unstarted operation. */
  @Test
  public void testCreatePartialGroupByKeyOperation() throws Exception {
    int producerIndex = 1;
    int producerOutputNum = 2;
    ParallelInstructionNode instructionNode =
        ParallelInstructionNode.create(
            createPartialGroupByKeyInstruction(producerIndex, producerOutputNum),
            ExecutionLocation.UNKNOWN);
    when(network.successors(instructionNode))
        .thenReturn(
            ImmutableSet.<Node>of(
                IntrinsicMapTaskExecutorFactory.createOutputReceiversTransform(STAGE, counterSet)
                    .apply(
                        InstructionOutputNode.create(
                            instructionNode.getParallelInstruction().getOutputs().get(0)))));
    when(network.outDegree(instructionNode)).thenReturn(1);
    Node operationNode =
        mapTaskExecutorFactory
            .createOperationTransformForParallelInstructionNodes(
                STAGE,
                network,
                PipelineOptionsFactory.create(),
                readerRegistry,
                sinkRegistry,
                BatchModeExecutionContext.forTesting(options, counterSet, "testStage"))
            .apply(instructionNode);
    assertThat(operationNode, instanceOf(OperationNode.class));
    assertThat(((OperationNode) operationNode).getOperation(), instanceOf(ParDoOperation.class));
    ParDoOperation pgbkOperation = (ParDoOperation) ((OperationNode) operationNode).getOperation();
    assertEquals(1, pgbkOperation.receivers.length);
    assertEquals(0, pgbkOperation.receivers[0].getReceiverCount());
    assertEquals(Operation.InitializationState.UNSTARTED, pgbkOperation.initializationState);
  }

  /** Same as above, but with a value-combining function (integer sum) attached to the PGBK. */
  @Test
  public void testCreatePartialGroupByKeyOperationWithCombine() throws Exception {
    int producerIndex = 1;
    int producerOutputNum = 2;
    ParallelInstruction instruction =
        createPartialGroupByKeyInstruction(producerIndex, producerOutputNum);
    AppliedCombineFn<?, ?, ?, ?> combineFn =
        AppliedCombineFn.withInputCoder(
            Sum.ofIntegers(),
            CoderRegistry.createDefault(),
            KvCoder.of(StringUtf8Coder.of(), BigEndianIntegerCoder.of()));
    CloudObject cloudCombineFn = CloudObject.forClassName("CombineFn");
    addString(
        cloudCombineFn,
        PropertyNames.SERIALIZED_FN,
        byteArrayToJsonString(serializeToByteArray(combineFn)));
    instruction.getPartialGroupByKey().setValueCombiningFn(cloudCombineFn);
    ParallelInstructionNode instructionNode =
        ParallelInstructionNode.create(instruction, ExecutionLocation.UNKNOWN);
    when(network.successors(instructionNode))
        .thenReturn(
            ImmutableSet.<Node>of(
                IntrinsicMapTaskExecutorFactory.createOutputReceiversTransform(STAGE, counterSet)
                    .apply(
                        InstructionOutputNode.create(
                            instructionNode.getParallelInstruction().getOutputs().get(0)))));
    when(network.outDegree(instructionNode)).thenReturn(1);
    Node operationNode =
        mapTaskExecutorFactory
            .createOperationTransformForParallelInstructionNodes(
                STAGE,
                network,
                options,
                readerRegistry,
                sinkRegistry,
                BatchModeExecutionContext.forTesting(options, counterSet, "testStage"))
            .apply(instructionNode);
    assertThat(operationNode, instanceOf(OperationNode.class));
    assertThat(((OperationNode) operationNode).getOperation(), instanceOf(ParDoOperation.class));
    ParDoOperation pgbkOperation = (ParDoOperation) ((OperationNode) operationNode).getOperation();
    assertEquals(1, pgbkOperation.receivers.length);
    assertEquals(0, pgbkOperation.receivers[0].getReceiverCount());
    assertEquals(Operation.InitializationState.UNSTARTED, pgbkOperation.initializationState);
  }

  /** Builds a two-input flatten instruction with a single string-coded output. */
  static ParallelInstruction createFlattenInstruction(
      int producerIndex1,
      int producerOutputNum1,
      int producerIndex2,
      int producerOutputNum2,
      String systemName) {
    List<InstructionInput> cloudInputs = new ArrayList<>();
    InstructionInput cloudInput1 = new InstructionInput();
    cloudInput1.setProducerInstructionIndex(producerIndex1);
    cloudInput1.setOutputNum(producerOutputNum1);
    cloudInputs.add(cloudInput1);
    InstructionInput cloudInput2 = new InstructionInput();
    cloudInput2.setProducerInstructionIndex(producerIndex2);
    cloudInput2.setOutputNum(producerOutputNum2);
    cloudInputs.add(cloudInput2);
    FlattenInstruction flattenInstruction = new FlattenInstruction();
    flattenInstruction.setInputs(cloudInputs);
    InstructionOutput output = new InstructionOutput();
    output.setName("flatten_output_name");
    output.setCodec(CloudObjects.asCloudObject(StringUtf8Coder.of()));
    output.setOriginalName("originalName");
    output.setSystemName("systemName");
    ParallelInstruction instruction = new ParallelInstruction();
    instruction.setFlatten(flattenInstruction);
    instruction.setOutputs(Arrays.asList(output));
    instruction.setSystemName(systemName);
    instruction.setOriginalName(systemName + "OriginalName");
    return instruction;
  }

  /** A flatten instruction becomes an unstarted {@link FlattenOperation} with one receiver. */
  @Test
  public void testCreateFlattenOperation() throws Exception {
    int producerIndex1 = 1;
    int producerOutputNum1 = 2;
    int producerIndex2 = 0;
    int producerOutputNum2 = 1;
    ParallelInstructionNode instructionNode =
        ParallelInstructionNode.create(
            createFlattenInstruction(
                producerIndex1, producerOutputNum1, producerIndex2, producerOutputNum2, "Flatten"),
            ExecutionLocation.UNKNOWN);
    when(network.successors(instructionNode))
        .thenReturn(
            ImmutableSet.<Node>of(
                IntrinsicMapTaskExecutorFactory.createOutputReceiversTransform(STAGE, counterSet)
                    .apply(
                        InstructionOutputNode.create(
                            instructionNode.getParallelInstruction().getOutputs().get(0)))));
    when(network.outDegree(instructionNode)).thenReturn(1);
    Node operationNode =
        mapTaskExecutorFactory
            .createOperationTransformForParallelInstructionNodes(
                STAGE,
                network,
                options,
                readerRegistry,
                sinkRegistry,
                BatchModeExecutionContext.forTesting(options, counterSet, "testStage"))
            .apply(instructionNode);
    assertThat(operationNode, instanceOf(OperationNode.class));
    assertThat(((OperationNode) operationNode).getOperation(), instanceOf(FlattenOperation.class));
    FlattenOperation flattenOperation =
        (FlattenOperation) ((OperationNode) operationNode).getOperation();
    assertEquals(1, flattenOperation.receivers.length);
    assertEquals(0, flattenOperation.receivers[0].getReceiverCount());
    assertEquals(Operation.InitializationState.UNSTARTED, flattenOperation.initializationState);
  }
}
| apache-2.0 |
jexp/idea2 | platform/lang-impl/src/com/intellij/ide/actions/WeighingActionGroup.java | 3509 | /*
* Copyright 2000-2009 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.ide.actions;
import com.intellij.openapi.actionSystem.*;
import com.intellij.openapi.actionSystem.impl.PresentationFactory;
import com.intellij.openapi.actionSystem.impl.Utils;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.ArrayList;
/**
* @author peter
*/
/**
 * An action group that asks each child action to "weigh" itself for the current
 * context, shows only the heaviest children inline, and tucks the rest into an
 * "Other..." popup. Children are updated through a private {@link PresentationFactory}
 * so that the weight each action reports can be read back after the update.
 */
public abstract class WeighingActionGroup extends ActionGroup {
  // Cache of per-action presentations; reused across updates so weights written
  // by Utils.updateGroupChild() can be inspected below.
  private final PresentationFactory myPresentationFactory = new PresentationFactory();
  @Override
  public void update(AnActionEvent e) {
    // Delegate the group-level presentation update (text, icon, enabled state).
    getDelegate().update(e);
  }
  /** @return the underlying group whose children are weighed and filtered. */
  protected abstract ActionGroup getDelegate();
  // Recursively flattens non-popup subgroups into a single list of leaf actions;
  // popup subgroups are kept as single entries.
  private static void getAllChildren(@Nullable AnActionEvent e, ActionGroup group, List<AnAction> result) {
    for (final AnAction action : group.getChildren(e)) {
      if (action instanceof ActionGroup && !((ActionGroup) action).isPopup()) {
        getAllChildren(e, (ActionGroup) action, result);
      } else {
        result.add(action);
      }
    }
  }
  @NotNull
  public AnAction[] getChildren(@Nullable AnActionEvent e) {
    final AnAction[] children = getDelegate().getChildren(e);
    if (e == null) {
      // Without an event there is no context to weigh against; show everything.
      return children;
    }
    final ArrayList<AnAction> all = new ArrayList<AnAction>();
    getAllChildren(e, getDelegate(), all);
    LinkedHashSet<AnAction> heaviest = null;
    double maxWeight = Presentation.DEFAULT_WEIGHT;
    // Single pass: update each action's presentation and collect the actions that
    // share the maximum weight seen so far. Note that 'heaviest' stays null unless
    // at least one action reports a weight strictly above DEFAULT_WEIGHT.
    for (final AnAction action : all) {
      final Presentation presentation = myPresentationFactory.getPresentation(action);
      presentation.setWeight(Presentation.DEFAULT_WEIGHT);
      Utils.updateGroupChild(e.getDataContext(), e.getPlace(), action, presentation);
      if (presentation.isEnabled() && presentation.isVisible()) {
        if (presentation.getWeight() > maxWeight) {
          // New maximum: discard previously collected actions.
          maxWeight = presentation.getWeight();
          heaviest = new LinkedHashSet<AnAction>();
        }
        if (presentation.getWeight() == maxWeight && heaviest != null) {
          heaviest.add(action);
        }
      }
    }
    if (heaviest == null) {
      // No action weighed above the default: no filtering necessary.
      return children;
    }
    // Rebuild the visible list in original order, keeping separators only between
    // retained actions (prevSeparator starts true to drop a leading separator).
    final DefaultActionGroup chosen = new DefaultActionGroup();
    boolean prevSeparator = true;
    for (final AnAction action : all) {
      final boolean separator = action instanceof Separator;
      if (separator && !prevSeparator) {
        chosen.add(action);
      }
      prevSeparator = separator;
      if (shouldBeChosenAnyway(action)) {
        heaviest.add(action);
      }
      if (heaviest.contains(action)) {
        chosen.add(action);
      }
    }
    // Everything not chosen goes into a nested "Other..." popup.
    final ActionGroup other = new ExcludingActionGroup(getDelegate(), heaviest);
    other.setPopup(true);
    other.getTemplatePresentation().setText("Other...");
    return new AnAction[]{chosen, new Separator(), other};
  }
  /** Hook for subclasses: return true to keep the action visible regardless of weight. */
  protected boolean shouldBeChosenAnyway(AnAction action) {
    return false;
  }
}
| apache-2.0 |
alexsh/cw-omnibus | ViewPager/Indicator/app/src/main/java/com/commonsware/android/pager2/EditorFragment.java | 1856 | /***
Copyright (c) 2012-14 CommonsWare, LLC
Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License. You may obtain a copy
of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required
by applicable law or agreed to in writing, software distributed under the
License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
OF ANY KIND, either express or implied. See the License for the specific
language governing permissions and limitations under the License.
Covered in detail in the book _The Busy Coder's Guide to Android Development_
https://commonsware.com/Android
*/
package com.commonsware.android.pager2;
import android.app.Fragment;
import android.content.Context;
import android.os.Bundle;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.EditText;
/**
 * A pager page showing a single EditText whose hint reflects the page's
 * position (displayed one-based). The position travels in the fragment's
 * argument bundle so it survives configuration changes.
 */
public class EditorFragment extends Fragment {
  private static final String KEY_POSITION="position";
  /** Builds a fragment for the given zero-based page position. */
  static EditorFragment newInstance(int position) {
    Bundle args=new Bundle();
    args.putInt(KEY_POSITION, position);
    EditorFragment result=new EditorFragment();
    result.setArguments(args);
    return result;
  }
  /** Formats the user-visible title/hint for a zero-based position. */
  static String getTitle(Context ctxt, int position) {
    String pattern=ctxt.getString(R.string.hint);
    return String.format(pattern, position + 1);
  }
  @Override
  public View onCreateView(LayoutInflater inflater,
                           ViewGroup container,
                           Bundle savedInstanceState) {
    View result=inflater.inflate(R.layout.editor, container, false);
    int position=getArguments().getInt(KEY_POSITION, -1);
    EditText editor=(EditText)result.findViewById(R.id.editor);
    editor.setHint(getTitle(getActivity(), position));
    return result;
  }
} | apache-2.0 |
Svetik77/maven-enforcer | enforcer-rules/src/main/java/org/apache/maven/plugins/enforcer/ReactorModuleConvergence.java | 19182 | package org.apache.maven.plugins.enforcer;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import org.apache.commons.lang.SystemUtils;
import org.apache.maven.enforcer.rule.api.EnforcerRuleException;
import org.apache.maven.enforcer.rule.api.EnforcerRuleHelper;
import org.apache.maven.execution.MavenSession;
import org.apache.maven.model.Dependency;
import org.apache.maven.plugin.logging.Log;
import org.apache.maven.project.MavenProject;
import org.codehaus.plexus.component.configurator.expression.ExpressionEvaluationException;
import org.codehaus.plexus.util.StringUtils;
/**
 * This rule will check if a multi module build will follow the best practices:
 * a consistent version across the reactor, parents that are part of the reactor
 * and carry the reactor version, and (optionally) inter-module dependencies
 * that reference the reactor version.
 *
 * @author Karl-Heinz Marbaise
 * @since 1.4
 */
public class ReactorModuleConvergence
    extends AbstractNonCacheableEnforcerRule
{
    /** If set to true, dependencies between reactor modules are not checked. */
    private boolean ignoreModuleDependencies = false;

    private Log logger;

    /**
     * Runs all convergence checks against the sorted list of reactor projects.
     *
     * @param helper helper giving access to the current Maven session.
     * @throws EnforcerRuleException if any check detects a violation.
     */
    public void execute( EnforcerRuleHelper helper )
        throws EnforcerRuleException
    {
        logger = helper.getLog();
        MavenSession session;
        try
        {
            session = (MavenSession) helper.evaluate( "${session}" );
        }
        catch ( ExpressionEvaluationException eee )
        {
            throw new EnforcerRuleException( "Unable to retrieve the MavenSession: ", eee );
        }
        List<MavenProject> sortedProjects = session.getSortedProjects();
        if ( sortedProjects != null && !sortedProjects.isEmpty() )
        {
            checkReactor( sortedProjects );
            checkParentsInReactor( sortedProjects );
            checkMissingParentsInReactor( sortedProjects );
            checkParentsPartOfTheReactor( sortedProjects );
            if ( !isIgnoreModuleDependencies() )
            {
                checkDependenciesWithinReactor( sortedProjects );
            }
        }
    }

    /**
     * Checks that every parent referenced by a reactor module is itself part of the reactor.
     *
     * @param sortedProjects The list of reactor projects.
     * @throws EnforcerRuleException In case of a violation.
     */
    private void checkParentsPartOfTheReactor( List<MavenProject> sortedProjects )
        throws EnforcerRuleException
    {
        List<MavenProject> parentsWhichAreNotPartOfTheReactor =
            existParentsWhichAreNotPartOfTheReactor( sortedProjects );
        if ( !parentsWhichAreNotPartOfTheReactor.isEmpty() )
        {
            StringBuilder sb = new StringBuilder().append( SystemUtils.LINE_SEPARATOR );
            addMessageIfExist( sb );
            for ( MavenProject mavenProject : parentsWhichAreNotPartOfTheReactor )
            {
                sb.append( " module: " );
                sb.append( mavenProject.getId() );
                sb.append( SystemUtils.LINE_SEPARATOR );
            }
            throw new EnforcerRuleException( "Module parents have been found which could not be found in the reactor."
                + sb.toString() );
        }
    }

    /**
     * Checks that no non-root module of the multi module build is missing a parent.
     *
     * @param sortedProjects The list of reactor projects.
     * @throws EnforcerRuleException In case of a violation.
     */
    private void checkMissingParentsInReactor( List<MavenProject> sortedProjects )
        throws EnforcerRuleException
    {
        List<MavenProject> modulesWithoutParentsInReactor = existModulesWithoutParentsInReactor( sortedProjects );
        if ( !modulesWithoutParentsInReactor.isEmpty() )
        {
            StringBuilder sb = new StringBuilder().append( SystemUtils.LINE_SEPARATOR );
            addMessageIfExist( sb );
            for ( MavenProject mavenProject : modulesWithoutParentsInReactor )
            {
                sb.append( " module: " );
                sb.append( mavenProject.getId() );
                sb.append( SystemUtils.LINE_SEPARATOR );
            }
            throw new EnforcerRuleException( "Reactor contains modules without parents." + sb.toString() );
        }
    }

    /**
     * Checks that dependencies between reactor modules reference the reactor version.
     *
     * @param sortedProjects The list of reactor projects.
     * @throws EnforcerRuleException In case of a violation.
     */
    private void checkDependenciesWithinReactor( List<MavenProject> sortedProjects )
        throws EnforcerRuleException
    {
        // After we are sure having consistent version we can simply use the first one?
        String reactorVersion = sortedProjects.get( 0 ).getVersion();
        Map<MavenProject, List<Dependency>> areThereDependenciesWhichAreNotPartOfTheReactor =
            areThereDependenciesWhichAreNotPartOfTheReactor( reactorVersion, sortedProjects );
        if ( !areThereDependenciesWhichAreNotPartOfTheReactor.isEmpty() )
        {
            StringBuilder sb = new StringBuilder().append( SystemUtils.LINE_SEPARATOR );
            addMessageIfExist( sb );
            // CHECKSTYLE_OFF: LineLength
            for ( Entry<MavenProject, List<Dependency>> item : areThereDependenciesWhichAreNotPartOfTheReactor.entrySet() )
            {
                sb.append( " module: " );
                sb.append( item.getKey().getId() );
                sb.append( SystemUtils.LINE_SEPARATOR );
                for ( Dependency dependency : item.getValue() )
                {
                    String id =
                        dependency.getGroupId() + ":" + dependency.getArtifactId() + ":" + dependency.getVersion();
                    sb.append( "    dependency: " );
                    sb.append( id );
                    sb.append( SystemUtils.LINE_SEPARATOR );
                }
            }
            throw new EnforcerRuleException(
                                             "Reactor modules contains dependencies which do not reference the reactor."
                                                 + sb.toString() );
            // CHECKSTYLE_ON: LineLength
        }
    }

    /**
     * Checks that the parents of all reactor modules carry the reactor version.
     *
     * @param sortedProjects The list of reactor projects.
     * @throws EnforcerRuleException In case of a violation.
     */
    private void checkParentsInReactor( List<MavenProject> sortedProjects )
        throws EnforcerRuleException
    {
        // After we are sure having consistent version we can simply use the first one?
        String reactorVersion = sortedProjects.get( 0 ).getVersion();
        List<MavenProject> areParentsFromTheReactor = areParentsFromTheReactor( reactorVersion, sortedProjects );
        if ( !areParentsFromTheReactor.isEmpty() )
        {
            StringBuilder sb = new StringBuilder().append( SystemUtils.LINE_SEPARATOR );
            addMessageIfExist( sb );
            for ( MavenProject mavenProject : areParentsFromTheReactor )
            {
                sb.append( " --> " );
                sb.append( mavenProject.getId() );
                sb.append( " parent:" );
                sb.append( mavenProject.getParent().getId() );
                sb.append( SystemUtils.LINE_SEPARATOR );
            }
            throw new EnforcerRuleException( "Reactor modules have parents which contain a wrong version."
                + sb.toString() );
        }
    }

    /**
     * Checks that all reactor modules share the same version.
     *
     * @param sortedProjects The list of reactor projects.
     * @throws EnforcerRuleException In case of a violation.
     */
    private void checkReactor( List<MavenProject> sortedProjects )
        throws EnforcerRuleException
    {
        List<MavenProject> consistenceCheckResult = isReactorVersionConsistent( sortedProjects );
        if ( !consistenceCheckResult.isEmpty() )
        {
            StringBuilder sb = new StringBuilder().append( SystemUtils.LINE_SEPARATOR );
            addMessageIfExist( sb );
            for ( MavenProject mavenProject : consistenceCheckResult )
            {
                sb.append( " --> " );
                sb.append( mavenProject.getId() );
                sb.append( SystemUtils.LINE_SEPARATOR );
            }
            throw new EnforcerRuleException( "The reactor contains different versions." + sb.toString() );
        }
    }

    /**
     * Collects the modules whose parent version differs from the reactor version.
     *
     * @param reactorVersion The version of the reactor.
     * @param sortedProjects The list of reactor projects.
     * @return The offending modules. Never null; empty means no violation.
     */
    private List<MavenProject> areParentsFromTheReactor( String reactorVersion, List<MavenProject> sortedProjects )
    {
        List<MavenProject> result = new ArrayList<MavenProject>();
        for ( MavenProject mavenProject : sortedProjects )
        {
            logger.debug( "Project: " + mavenProject.getId() );
            if ( hasParent( mavenProject ) )
            {
                // The execution root is allowed to have an out-of-reactor parent
                // (e.g. a corporate parent POM), so it is not checked here.
                if ( !mavenProject.isExecutionRoot() )
                {
                    MavenProject parent = mavenProject.getParent();
                    if ( !reactorVersion.equals( parent.getVersion() ) )
                    {
                        logger.debug( "The project: " + mavenProject.getId()
                            + " has a parent which version does not match the other elements in reactor" );
                        result.add( mavenProject );
                    }
                }
            }
            else
            {
                // This situation is currently ignored, cause it's handled by existModulesWithoutParentsInReactor()
            }
        }
        return result;
    }

    /**
     * Collects the modules whose parent (groupId/artifactId) is not part of the reactor.
     *
     * @param sortedProjects The list of reactor projects.
     * @return The offending modules. Never null; empty means no violation.
     */
    private List<MavenProject> existParentsWhichAreNotPartOfTheReactor( List<MavenProject> sortedProjects )
    {
        List<MavenProject> result = new ArrayList<MavenProject>();
        for ( MavenProject mavenProject : sortedProjects )
        {
            logger.debug( "Project: " + mavenProject.getId() );
            if ( hasParent( mavenProject ) )
            {
                if ( !mavenProject.isExecutionRoot() )
                {
                    MavenProject parent = mavenProject.getParent();
                    if ( !isProjectPartOfTheReactor( parent, sortedProjects ) )
                    {
                        result.add( mavenProject );
                    }
                }
            }
        }
        return result;
    }

    /**
     * This will check of the groupId/artifactId can be found in any reactor project. The version will be ignored cause
     * versions are checked before.
     *
     * @param project The project which should be checked if it is contained in the sortedProjects.
     * @param sortedProjects The list of existing projects.
     * @return true if the project has been found within the list false otherwise.
     */
    private boolean isProjectPartOfTheReactor( MavenProject project, List<MavenProject> sortedProjects )
    {
        return isGAPartOfTheReactor( project.getGroupId(), project.getArtifactId(), sortedProjects );
    }

    private boolean isDependencyPartOfTheReactor( Dependency dependency, List<MavenProject> sortedProjects )
    {
        return isGAPartOfTheReactor( dependency.getGroupId(), dependency.getArtifactId(), sortedProjects );
    }

    /**
     * This will check if the given <code>groupId/artifactId</code> is part of the current reactor.
     *
     * @param groupId The groupId
     * @param artifactId The artifactId
     * @param sortedProjects The list of projects within the reactor.
     * @return true if the groupId/artifactId is part of the reactor false otherwise.
     */
    private boolean isGAPartOfTheReactor( String groupId, String artifactId, List<MavenProject> sortedProjects )
    {
        // The searched id is loop invariant, so build it only once and
        // return as soon as a match has been found.
        String searchedId = groupId + ":" + artifactId;
        for ( MavenProject mavenProject : sortedProjects )
        {
            String projectId = mavenProject.getGroupId() + ":" + mavenProject.getArtifactId();
            if ( searchedId.equals( projectId ) )
            {
                return true;
            }
        }
        return false;
    }

    /**
     * Assume we have a module which is a child of a multi module build but this child does not have a parent. This
     * method will exactly search for such cases.
     *
     * @param sortedProjects The sorted list of the reactor modules.
     * @return The resulting list will contain the modules in the reactor which do not have a parent. The list will
     *         never null. If the list is empty no violation have happened.
     */
    private List<MavenProject> existModulesWithoutParentsInReactor( List<MavenProject> sortedProjects )
    {
        List<MavenProject> result = new ArrayList<MavenProject>();
        for ( MavenProject mavenProject : sortedProjects )
        {
            logger.debug( "Project: " + mavenProject.getId() );
            if ( !hasParent( mavenProject ) )
            {
                // TODO: Should add an option to force having a parent?
                if ( mavenProject.isExecutionRoot() )
                {
                    logger.debug( "The root does not need having a parent." );
                }
                else
                {
                    logger.debug( "The module: " + mavenProject.getId() + " has no parent." );
                    result.add( mavenProject );
                }
            }
        }
        return result;
    }

    /**
     * Convenience method to handle adding a dependency to the Map of List.
     *
     * @param result The result Map which should be handled.
     * @param project The MavenProject which will be added.
     * @param dependency The dependency which will be added.
     */
    private void addDep( Map<MavenProject, List<Dependency>> result, MavenProject project, Dependency dependency )
    {
        // Lazily create the per-project list; this replaces the previous
        // containsKey()/get()/put() dance with a single lookup.
        List<Dependency> list = result.get( project );
        if ( list == null )
        {
            list = new ArrayList<Dependency>();
            result.put( project, list );
        }
        list.add( dependency );
    }

    /**
     * Go through the list of modules in the builds and check if we have dependencies. If yes we will check every
     * dependency based on groupId/artifactId if it belongs to the multi module build. In such a case it will be checked
     * if the version does fit the version in the rest of build.
     *
     * @param reactorVersion The version of the reactor.
     * @param sortedProjects The list of existing projects within this build.
     * @return List of violations. Never null. If the list is empty than no violation has happened.
     */
    // CHECKSTYLE_OFF: LineLength
    private Map<MavenProject, List<Dependency>> areThereDependenciesWhichAreNotPartOfTheReactor( String reactorVersion,
                                                                                                 List<MavenProject> sortedProjects )
    // CHECKSTYLE_ON: LineLength
    {
        Map<MavenProject, List<Dependency>> result = new HashMap<MavenProject, List<Dependency>>();
        for ( MavenProject mavenProject : sortedProjects )
        {
            logger.debug( "Project: " + mavenProject.getId() );
            @SuppressWarnings( "unchecked" )
            List<Dependency> dependencies = mavenProject.getDependencies();
            if ( hasDependencies( dependencies ) )
            {
                for ( Dependency dependency : dependencies )
                {
                    logger.debug( " -> Dep:" + dependency.getGroupId() + ":" + dependency.getArtifactId() + ":"
                        + dependency.getVersion() );
                    if ( isDependencyPartOfTheReactor( dependency, sortedProjects ) )
                    {
                        // getVersion() can be null if the version is supplied elsewhere
                        // (e.g. dependencyManagement). Such dependencies cannot be
                        // validated here, so skip them instead of failing with a
                        // NullPointerException.
                        String depVersion = dependency.getVersion();
                        if ( depVersion != null && !depVersion.equals( reactorVersion ) )
                        {
                            addDep( result, mavenProject, dependency );
                        }
                    }
                }
            }
        }
        return result;
    }

    /**
     * This method will check the following situation within a multi-module build.
     *
     * <pre>
     *  &lt;parent&gt;
     *    &lt;groupId&gt;...&lt;/groupId&gt;
     *    &lt;artifactId&gt;...&lt;/artifactId&gt;
     *    &lt;version&gt;1.0-SNAPSHOT&lt;/version&gt;
     *  &lt;/parent&gt;
     *
     *  &lt;version&gt;1.1-SNAPSHOT&lt;/version&gt;
     * </pre>
     *
     * @param projectList The sorted list of the reactor modules.
     * @return The resulting list will contain the modules in the reactor which do the thing in the example above. The
     *         list will never null. If the list is empty no violation have happened.
     */
    private List<MavenProject> isReactorVersionConsistent( List<MavenProject> projectList )
    {
        List<MavenProject> result = new ArrayList<MavenProject>();
        if ( projectList != null && !projectList.isEmpty() )
        {
            // The first module's version is taken as the reference version.
            String version = projectList.get( 0 ).getVersion();
            logger.debug( "First version:" + version );
            for ( MavenProject mavenProject : projectList )
            {
                logger.debug( " -> checking " + mavenProject.getId() );
                if ( !version.equals( mavenProject.getVersion() ) )
                {
                    result.add( mavenProject );
                }
            }
        }
        return result;
    }

    private boolean hasDependencies( List<Dependency> dependencies )
    {
        return dependencies != null && !dependencies.isEmpty();
    }

    private boolean hasParent( MavenProject mavenProject )
    {
        return mavenProject.getParent() != null;
    }

    public boolean isIgnoreModuleDependencies()
    {
        return ignoreModuleDependencies;
    }

    public void setIgnoreModuleDependencies( boolean ignoreModuleDependencies )
    {
        this.ignoreModuleDependencies = ignoreModuleDependencies;
    }

    /**
     * This will add the given user message to the output.
     *
     * @param sb The already initialized exception message part.
     */
    private void addMessageIfExist( StringBuilder sb )
    {
        if ( !StringUtils.isEmpty( getMessage() ) )
        {
            sb.append( getMessage() );
            sb.append( SystemUtils.LINE_SEPARATOR );
        }
    }
} | apache-2.0 |
apache/geronimo-yoko | yoko-core/src/main/java/org/apache/yoko/orb/OCI/CLIENT_SIDE.java | 1079 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.yoko.orb.OCI;
//
// IDL:orb.yoko.apache.org/OCI/CLIENT_SIDE:1.0
//
/**
*
* This transport was created as a client side connection.
*
* @see Originator
* @see TransportInfo
*
**/
public interface CLIENT_SIDE
{
    // Constant generated from the IDL definition CLIENT_SIDE:1.0; the value 0
    // identifies transports created as client-side connections (see the
    // Originator/TransportInfo references in the javadoc above).
    short value = (short)(0L);
}
| apache-2.0 |
kohsah/akomantoso-lib | src/main/java/org/akomantoso/schema/v3/csd11/Classification.java | 2997 | //
// This file was generated by the JavaTM Architecture for XML Binding(JAXB) Reference Implementation, v2.2.8-b130911.1802
// See <a href="http://java.sun.com/xml/jaxb">http://java.sun.com/xml/jaxb</a>
// Any modifications to this file will be lost upon recompilation of the source schema.
// Generated on: 2015.01.16 at 12:56:36 PM IST
//
package org.akomantoso.schema.v3.csd11;
import java.util.ArrayList;
import java.util.List;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlAttribute;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlSchemaType;
import javax.xml.bind.annotation.XmlType;
/**
* <p>Java class for anonymous complex type.
*
* <p>The following schema fragment specifies the expected content contained within this class.
*
* <pre>
* <complexType>
* <complexContent>
* <restriction base="{http://www.w3.org/2001/XMLSchema}anyType">
* <sequence>
* <element ref="{http://docs.oasis-open.org/legaldocml/ns/akn/3.0/CSD11}keyword" maxOccurs="unbounded"/>
* </sequence>
* <attGroup ref="{http://docs.oasis-open.org/legaldocml/ns/akn/3.0/CSD11}source"/>
* </restriction>
* </complexContent>
* </complexType>
* </pre>
*
*
*/
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "", propOrder = {
    "keyword"
})
@XmlRootElement(name = "classification")
public class Classification {
    // One or more <keyword> child elements; lazily created in getKeyword().
    // NOTE: this class is JAXB-generated (see header) — manual edits are lost
    // when the schema is regenerated.
    @XmlElement(required = true)
    protected List<Keyword> keyword;
    // Required "source" attribute (xsd:anyURI) identifying who/what assigned
    // the classification.
    @XmlAttribute(name = "source", required = true)
    @XmlSchemaType(name = "anyURI")
    protected String source;
    /**
     * Gets the value of the keyword property.
     *
     * <p>
     * This accessor method returns a reference to the live list,
     * not a snapshot. Therefore any modification you make to the
     * returned list will be present inside the JAXB object.
     * This is why there is not a <CODE>set</CODE> method for the keyword property.
     *
     * <p>
     * For example, to add a new item, do as follows:
     * <pre>
     *    getKeyword().add(newItem);
     * </pre>
     *
     *
     * <p>
     * Objects of the following type(s) are allowed in the list
     * {@link Keyword }
     *
     *
     */
    public List<Keyword> getKeyword() {
        if (keyword == null) {
            keyword = new ArrayList<Keyword>();
        }
        return this.keyword;
    }
    /**
     * Gets the value of the source property.
     *
     * @return
     *     possible object is
     *     {@link String }
     *
     */
    public String getSource() {
        return source;
    }
    /**
     * Sets the value of the source property.
     *
     * @param value
     *     allowed object is
     *     {@link String }
     *
     */
    public void setSource(String value) {
        this.source = value;
    }
}
| apache-2.0 |
dexlex/DexMovingImageView | dexmovingimageviewlib/src/main/java/it/dex/movingimageviewlib/evaluating/evaluators/GyroscopeEvaluator.java | 3521 | /*
* Copyright 2014-2015 Diego Grancini
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package it.dex.movingimageviewlib.evaluating.evaluators;
import android.content.Context;
import android.hardware.Sensor;
import android.hardware.SensorEvent;
import android.hardware.SensorEventListener;
import android.hardware.SensorManager;
import android.view.View;
import it.dex.movingimageviewlib.evaluating.Evaluator;
/**
* Evaluator class that use an OnSensorChanged implementation to generate values.
* <p/>
* GyroscopeEvaluator created by Diego Grancini on 03/01/2015.
*/
public class GyroscopeEvaluator extends Evaluator implements SensorEventListener {
    // Fallback integration step (1/40 s = 25 ms, i.e. a nominal 40 Hz sample
    // rate) used when the measured time delta is implausible (see onSensorChanged).
    private static final float MIN_TIME_STEP = (1f / 40f);
    private SensorManager mSensorManager;
    private Sensor mGyroSensor;
    // Timestamp of the previous sensor sample; used to integrate angular velocity.
    private long mLastTime = System.currentTimeMillis();
    // Accumulated orientation state: x/y are clamped to [-1, 1], z is an
    // accumulated angle in radians.
    private float x, y, z;
    public GyroscopeEvaluator(View view) {
        super(view);
    }
    public GyroscopeEvaluator(View view, OnEventOccurred onEventOccurred) {
        super(view, onEventOccurred);
    }
    @Override
    protected void onCreate(View view) {
        // Register for gyroscope updates as soon as the evaluator is attached.
        mSensorManager = (SensorManager) view.getContext().getSystemService(Context.SENSOR_SERVICE);
        mGyroSensor = mSensorManager.getDefaultSensor(Sensor.TYPE_GYROSCOPE);
        mSensorManager.registerListener(this, mGyroSensor, SensorManager.SENSOR_DELAY_FASTEST);
    }
    @Override
    public float evaluateX(View view) {
        // Scale the normalized [-1, 1] x state to the view's width (pixels).
        return (int) (x * view.getWidth());
    }
    @Override
    public float evaluateY(View view) {
        // Scale the normalized [-1, 1] y state to the view's height (pixels).
        return (int) (y * view.getHeight());
    }
    @Override
    public float evaluateAngle(View view, float defaultAngle) {
        // Convert the accumulated z angle from radians to degrees.
        return (float) (z * 180d / Math.PI);
    }
    @Override
    protected void onDestroy(View view) {
        // Unregister to stop sensor delivery and avoid battery drain/leaks.
        mSensorManager.unregisterListener(this, mGyroSensor);
    }
    @Override
    public void onSensorChanged(SensorEvent event) {
        float[] values = event.values;
        // NOTE(review): the axes are deliberately swapped here (x <- values[1],
        // y <- values[0]) — presumably to map device rotation axes onto screen
        // movement; confirm against the intended visual effect.
        float x = values[1];
        float y = values[0];
        float z = values[2];
        // 0.96f slightly damps the z angular velocity — assumed to be an
        // empirically chosen smoothing factor (TODO confirm).
        float angularVelocity = z * 0.96f;
        long now = System.currentTimeMillis();
        float timeDiff = (now - mLastTime) / 1000f;
        mLastTime = now;
        if (timeDiff > 1) {
            // More than one second between samples: the delta is unreliable
            // (e.g. first event after a pause), so fall back to the nominal step.
            timeDiff = MIN_TIME_STEP;
        }
        // Integrate the (swapped) angular velocities and clamp x/y to [-1, 1].
        this.x += x * timeDiff;
        if (this.x > 1f)
            this.x = 1f;
        else if (this.x < -1f)
            this.x = -1f;
        this.y += y * timeDiff;
        if (this.y > 1f)
            this.y = 1f;
        else if (this.y < -1f)
            this.y = -1f;
        this.z += angularVelocity * timeDiff;
        // When the state returns exactly to the origin, report a MIDDLE event.
        if (this.x == 0 && this.y == 0 && this.z == 0) {
            if (getOnEventOccurred() != null && isNotifyEvent())
                getOnEventOccurred().onEventOccurred(getView(), this, EVENT_STATUS.MIDDLE, ++middleLoopCount);
        }
        // Trigger a redraw so the view picks up the new x/y/angle values.
        getView().invalidate();
    }
    @Override
    public void onAccuracyChanged(Sensor sensor, int accuracy) {
    }
}
| apache-2.0 |
apache/tapestry3 | tapestry-framework/src/org/apache/tapestry/util/xml/DocumentParseException.java | 2945 | // Copyright 2004 The Apache Software Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package org.apache.tapestry.util.xml;
import org.apache.tapestry.ApplicationRuntimeException;
import org.apache.tapestry.ILocation;
import org.apache.tapestry.IResourceLocation;
import org.apache.tapestry.Location;
import org.xml.sax.SAXParseException;
/**
* Exception thrown if there is any kind of error parsing the
* an XML document.
*
* @see org.apache.tapestry.parse.SpecificationParser
*
* @author Howard Lewis Ship
* @version $Id$
* @since 0.2.10
*
**/
public class DocumentParseException extends ApplicationRuntimeException
{
    // Location of the XML document that failed to parse; may be null when unknown.
    private IResourceLocation _documentLocation;
    public DocumentParseException(String message, Throwable rootCause)
    {
        this(message, null, null, rootCause);
    }
    public DocumentParseException(String message, IResourceLocation documentLocation)
    {
        this(message, documentLocation, null);
    }
    public DocumentParseException(
        String message,
        IResourceLocation documentLocation,
        Throwable rootCause)
    {
        this(message, documentLocation, null, rootCause);
    }
    // Primary constructor: all other constructors eventually delegate here.
    public DocumentParseException(
        String message,
        IResourceLocation documentLocation,
        ILocation location,
        Throwable rootCause)
    {
        super(message, null, location, rootCause);
        _documentLocation = documentLocation;
    }
    // Builds a precise ILocation (line/column) from the SAX parse error when
    // both the document location and the SAX exception are available.
    public DocumentParseException(
        String message,
        IResourceLocation documentLocation,
        SAXParseException rootCause)
    {
        this(
            message,
            documentLocation,
            rootCause == null
                || documentLocation == null
                    ? null
                    : new Location(
                        documentLocation,
                        rootCause.getLineNumber(),
                        rootCause.getColumnNumber()),
            rootCause);
    }
    public DocumentParseException(String message)
    {
        this(message, null, null, null);
    }
    public DocumentParseException(Throwable rootCause)
    {
        this(rootCause.getMessage(), rootCause);
    }
    public DocumentParseException(SAXParseException rootCause)
    {
        // The cast ensures the (String, Throwable) overload is selected,
        // so no line/column Location is derived in this case.
        this(rootCause.getMessage(), (Throwable) rootCause);
    }
    /** @return the location of the document being parsed, or null if not known. */
    public IResourceLocation getDocumentLocation()
    {
        return _documentLocation;
    }
} | apache-2.0 |
flightx31/Bookmark-anator | src/main/java/com/bookmarkanator/ui/interfaces/UIControllerInterface.java | 3074 | package com.bookmarkanator.ui.interfaces;
import java.util.*;
import com.bookmarkanator.core.*;
import com.bookmarkanator.ui.fxui.*;
import com.bookmarkanator.ui.fxui.bookmarks.*;
import com.bookmarkanator.util.*;
public interface UIControllerInterface
{
    /** @return the application settings used by this controller. */
    Settings getSettings();
    /**
     * Gets a list of visible bookmarks with all the search terms selected. It takes into account the search term, show types fields, and selected tags.
     *
     * @return A list of visible bookmarks.
     * @throws Exception
     */
    Set<AbstractUIBookmark> getVisibleUIBookmarks();
    /**
     * Should be called after the interfaces are set so that the controller can set an initial state of the UI.
     *
     * @throws Exception
     */
    void initUI()
        throws Exception;
    /** Refreshes the UI components to reflect the controller's current state. */
    void updateUI()
        throws Exception;
    /** @return the current search options (search term, tag groups, visible types). */
    SearchOptions getSearchOptions();
    /** Replaces the current search options and updates the UI accordingly. */
    void setSearchOptions(SearchOptions searchOptions)
        throws Exception;
    /** @return an iterator over items similar to the given search term. */
    SimilarItemIterator getSimilarItemIterator(String closeSearchTerm);
    //Available Bookmark Types Methods
    /** Toggles the visibility of the bookmark type represented by the given UI bookmark. */
    void toggleShowType(AbstractUIBookmark abstractUIBookmark)
        throws Exception;
    /** Marks every bookmark type as visible. */
    void showAllTypes()
        throws Exception;
    /** Marks every bookmark type as hidden. */
    void hideAllTypes()
        throws Exception;
    //Selected Tags Methods
    /** Clears all tag groups currently used for filtering. */
    void clearAllSelectedTagGroups()
        throws Exception;
    /** Sets the tag combination mode (e.g. and/or — confirm against SearchOptions) for the current group. */
    void setTagModeForCurrentGroup(String tagModeForCurrentGroup)
        throws Exception;
    /** Makes the given tag group the one that subsequent tag selections apply to. */
    void setCurrentGroup(SearchOptions.TagsInfo currentGroup)
        throws Exception;
    /** @return the tag group that tag selections currently apply to. */
    SearchOptions.TagsInfo getCurrentGroup();
    /** Adds a new, empty tag group. */
    void addTagGroup()
        throws Exception;
    /** Removes the given tag group entirely. */
    void removeTagGroup(SearchOptions.TagsInfo tagGroup)
        throws Exception;
    /** Removes a single tag from the given tag group. */
    void removeTagFromGroup(SearchOptions.TagsInfo tagGroup, String tag)
        throws Exception;
    //Available Tags Methods
    /** Selects a tag for the current group. */
    void selectTag(String tag)
        throws Exception;//needs to add the tag only if it is in the list of existing tags.
    // ============================================================
    // Interface Getter and Setter Methods
    // ============================================================
    void setTypesUI(BKTypesInterface types);
    void setSelectedTagsUI(SelectedTagsInterface selectedTagsUI);
    void setAvailableTagsUI(AvailableTagsInterface availableTagsUI);
    void setBookmarksListUI(BookmarksListInterface bookmarksListUI);
    void setSearchUI(SearchInterface searchUI);
    void setMenuUi(MenuInterface menuUI);
    void setQuickPanelUI(QuickPanelInterface quickPanelUI);
    void setNewBookmarkSelectorUI(NewBookmarkSelectionInterface newBookmarkSelectorUI);
    BKTypesInterface getTypesUI();
    SelectedTagsInterface getSelectedTagsUI();
    AvailableTagsInterface getAvailableTagsUI();
    BookmarksListInterface getBookmarksListUI();
    SearchInterface getSearchUI();
    MenuInterface getMenuUi();
    QuickPanelInterface getQuickPanelUI();
    NewBookmarkSelectionInterface getNewBookmarkSelectorUI();
    /** @return true if the UI is in edit mode. */
    boolean isEditMode();
    /** Switches the UI in or out of edit mode. */
    void setEditMode(boolean editMode);
}
| apache-2.0 |
henryyan/snakeyaml | src/main/java/org/yaml/snakeyaml/events/Event.java | 1991 | /**
* Copyright (c) 2008-2013, http://www.snakeyaml.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.yaml.snakeyaml.events;
import org.yaml.snakeyaml.error.Mark;
/**
 * Basic unit of output from a {@link org.yaml.snakeyaml.parser.Parser} or input
 * of a {@link org.yaml.snakeyaml.emitter.Emitter}. Carries the start/end marks
 * of the corresponding region in the YAML stream.
 */
public abstract class Event {
    public enum ID {
        Alias, DocumentEnd, DocumentStart, MappingEnd, MappingStart, Scalar, SequenceEnd, SequenceStart, StreamEnd, StreamStart
    }

    private final Mark startMark;
    private final Mark endMark;

    public Event(Mark startMark, Mark endMark) {
        this.startMark = startMark;
        this.endMark = endMark;
    }

    @Override
    public String toString() {
        return String.format("<%s(%s)>", getClass().getName(), getArguments());
    }

    public Mark getStartMark() {
        return startMark;
    }

    public Mark getEndMark() {
        return endMark;
    }

    /**
     * @see "__repr__ for Event in PyYAML"
     */
    protected String getArguments() {
        return "";
    }

    public abstract boolean is(Event.ID id);

    /*
     * for tests only
     */
    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof Event)) {
            return false;
        }
        return toString().equals(obj.toString());
    }

    /*
     * for tests only
     */
    @Override
    public int hashCode() {
        return toString().hashCode();
    }
}
| apache-2.0 |
vladisav/ignite | modules/ml/src/main/java/org/apache/ignite/ml/regressions/linear/LinearRegressionSGDTrainer.java | 2708 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.ml.regressions.linear;
import org.apache.ignite.ml.Trainer;
import org.apache.ignite.ml.math.Matrix;
import org.apache.ignite.ml.math.Vector;
import org.apache.ignite.ml.optimization.BarzilaiBorweinUpdater;
import org.apache.ignite.ml.optimization.GradientDescent;
import org.apache.ignite.ml.optimization.LeastSquaresGradientFunction;
import org.apache.ignite.ml.optimization.SimpleUpdater;
/**
* Linear regression trainer based on least squares loss function and gradient descent optimization algorithm.
*/
public class LinearRegressionSGDTrainer implements Trainer<LinearRegressionModel, Matrix> {
    /** Optimizer used to minimize the least-squares loss. */
    private final GradientDescent gradientDescent;

    /**
     * Creates a trainer around an already configured optimizer.
     *
     * @param gradientDescent Gradient descent optimizer to use.
     */
    public LinearRegressionSGDTrainer(GradientDescent gradientDescent) {
        this.gradientDescent = gradientDescent;
    }

    /**
     * Creates a trainer with a Barzilai-Borwein step-size update rule.
     *
     * @param maxIterations Maximum number of optimizer iterations.
     * @param convergenceTol Convergence tolerance that stops the optimizer early.
     */
    public LinearRegressionSGDTrainer(int maxIterations, double convergenceTol) {
        this(new GradientDescent(new LeastSquaresGradientFunction(), new BarzilaiBorweinUpdater())
            .withMaxIterations(maxIterations)
            .withConvergenceTol(convergenceTol));
    }

    /**
     * Creates a trainer with a fixed-learning-rate update rule.
     *
     * @param maxIterations Maximum number of optimizer iterations.
     * @param convergenceTol Convergence tolerance that stops the optimizer early.
     * @param learningRate Fixed step size for the simple updater.
     */
    public LinearRegressionSGDTrainer(int maxIterations, double convergenceTol, double learningRate) {
        this(new GradientDescent(new LeastSquaresGradientFunction(), new SimpleUpdater(learningRate))
            .withMaxIterations(maxIterations)
            .withConvergenceTol(convergenceTol));
    }

    /** {@inheritDoc} */
    @Override public LinearRegressionModel train(Matrix data) {
        // Optimize starting from the zero vector sized to the column count.
        Vector solution = gradientDescent.optimize(data, data.likeVector(data.columnSize()));
        // Component 0 is the intercept; the remaining components are the weights.
        Vector weights = solution.viewPart(1, solution.size() - 1);
        double intercept = solution.get(0);
        return new LinearRegressionModel(weights, intercept);
    }
}
| apache-2.0 |
RyanSkraba/beam | sdks/java/fn-execution/src/main/java/org/apache/beam/sdk/fn/stream/ForwardingClientResponseObserver.java | 2444 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.beam.sdk.fn.stream;
import org.apache.beam.vendor.grpc.v1p21p0.io.grpc.stub.ClientCallStreamObserver;
import org.apache.beam.vendor.grpc.v1p21p0.io.grpc.stub.ClientResponseObserver;
import org.apache.beam.vendor.grpc.v1p21p0.io.grpc.stub.StreamObserver;
/**
* A {@link ClientResponseObserver} which delegates all {@link StreamObserver} calls.
*
* <p>Used to wrap existing {@link StreamObserver}s to be able to install an {@link
* ClientCallStreamObserver#setOnReadyHandler(Runnable) onReadyHandler}.
*
* <p>This is as thread-safe as the underlying stream observer that is being wrapped.
*/
public final class ForwardingClientResponseObserver<ReqT, RespT>
    implements ClientResponseObserver<RespT, ReqT> {

  /** Observer receiving every forwarded stream callback. */
  private final StreamObserver<ReqT> delegate;
  /** Callback installed as the call's onReady handler in {@link #beforeStart}. */
  private final Runnable readyCallback;

  ForwardingClientResponseObserver(StreamObserver<ReqT> inboundObserver, Runnable onReadyHandler) {
    this.delegate = inboundObserver;
    this.readyCallback = onReadyHandler;
  }

  /**
   * Wraps {@code inbound} so that {@code onReadyHandler} gets installed on the
   * call stream before it starts; all other callbacks are forwarded unchanged.
   */
  public static <ReqT, RespT> ForwardingClientResponseObserver<ReqT, RespT> create(
      StreamObserver<ReqT> inbound, Runnable onReadyHandler) {
    return new ForwardingClientResponseObserver<>(inbound, onReadyHandler);
  }

  @Override
  public void onNext(ReqT value) {
    delegate.onNext(value);
  }

  @Override
  public void onError(Throwable t) {
    delegate.onError(t);
  }

  @Override
  public void onCompleted() {
    delegate.onCompleted();
  }

  @Override
  public void beforeStart(ClientCallStreamObserver<RespT> stream) {
    stream.setOnReadyHandler(readyCallback);
  }
}
| apache-2.0 |
nazarewk/elasticsearch | core/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregatorTests.java | 7689 | /*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.bucket.nested;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.SortedNumericDocValuesField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.store.Directory;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.NumberFieldMapper;
import org.elasticsearch.index.mapper.TypeFieldMapper;
import org.elasticsearch.index.mapper.UidFieldMapper;
import org.elasticsearch.search.aggregations.AggregatorTestCase;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.metrics.max.InternalMax;
import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
/**
 * Tests for the reverse_nested aggregator. Both tests build the same tree:
 * a nested aggregation on {@code nested_object}, containing a reverse_nested
 * sub-aggregation, containing a max over the parent-level {@code number} field.
 */
public class ReverseNestedAggregatorTests extends AggregatorTestCase {

    // Parent-document numeric field read by the max sub-aggregation.
    private static final String VALUE_FIELD_NAME = "number";
    // Nested object path the nested aggregation is scoped to.
    private static final String NESTED_OBJECT = "nested_object";
    private static final String NESTED_AGG = "nestedAgg";
    private static final String REVERSE_AGG_NAME = "reverseNestedAgg";
    private static final String MAX_AGG_NAME = "maxAgg";

    /**
     * On an empty index the aggregation tree still materializes: the
     * reverse_nested doc count is 0 and max is its identity (-Infinity).
     */
    public void testNoDocs() throws IOException {
        try (Directory directory = newDirectory()) {
            try (RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) {
                // intentionally not writing any docs
            }
            try (IndexReader indexReader = DirectoryReader.open(directory)) {
                // Build nested -> reverse_nested -> max.
                NestedAggregationBuilder nestedBuilder = new NestedAggregationBuilder(NESTED_AGG,
                        NESTED_OBJECT);
                ReverseNestedAggregationBuilder reverseNestedBuilder
                        = new ReverseNestedAggregationBuilder(REVERSE_AGG_NAME);
                nestedBuilder.subAggregation(reverseNestedBuilder);
                MaxAggregationBuilder maxAgg = new MaxAggregationBuilder(MAX_AGG_NAME)
                        .field(VALUE_FIELD_NAME);
                reverseNestedBuilder.subAggregation(maxAgg);
                // Register "number" as a LONG field so the max agg can resolve it.
                MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(
                        NumberFieldMapper.NumberType.LONG);
                fieldType.setName(VALUE_FIELD_NAME);
                Nested nested = search(newSearcher(indexReader, true, true),
                        new MatchAllDocsQuery(), nestedBuilder, fieldType);
                ReverseNested reverseNested = (ReverseNested)
                        ((InternalAggregation)nested).getProperty(REVERSE_AGG_NAME);
                assertEquals(REVERSE_AGG_NAME, reverseNested.getName());
                assertEquals(0, reverseNested.getDocCount());
                InternalMax max = (InternalMax)
                        ((InternalAggregation)reverseNested).getProperty(MAX_AGG_NAME);
                assertEquals(MAX_AGG_NAME, max.getName());
                // Max over zero docs is the identity value.
                assertEquals(Double.NEGATIVE_INFINITY, max.getValue(), Double.MIN_VALUE);
            }
        }
    }

    /**
     * Indexes a random number of parent docs, each with 0..20 nested children,
     * and verifies the reverse_nested doc count equals the number of parents
     * that have at least one child, and that max equals the largest "number"
     * value among exactly those parents.
     */
    public void testMaxFromParentDocs() throws IOException {
        int numParentDocs = randomIntBetween(1, 20);
        int expectedParentDocs = 0;
        int expectedNestedDocs = 0;
        double expectedMaxValue = Double.NEGATIVE_INFINITY;
        try (Directory directory = newDirectory()) {
            try (RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) {
                for (int i = 0; i < numParentDocs; i++) {
                    List<Document> documents = new ArrayList<>();
                    int numNestedDocs = randomIntBetween(0, 20);
                    for (int nested = 0; nested < numNestedDocs; nested++) {
                        // Nested child: same uid as the parent, nested field type,
                        // and a "__<path>" type marker.
                        Document document = new Document();
                        document.add(new Field(UidFieldMapper.NAME, "type#" + i,
                                UidFieldMapper.Defaults.NESTED_FIELD_TYPE));
                        document.add(new Field(TypeFieldMapper.NAME, "__" + NESTED_OBJECT,
                                TypeFieldMapper.Defaults.FIELD_TYPE));
                        documents.add(document);
                        expectedNestedDocs++;
                    }
                    // Parent doc carries the "number" doc value; must be added
                    // LAST in the block so Lucene treats it as the parent.
                    Document document = new Document();
                    document.add(new Field(UidFieldMapper.NAME, "type#" + i,
                            UidFieldMapper.Defaults.FIELD_TYPE));
                    document.add(new Field(TypeFieldMapper.NAME, "test",
                            TypeFieldMapper.Defaults.FIELD_TYPE));
                    // Value in [0, 9999].
                    long value = randomNonNegativeLong() % 10000;
                    document.add(new SortedNumericDocValuesField(VALUE_FIELD_NAME, value));
                    if (numNestedDocs > 0) {
                        // Only parents with at least one nested child are reachable
                        // through nested -> reverse_nested, so only they contribute.
                        expectedMaxValue = Math.max(expectedMaxValue, value);
                        expectedParentDocs++;
                    }
                    documents.add(document);
                    iw.addDocuments(documents);
                }
                iw.commit();
            }
            try (IndexReader indexReader = DirectoryReader.open(directory)) {
                // Same nested -> reverse_nested -> max tree as testNoDocs.
                NestedAggregationBuilder nestedBuilder = new NestedAggregationBuilder(NESTED_AGG,
                        NESTED_OBJECT);
                ReverseNestedAggregationBuilder reverseNestedBuilder
                        = new ReverseNestedAggregationBuilder(REVERSE_AGG_NAME);
                nestedBuilder.subAggregation(reverseNestedBuilder);
                MaxAggregationBuilder maxAgg = new MaxAggregationBuilder(MAX_AGG_NAME)
                        .field(VALUE_FIELD_NAME);
                reverseNestedBuilder.subAggregation(maxAgg);
                MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(
                        NumberFieldMapper.NumberType.LONG);
                fieldType.setName(VALUE_FIELD_NAME);
                Nested nested = search(newSearcher(indexReader, true, true),
                        new MatchAllDocsQuery(), nestedBuilder, fieldType);
                assertEquals(expectedNestedDocs, nested.getDocCount());
                ReverseNested reverseNested = (ReverseNested)
                        ((InternalAggregation)nested).getProperty(REVERSE_AGG_NAME);
                assertEquals(REVERSE_AGG_NAME, reverseNested.getName());
                assertEquals(expectedParentDocs, reverseNested.getDocCount());
                InternalMax max = (InternalMax)
                        ((InternalAggregation)reverseNested).getProperty(MAX_AGG_NAME);
                assertEquals(MAX_AGG_NAME, max.getName());
                assertEquals(expectedMaxValue, max.getValue(), Double.MIN_VALUE);
            }
        }
    }
}
| apache-2.0 |
bowring/ET_Redux | src/main/java/org/earthtime/plots/evolution/seaWater/SeaWaterDelta234UGraph.java | 8861 | /*
* Copyright 2019 CIRDLES.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.earthtime.plots.evolution.seaWater;
import java.awt.BasicStroke;
import java.awt.Color;
import java.awt.Font;
import java.awt.Graphics;
import java.awt.Graphics2D;
import java.awt.RenderingHints;
import java.awt.Shape;
import java.awt.font.TextLayout;
import java.awt.geom.Line2D;
import java.awt.geom.Rectangle2D;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.earthtime.plots.AbstractDataView;
import org.earthtime.utilities.TicGeneratorForAxes;
/**
*
* @author James F. Bowring, CIRDLES.org, and Earth-Time.org
*/
/**
 * Swing panel plotting seawater initial delta-234U per mil versus age (ka):
 * data points connected by a line, with an upper/lower one-sigma uncertainty
 * envelope and tick marks/labels on both axes.
 *
 * @author James F. Bowring, CIRDLES.org, and Earth-Time.org
 */
public class SeaWaterDelta234UGraph extends AbstractDataView {

    // NOTE(review): the table model is held in a STATIC field but assigned from
    // the instance constructor — all instances share whichever model was set
    // last. Confirm single-instance usage or make this an instance field.
    private static SeaWaterInitialDelta234UTableModel model;

    /**
     * Builds the panel with fixed margins/size and initializes bounds/colors.
     *
     * @param model table model supplying the delta-234U entries to plot
     */
    public SeaWaterDelta234UGraph(SeaWaterInitialDelta234UTableModel model) {
        super();
        this.model = model;
        this.leftMargin = 50;
        this.topMargin = 30;
        this.graphWidth = 900;
        this.graphHeight = 230;
        this.xLocation = 0;
        this.showMe = true;
        initGraph();
    }

    // Sizes the component to the plot area plus symmetric margins.
    private void initGraph() {
        setBounds(xLocation, 0, graphWidth + leftMargin * 2, graphHeight + topMargin * 2);
        setOpaque(true);
        setBackground(Color.white);
    }

    /** Recomputes the plotted arrays from the model, then repaints. */
    @Override
    public void refreshPanel(boolean doReset) {
        preparePanel(doReset);
        repaint();
    }

    @Override
    public void paintComponent(Graphics g) {
        super.paintComponent(g);
        paint((Graphics2D) g, false);
    }

    /**
     * Draws the full plot: border, data points, connecting line, uncertainty
     * envelope, and axis tics/labels.
     *
     * @param g2d graphics target
     * @param svgStyle currently unused in this method — TODO confirm intent
     */
    public void paint(Graphics2D g2d, boolean svgStyle) {
        RenderingHints rh = g2d.getRenderingHints();
        rh.put(RenderingHints.KEY_ANTIALIASING, RenderingHints.VALUE_ANTIALIAS_ON);
        rh.put(RenderingHints.KEY_RENDERING, RenderingHints.VALUE_RENDER_QUALITY);
        g2d.setRenderingHints(rh);
        g2d.setPaint(Color.BLACK);
        g2d.setStroke(new BasicStroke(1.0f));
        g2d.setFont(new Font(
                "SansSerif",
                Font.BOLD,
                10));
        // draw graph border
        g2d.setPaint(Color.black);
        g2d.drawRect(leftMargin, topMargin, (int) graphWidth - 1, (int) graphHeight - 1);
        // spring green
        g2d.setPaint(new Color(0, 255, 127));
        // Data points and segments; mapX/mapY (inherited from AbstractDataView)
        // are presumed to map data coordinates to pixels — confirm in superclass.
        for (int i = 0; i < myOnPeakData.length; i++) {
            // Filled circle at each (age, delta) point.
            g2d.setPaint(new Color(0, 255, 127));
            Shape rawRatioPoint = new java.awt.geom.Ellipse2D.Double( //
                    mapX(myOnPeakNormalizedAquireTimes[i]) - 3, mapY(myOnPeakData[i]) - 3, 6, 6);
            g2d.draw(rawRatioPoint);
            g2d.fill(rawRatioPoint);
            if (i > 0) {
                // draw line connecting the previous point to this one
                g2d.setPaint(new Color(0, 255, 127));
                g2d.setStroke(new BasicStroke(1.0f));
                Line2D line = new Line2D.Double(
                        mapX(myOnPeakNormalizedAquireTimes[i - 1]),
                        mapY(myOnPeakData[i - 1]),
                        mapX(myOnPeakNormalizedAquireTimes[i]),
                        mapY(myOnPeakData[i]));
                g2d.draw(line);
                // Thin black upper/lower one-sigma envelope segments.
                g2d.setPaint(Color.black);
                g2d.setStroke(new BasicStroke(0.5f));
                Line2D upperEnv = new Line2D.Double(
                        mapX(myOnPeakNormalizedAquireTimes[i - 1]),
                        mapY(myOnPeakDataUpperUnct[i - 1]),
                        mapX(myOnPeakNormalizedAquireTimes[i]),
                        mapY(myOnPeakDataUpperUnct[i]));
                g2d.draw(upperEnv);
                Line2D lowerEnv = new Line2D.Double(
                        mapX(myOnPeakNormalizedAquireTimes[i - 1]),
                        mapY(myOnPeakDataLowerUnct[i - 1]),
                        mapX(myOnPeakNormalizedAquireTimes[i]),
                        mapY(myOnPeakDataLowerUnct[i]));
                g2d.draw(lowerEnv);
            }
        }
        g2d.setPaint(Color.black);
        // X-axis: one tic + centered label per data age.
        for (int i = 0; i < myOnPeakNormalizedAquireTimes.length; i++) {
            try {
                Shape ticMark = new Line2D.Double(
                        mapX(myOnPeakNormalizedAquireTimes[i]),
                        mapY(minY) - 2,
                        mapX(myOnPeakNormalizedAquireTimes[i]),
                        mapY(minY) + 4);
                g2d.draw(ticMark);
                TextLayout mLayout
                        = new TextLayout(
                                String.valueOf(myOnPeakNormalizedAquireTimes[i]), g2d.getFont(), g2d.getFontRenderContext());
                Rectangle2D bounds = mLayout.getBounds();
                g2d.drawString(String.valueOf(myOnPeakNormalizedAquireTimes[i]),
                        (float) mapX(myOnPeakNormalizedAquireTimes[i]) - (float) (bounds.getWidth() / 2.0f),
                        (float) mapY(minY) + 15);
            } catch (Exception e) {
                // NOTE(review): label/tic failures are silently swallowed so one
                // bad value cannot abort the whole paint — consider logging.
            }
        }
        // Y-axis tics: the extremes plus every plotted data value (deduplicated
        // via the Set).
        Set<Double> yTics = new HashSet<>();
        yTics.add(minY);
        yTics.add(maxY);
        for (int i = 0; i < myOnPeakData.length; i++) {
            yTics.add(myOnPeakData[i]);
            // yTics.add(myOnPeakDataUpperUnct[i]);
            // yTics.add(myOnPeakDataLowerUnct[i]);
        }
        for (Double tic : yTics) {
            try {
                Shape ticMark = new Line2D.Double(
                        mapX(minX) - 4,
                        mapY(tic),
                        mapX(minX) + 2,
                        mapY(tic));
                g2d.draw(ticMark);
                TextLayout mLayout
                        = new TextLayout(
                                String.valueOf(tic), g2d.getFont(), g2d.getFontRenderContext());
                Rectangle2D bounds = mLayout.getBounds();
                g2d.drawString(String.valueOf(tic),
                        (float) mapX(minX) - (float) bounds.getWidth() - 5,
                        (float) mapY(tic) + (float) (bounds.getHeight() / 2.0f));
            } catch (Exception e) {
                // NOTE(review): swallowed for the same reason as above.
            }
        }
    }

    /**
     * Rebuilds the plotted arrays from the model (dropping entries with
     * negative age) and recomputes the axis ranges.
     */
    @Override
    public void preparePanel(boolean doReset) {
        List<SeaWaterDelta234UModelEntry> entryListOrig = model.getEntryList();
        // remove -1 entries (negative age means "unset" in the table model —
        // TODO confirm)
        List<SeaWaterDelta234UModelEntry> entryList = new ArrayList<>();
        for (SeaWaterDelta234UModelEntry swe : entryListOrig) {
            if (swe.getAgeInKa() >= 0) {
                entryList.add(swe);
            }
        }
        myOnPeakNormalizedAquireTimes = new double[entryList.size()];
        myOnPeakData = new double[entryList.size()];
        myOnPeakDataUpperUnct = new double[entryList.size()];
        myOnPeakDataLowerUnct = new double[entryList.size()];
        // Envelope = value +/- one-sigma absolute uncertainty.
        for (int i = 0; i < myOnPeakNormalizedAquireTimes.length; i++) {
            myOnPeakNormalizedAquireTimes[i] = entryList.get(i).ageInKa;
            myOnPeakData[i] = entryList.get(i).delta234UPerMil;
            myOnPeakDataUpperUnct[i] = entryList.get(i).delta234UPerMil + entryList.get(i).oneSigmaAbsUnct;
            myOnPeakDataLowerUnct[i] = entryList.get(i).delta234UPerMil - entryList.get(i).oneSigmaAbsUnct;
        }
        setDisplayOffsetY(0.0);
        setDisplayOffsetX(0.0);
        // X-axis lays out time evenly spaced
        minX = myOnPeakNormalizedAquireTimes[0];
        maxX = myOnPeakNormalizedAquireTimes[myOnPeakNormalizedAquireTimes.length - 1];
        double xMarginStretch = TicGeneratorForAxes.generateMarginAdjustment(minX, maxX, 0.05);
        minX -= xMarginStretch;
        maxX += xMarginStretch;
        // Y-axis is ratios
        minY = Double.MAX_VALUE;
        maxY = -Double.MAX_VALUE;
        // find min and max y over the finite values, envelope included
        for (int i = 0; i < myOnPeakData.length; i++) {
            if ((Double.isFinite(myOnPeakData[i]))) {
                minY = Math.min(minY, myOnPeakDataLowerUnct[i]);
                maxY = Math.max(maxY, myOnPeakDataUpperUnct[i]);
            }
        }
        // adjust margins for unknowns: fixed 2 per-mil padding on each side
        minY -= 2;
        maxY += 2;
        // double yMarginStretch = TicGeneratorForAxes.generateMarginAdjustment(minY, maxY, 0.05);
        // minY -= yMarginStretch;
        // maxY += yMarginStretch;
    }

    /**
     * @param aModel the model to set
     */
    public static void setModel(SeaWaterInitialDelta234UTableModel aModel) {
        model = aModel;
    }
}
| apache-2.0 |
studanshu/datacollector | container/src/main/java/com/streamsets/datacollector/updatechecker/UpdateChecker.java | 6566 | /**
* Copyright 2015 StreamSets Inc.
*
* Licensed under the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.streamsets.datacollector.updatechecker;
import com.google.common.annotations.VisibleForTesting;
import com.streamsets.datacollector.config.PipelineConfiguration;
import com.streamsets.datacollector.config.StageConfiguration;
import com.streamsets.datacollector.execution.PipelineState;
import com.streamsets.datacollector.execution.PipelineStatus;
import com.streamsets.datacollector.execution.Runner;
import com.streamsets.datacollector.json.ObjectMapperFactory;
import com.streamsets.datacollector.main.DataCollectorBuildInfo;
import com.streamsets.datacollector.main.RuntimeInfo;
import com.streamsets.datacollector.store.PipelineStoreException;
import com.streamsets.datacollector.util.Configuration;
import com.streamsets.pipeline.api.impl.Utils;
import org.apache.commons.codec.binary.Base64;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
/**
 * Periodic task that, while the pipeline is RUNNING, posts anonymized usage
 * information (hashed runner token, build info, stage names/versions) to the
 * StreamSets update-check endpoint and records whatever the server returns.
 * All network/parsing failures are logged at TRACE level and otherwise ignored.
 */
public class UpdateChecker implements Runnable {
  private static final Logger LOG = LoggerFactory.getLogger(UpdateChecker.class);

  /** Configuration key that overrides the update-check endpoint URL. */
  public static final String URL_KEY = "streamsets.updatecheck.url";
  public static final String URL_DEFAULT = "https://streamsets.com/rest/v1/updatecheck";

  static final String APPLICATION_JSON_MIME = "application/json";

  private final RuntimeInfo runtimeInfo;
  private final Runner runner;
  private URL url = null;
  // Last payload returned by the server; null until a check succeeds.
  private volatile Map updateInfo;
  private final PipelineConfiguration pipelineConf;

  public UpdateChecker(RuntimeInfo runtimeInfo, Configuration configuration,
      PipelineConfiguration pipelineConf, Runner runner) {
    this.pipelineConf = pipelineConf;
    this.runtimeInfo = runtimeInfo;
    this.runner = runner;
    String url = configuration.get(URL_KEY, URL_DEFAULT);
    try {
      this.url = new URL(url);
    } catch (Exception ex) {
      // A malformed URL disables the check instead of failing startup.
      LOG.trace("Invalid update check URL '{}': {}", url, ex.toString(), ex);
    }
  }

  URL getUrl() {
    return url;
  }

  /**
   * Returns the Base64-encoded SHA-256 digest of {@code id} (used to
   * anonymize the runner token), or {@code "<UNKNOWN>"} if hashing fails.
   */
  static String getSha256(String id) {
    try {
      MessageDigest md = MessageDigest.getInstance("SHA-256");
      // Charset constant instead of the "UTF-8" name: no checked
      // UnsupportedEncodingException and no charset-name typo risk.
      md.update(id.getBytes(StandardCharsets.UTF_8));
      return Base64.encodeBase64String(md.digest());
    } catch (Exception ex) {
      return "<UNKNOWN>";
    }
  }

  /**
   * Builds the upload payload: hashed runner token, build info, and the
   * name/version/library of the stats-aggregator stage (if set), the error
   * stage, and every pipeline stage.
   *
   * @return the payload, or null when no pipeline configuration is available
   */
  @VisibleForTesting
  Map getUploadInfo() {
    Map<String, Object> uploadInfo = null;
    if (pipelineConf != null) {
      List<Map<String, Object>> stages = new ArrayList<>();
      // Stats aggregator target stage (optional).
      if (pipelineConf.getStatsAggregatorStage() != null) {
        Map<String, Object> stage = new LinkedHashMap<>();
        stage.put("name", pipelineConf.getStatsAggregatorStage().getStageName());
        stage.put("version", pipelineConf.getStatsAggregatorStage().getStageVersion());
        stage.put("library", pipelineConf.getStatsAggregatorStage().getLibrary());
        stages.add(stage);
      }
      // error stage
      Map<String, Object> errorStage = new LinkedHashMap<>();
      errorStage.put("name", pipelineConf.getErrorStage().getStageName());
      errorStage.put("version", pipelineConf.getErrorStage().getStageVersion());
      errorStage.put("library", pipelineConf.getErrorStage().getLibrary());
      stages.add(errorStage);
      // pipeline stages
      for (StageConfiguration stageConf : pipelineConf.getStages()) {
        Map<String, Object> stage = new LinkedHashMap<>();
        stage.put("name", stageConf.getStageName());
        stage.put("version", stageConf.getStageVersion());
        stage.put("library", stageConf.getLibrary());
        stages.add(stage);
      }
      uploadInfo = new LinkedHashMap<>();
      uploadInfo.put("sdc.sha256", getSha256(runner.getToken()));
      uploadInfo.put("sdc.buildInfo", new DataCollectorBuildInfo());
      uploadInfo.put("sdc.stages", stages);
    }
    return uploadInfo;
  }

  /**
   * Performs one update check: clears the previous result, then, if the
   * pipeline is RUNNING and a valid URL is configured, POSTs the upload info
   * and stores the JSON response in {@link #getUpdateInfo()}.
   */
  @Override
  public void run() {
    updateInfo = null;
    PipelineState ps;
    try {
      ps = runner.getState();
    } catch (PipelineStoreException e) {
      LOG.warn(Utils.format("Cannot get pipeline state: '{}'", e.toString()), e);
      return;
    }
    if (ps.getStatus() == PipelineStatus.RUNNING) {
      if (url != null) {
        Map uploadInfo = getUploadInfo();
        if (uploadInfo != null) {
          HttpURLConnection conn = null;
          try {
            conn = (HttpURLConnection) url.openConnection();
            conn.setConnectTimeout(2000);
            conn.setReadTimeout(2000);
            conn.setDoOutput(true);
            conn.setDoInput(true);
            conn.setRequestProperty("content-type", APPLICATION_JSON_MIME);
            ObjectMapperFactory.getOneLine().writeValue(conn.getOutputStream(), uploadInfo);
            if (conn.getResponseCode() == HttpURLConnection.HTTP_OK) {
              // Only trust the response when the server declares JSON.
              String responseContentType = conn.getHeaderField("content-type");
              if (APPLICATION_JSON_MIME.equals(responseContentType)) {
                updateInfo = ObjectMapperFactory.get().readValue(conn.getInputStream(), Map.class);
              } else {
                LOG.trace("Got invalid content-type '{}' from update-check server", responseContentType);
              }
            } else {
              LOG.trace("Got '{} : {}' from update-check server", conn.getResponseCode(), conn.getResponseMessage());
            }
          } catch (Exception ex) {
            LOG.trace("Could not do an update check: {}", ex.toString(), ex);
          } finally {
            if (conn != null) {
              conn.disconnect();
            }
          }
        }
      }
    }
  }

  /** Returns the last server response, or null if no check has succeeded. */
  public Map getUpdateInfo() {
    return updateInfo;
  }
}
| apache-2.0 |
wangsongpeng/jdk-src | src/main/java/java/time/format/Parsed.java | 28799 | /*
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms.
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*/
/*
*
*
*
*
*
* Copyright (c) 2008-2013, Stephen Colebourne & Michael Nascimento Santos
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of JSR-310 nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package java.time.format;
import static java.time.temporal.ChronoField.AMPM_OF_DAY;
import static java.time.temporal.ChronoField.CLOCK_HOUR_OF_AMPM;
import static java.time.temporal.ChronoField.CLOCK_HOUR_OF_DAY;
import static java.time.temporal.ChronoField.HOUR_OF_AMPM;
import static java.time.temporal.ChronoField.HOUR_OF_DAY;
import static java.time.temporal.ChronoField.INSTANT_SECONDS;
import static java.time.temporal.ChronoField.MICRO_OF_DAY;
import static java.time.temporal.ChronoField.MICRO_OF_SECOND;
import static java.time.temporal.ChronoField.MILLI_OF_DAY;
import static java.time.temporal.ChronoField.MILLI_OF_SECOND;
import static java.time.temporal.ChronoField.MINUTE_OF_DAY;
import static java.time.temporal.ChronoField.MINUTE_OF_HOUR;
import static java.time.temporal.ChronoField.NANO_OF_DAY;
import static java.time.temporal.ChronoField.NANO_OF_SECOND;
import static java.time.temporal.ChronoField.OFFSET_SECONDS;
import static java.time.temporal.ChronoField.SECOND_OF_DAY;
import static java.time.temporal.ChronoField.SECOND_OF_MINUTE;
import java.time.DateTimeException;
import java.time.Instant;
import java.time.LocalDate;
import java.time.LocalTime;
import java.time.Period;
import java.time.ZoneId;
import java.time.ZoneOffset;
import java.time.chrono.ChronoLocalDate;
import java.time.chrono.ChronoLocalDateTime;
import java.time.chrono.ChronoZonedDateTime;
import java.time.chrono.Chronology;
import java.time.temporal.ChronoField;
import java.time.temporal.TemporalAccessor;
import java.time.temporal.TemporalField;
import java.time.temporal.TemporalQueries;
import java.time.temporal.TemporalQuery;
import java.time.temporal.UnsupportedTemporalTypeException;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.Set;
/**
* A store of parsed data.
* <p>
* This class is used during parsing to collect the data. Part of the parsing process
* involves handling optional blocks and multiple copies of the data get created to
* support the necessary backtracking.
* <p>
* Once parsing is completed, this class can be used as the resultant {@code TemporalAccessor}.
* In most cases, it is only exposed once the fields have been resolved.
*
* @implSpec
* This class is a mutable context intended for use from a single thread.
* Usage of the class is thread-safe within standard parsing as a new instance of this class
* is automatically created for each parse and parsing is single-threaded
*
* @since 1.8
*/
final class Parsed implements TemporalAccessor {
    // some fields are accessed using package scope from DateTimeParseContext
    /**
     * The parsed fields: each field mapped to the raw long value parsed for it.
     */
    final Map<TemporalField, Long> fieldValues = new HashMap<>();
    /**
     * The parsed zone.
     */
    ZoneId zone;
    /**
     * The parsed chronology.
     */
    Chronology chrono;
    /**
     * Whether a leap-second is parsed.
     */
    boolean leapSecond;
    /**
     * The resolver style to use; set when {@code resolve} is invoked.
     */
    private ResolverStyle resolverStyle;
    /**
     * The resolved date, populated during resolution (null until then).
     */
    private ChronoLocalDate date;
    /**
     * The resolved time, populated during resolution (null until then).
     */
    private LocalTime time;
    /**
     * The excess period from time-only parsing.
     */
    Period excessDays = Period.ZERO;
    /**
     * Creates an instance.
     */
    Parsed() {
    }
/**
* Creates a copy.
*/
Parsed copy() {
// only copy fields used in parsing stage
Parsed cloned = new Parsed();
cloned.fieldValues.putAll(this.fieldValues);
cloned.zone = this.zone;
cloned.chrono = this.chrono;
cloned.leapSecond = this.leapSecond;
return cloned;
}
//-----------------------------------------------------------------------
@Override
public boolean isSupported(TemporalField field) {
if (fieldValues.containsKey(field) ||
(date != null && date.isSupported(field)) ||
(time != null && time.isSupported(field))) {
return true;
}
return field != null && (field instanceof ChronoField == false) && field.isSupportedBy(this);
}
@Override
public long getLong(TemporalField field) {
Objects.requireNonNull(field, "field");
Long value = fieldValues.get(field);
if (value != null) {
return value;
}
if (date != null && date.isSupported(field)) {
return date.getLong(field);
}
if (time != null && time.isSupported(field)) {
return time.getLong(field);
}
if (field instanceof ChronoField) {
throw new UnsupportedTemporalTypeException("Unsupported field: " + field);
}
return field.getFrom(this);
}
@SuppressWarnings("unchecked")
@Override
public <R> R query(TemporalQuery<R> query) {
if (query == TemporalQueries.zoneId()) {
return (R) zone;
} else if (query == TemporalQueries.chronology()) {
return (R) chrono;
} else if (query == TemporalQueries.localDate()) {
return (R) (date != null ? LocalDate.from(date) : null);
} else if (query == TemporalQueries.localTime()) {
return (R) time;
} else if (query == TemporalQueries.zone() || query == TemporalQueries.offset()) {
return query.queryFrom(this);
} else if (query == TemporalQueries.precision()) {
return null; // not a complete date/time
}
// inline TemporalAccessor.super.query(query) as an optimization
// non-JDK classes are not permitted to make this optimization
return query.queryFrom(this);
}
    //-----------------------------------------------------------------------
    /**
     * Resolves the fields in this context.
     *
     * @param resolverStyle the resolver style, not null
     * @param resolverFields the fields to use for resolving, null for all fields
     * @return this, for method chaining
     * @throws DateTimeException if resolving one field results in a value for
     *  another field that is in conflict
     */
    TemporalAccessor resolve(ResolverStyle resolverStyle, Set<TemporalField> resolverFields) {
        if (resolverFields != null) {
            // restrict resolution to the caller-selected subset of fields
            fieldValues.keySet().retainAll(resolverFields);
        }
        this.resolverStyle = resolverStyle;
        // NOTE: the phases below are order-dependent; do not reorder
        resolveFields();       // ChronoField and custom-field resolution
        resolveTimeLenient();  // lenient creation of time from partial info
        crossCheck();          // verify leftover fields against date/time
        resolvePeriod();       // fold excess days into the date
        resolveFractional();   // ensure milli/micro/nano all present
        resolveInstant();      // derive INSTANT_SECONDS when possible
        return this;
    }
    //-----------------------------------------------------------------------
    /**
     * Resolves ChronoField values, then repeatedly asks any remaining
     * (custom) fields to resolve themselves until nothing changes. The loop
     * restarts after each change to avoid concurrent modification of the
     * field map, and is bounded to catch badly written resolve() methods.
     */
    private void resolveFields() {
        // resolve ChronoField
        resolveInstantFields();
        resolveDateFields();
        resolveTimeFields();

        // if any other fields, handle them
        // any lenient date resolution should return epoch-day
        if (fieldValues.size() > 0) {
            int changedCount = 0;
            outer:
            while (changedCount < 50) {
                for (Entry<TemporalField, Long> entry : fieldValues.entrySet()) {
                    TemporalField targetField = entry.getKey();
                    TemporalAccessor resolvedObject = targetField.resolve(fieldValues, this, resolverStyle);
                    if (resolvedObject != null) {
                        if (resolvedObject instanceof ChronoZonedDateTime) {
                            // unwrap the zone, then continue as a local date-time
                            ChronoZonedDateTime<?> czdt = (ChronoZonedDateTime<?>) resolvedObject;
                            if (zone == null) {
                                zone = czdt.getZone();
                            } else if (zone.equals(czdt.getZone()) == false) {
                                throw new DateTimeException("ChronoZonedDateTime must use the effective parsed zone: " + zone);
                            }
                            resolvedObject = czdt.toLocalDateTime();
                        }
                        if (resolvedObject instanceof ChronoLocalDateTime) {
                            ChronoLocalDateTime<?> cldt = (ChronoLocalDateTime<?>) resolvedObject;
                            updateCheckConflict(cldt.toLocalTime(), Period.ZERO);
                            updateCheckConflict(cldt.toLocalDate());
                            changedCount++;
                            continue outer;  // have to restart to avoid concurrent modification
                        }
                        if (resolvedObject instanceof ChronoLocalDate) {
                            updateCheckConflict((ChronoLocalDate) resolvedObject);
                            changedCount++;
                            continue outer;  // have to restart to avoid concurrent modification
                        }
                        if (resolvedObject instanceof LocalTime) {
                            updateCheckConflict((LocalTime) resolvedObject, Period.ZERO);
                            changedCount++;
                            continue outer;  // have to restart to avoid concurrent modification
                        }
                        throw new DateTimeException("Method resolve() can only return ChronoZonedDateTime, " +
                                "ChronoLocalDateTime, ChronoLocalDate or LocalTime");
                    } else if (fieldValues.containsKey(targetField) == false) {
                        // the field resolved itself away (removed its map entry)
                        changedCount++;
                        continue outer;  // have to restart to avoid concurrent modification
                    }
                }
                break;
            }
            if (changedCount == 50) {  // catch infinite loops
                throw new DateTimeException("One of the parsed fields has an incorrectly implemented resolve method");
            }
            // if something changed then have to redo ChronoField resolve
            if (changedCount > 0) {
                resolveInstantFields();
                resolveDateFields();
                resolveTimeFields();
            }
        }
    }
private void updateCheckConflict(TemporalField targetField, TemporalField changeField, Long changeValue) {
Long old = fieldValues.put(changeField, changeValue);
if (old != null && old.longValue() != changeValue.longValue()) {
throw new DateTimeException("Conflict found: " + changeField + " " + old +
" differs from " + changeField + " " + changeValue +
" while resolving " + targetField);
}
}
//-----------------------------------------------------------------------
private void resolveInstantFields() {
// resolve parsed instant seconds to date and time if zone available
if (fieldValues.containsKey(INSTANT_SECONDS)) {
if (zone != null) {
resolveInstantFields0(zone);
} else {
Long offsetSecs = fieldValues.get(OFFSET_SECONDS);
if (offsetSecs != null) {
ZoneOffset offset = ZoneOffset.ofTotalSeconds(offsetSecs.intValue());
resolveInstantFields0(offset);
}
}
}
}
    /**
     * Converts the parsed INSTANT_SECONDS to a local date and second-of-day
     * in the given zone, merging both into the parsed state.
     */
    private void resolveInstantFields0(ZoneId selectedZone) {
        Instant instant = Instant.ofEpochSecond(fieldValues.remove(INSTANT_SECONDS));
        ChronoZonedDateTime<?> zdt = chrono.zonedDateTime(instant, selectedZone);
        updateCheckConflict(zdt.toLocalDate());
        // record the time as SECOND_OF_DAY so time resolution can consume it
        updateCheckConflict(INSTANT_SECONDS, SECOND_OF_DAY, (long) zdt.toLocalTime().toSecondOfDay());
    }
    //-----------------------------------------------------------------------
    /**
     * Asks the chronology to combine the parsed date fields into a date.
     */
    private void resolveDateFields() {
        updateCheckConflict(chrono.resolveDate(fieldValues, resolverStyle));
    }
private void updateCheckConflict(ChronoLocalDate cld) {
if (date != null) {
if (cld != null && date.equals(cld) == false) {
throw new DateTimeException("Conflict found: Fields resolved to two different dates: " + date + " " + cld);
}
} else if (cld != null) {
if (chrono.equals(cld.getChronology()) == false) {
throw new DateTimeException("ChronoLocalDate must use the effective parsed chronology: " + chrono);
}
date = cld;
}
}
//-----------------------------------------------------------------------
private void resolveTimeFields() {
// simplify fields
if (fieldValues.containsKey(CLOCK_HOUR_OF_DAY)) {
// lenient allows anything, smart allows 0-24, strict allows 1-24
long ch = fieldValues.remove(CLOCK_HOUR_OF_DAY);
if (resolverStyle == ResolverStyle.STRICT || (resolverStyle == ResolverStyle.SMART && ch != 0)) {
CLOCK_HOUR_OF_DAY.checkValidValue(ch);
}
updateCheckConflict(CLOCK_HOUR_OF_DAY, HOUR_OF_DAY, ch == 24 ? 0 : ch);
}
if (fieldValues.containsKey(CLOCK_HOUR_OF_AMPM)) {
// lenient allows anything, smart allows 0-12, strict allows 1-12
long ch = fieldValues.remove(CLOCK_HOUR_OF_AMPM);
if (resolverStyle == ResolverStyle.STRICT || (resolverStyle == ResolverStyle.SMART && ch != 0)) {
CLOCK_HOUR_OF_AMPM.checkValidValue(ch);
}
updateCheckConflict(CLOCK_HOUR_OF_AMPM, HOUR_OF_AMPM, ch == 12 ? 0 : ch);
}
if (fieldValues.containsKey(AMPM_OF_DAY) && fieldValues.containsKey(HOUR_OF_AMPM)) {
long ap = fieldValues.remove(AMPM_OF_DAY);
long hap = fieldValues.remove(HOUR_OF_AMPM);
if (resolverStyle == ResolverStyle.LENIENT) {
updateCheckConflict(AMPM_OF_DAY, HOUR_OF_DAY, Math.addExact(Math.multiplyExact(ap, 12), hap));
} else { // STRICT or SMART
AMPM_OF_DAY.checkValidValue(ap);
HOUR_OF_AMPM.checkValidValue(ap);
updateCheckConflict(AMPM_OF_DAY, HOUR_OF_DAY, ap * 12 + hap);
}
}
if (fieldValues.containsKey(NANO_OF_DAY)) {
long nod = fieldValues.remove(NANO_OF_DAY);
if (resolverStyle != ResolverStyle.LENIENT) {
NANO_OF_DAY.checkValidValue(nod);
}
updateCheckConflict(NANO_OF_DAY, HOUR_OF_DAY, nod / 3600_000_000_000L);
updateCheckConflict(NANO_OF_DAY, MINUTE_OF_HOUR, (nod / 60_000_000_000L) % 60);
updateCheckConflict(NANO_OF_DAY, SECOND_OF_MINUTE, (nod / 1_000_000_000L) % 60);
updateCheckConflict(NANO_OF_DAY, NANO_OF_SECOND, nod % 1_000_000_000L);
}
if (fieldValues.containsKey(MICRO_OF_DAY)) {
long cod = fieldValues.remove(MICRO_OF_DAY);
if (resolverStyle != ResolverStyle.LENIENT) {
MICRO_OF_DAY.checkValidValue(cod);
}
updateCheckConflict(MICRO_OF_DAY, SECOND_OF_DAY, cod / 1_000_000L);
updateCheckConflict(MICRO_OF_DAY, MICRO_OF_SECOND, cod % 1_000_000L);
}
if (fieldValues.containsKey(MILLI_OF_DAY)) {
long lod = fieldValues.remove(MILLI_OF_DAY);
if (resolverStyle != ResolverStyle.LENIENT) {
MILLI_OF_DAY.checkValidValue(lod);
}
updateCheckConflict(MILLI_OF_DAY, SECOND_OF_DAY, lod / 1_000);
updateCheckConflict(MILLI_OF_DAY, MILLI_OF_SECOND, lod % 1_000);
}
if (fieldValues.containsKey(SECOND_OF_DAY)) {
long sod = fieldValues.remove(SECOND_OF_DAY);
if (resolverStyle != ResolverStyle.LENIENT) {
SECOND_OF_DAY.checkValidValue(sod);
}
updateCheckConflict(SECOND_OF_DAY, HOUR_OF_DAY, sod / 3600);
updateCheckConflict(SECOND_OF_DAY, MINUTE_OF_HOUR, (sod / 60) % 60);
updateCheckConflict(SECOND_OF_DAY, SECOND_OF_MINUTE, sod % 60);
}
if (fieldValues.containsKey(MINUTE_OF_DAY)) {
long mod = fieldValues.remove(MINUTE_OF_DAY);
if (resolverStyle != ResolverStyle.LENIENT) {
MINUTE_OF_DAY.checkValidValue(mod);
}
updateCheckConflict(MINUTE_OF_DAY, HOUR_OF_DAY, mod / 60);
updateCheckConflict(MINUTE_OF_DAY, MINUTE_OF_HOUR, mod % 60);
}
// combine partial second fields strictly, leaving lenient expansion to later
if (fieldValues.containsKey(NANO_OF_SECOND)) {
long nos = fieldValues.get(NANO_OF_SECOND);
if (resolverStyle != ResolverStyle.LENIENT) {
NANO_OF_SECOND.checkValidValue(nos);
}
if (fieldValues.containsKey(MICRO_OF_SECOND)) {
long cos = fieldValues.remove(MICRO_OF_SECOND);
if (resolverStyle != ResolverStyle.LENIENT) {
MICRO_OF_SECOND.checkValidValue(cos);
}
nos = cos * 1000 + (nos % 1000);
updateCheckConflict(MICRO_OF_SECOND, NANO_OF_SECOND, nos);
}
if (fieldValues.containsKey(MILLI_OF_SECOND)) {
long los = fieldValues.remove(MILLI_OF_SECOND);
if (resolverStyle != ResolverStyle.LENIENT) {
MILLI_OF_SECOND.checkValidValue(los);
}
updateCheckConflict(MILLI_OF_SECOND, NANO_OF_SECOND, los * 1_000_000L + (nos % 1_000_000L));
}
}
// convert to time if all four fields available (optimization)
if (fieldValues.containsKey(HOUR_OF_DAY) && fieldValues.containsKey(MINUTE_OF_HOUR) &&
fieldValues.containsKey(SECOND_OF_MINUTE) && fieldValues.containsKey(NANO_OF_SECOND)) {
long hod = fieldValues.remove(HOUR_OF_DAY);
long moh = fieldValues.remove(MINUTE_OF_HOUR);
long som = fieldValues.remove(SECOND_OF_MINUTE);
long nos = fieldValues.remove(NANO_OF_SECOND);
resolveTime(hod, moh, som, nos);
}
}
    /**
     * Leniently creates a time from whatever partial information remains.
     * Runs after all other resolution because it invents default values,
     * which would otherwise break conflict checking; in non-lenient styles
     * any leftover time-based ChronoFields are finally range-validated.
     */
    private void resolveTimeLenient() {
        // leniently create a time from incomplete information
        // done after everything else as it creates information from nothing
        // which would break updateCheckConflict(field)

        if (time == null) {
            // NANO_OF_SECOND merged with MILLI/MICRO above
            if (fieldValues.containsKey(MILLI_OF_SECOND)) {
                long los = fieldValues.remove(MILLI_OF_SECOND);
                if (fieldValues.containsKey(MICRO_OF_SECOND)) {
                    // merge milli-of-second and micro-of-second for better error message
                    long cos = los * 1_000 + (fieldValues.get(MICRO_OF_SECOND) % 1_000);
                    updateCheckConflict(MILLI_OF_SECOND, MICRO_OF_SECOND, cos);
                    fieldValues.remove(MICRO_OF_SECOND);
                    fieldValues.put(NANO_OF_SECOND, cos * 1_000L);
                } else {
                    // convert milli-of-second to nano-of-second
                    fieldValues.put(NANO_OF_SECOND, los * 1_000_000L);
                }
            } else if (fieldValues.containsKey(MICRO_OF_SECOND)) {
                // convert micro-of-second to nano-of-second
                long cos = fieldValues.remove(MICRO_OF_SECOND);
                fieldValues.put(NANO_OF_SECOND, cos * 1_000L);
            }

            // merge hour/minute/second/nano leniently
            Long hod = fieldValues.get(HOUR_OF_DAY);
            if (hod != null) {
                Long moh = fieldValues.get(MINUTE_OF_HOUR);
                Long som = fieldValues.get(SECOND_OF_MINUTE);
                Long nos = fieldValues.get(NANO_OF_SECOND);

                // check for invalid combinations that cannot be defaulted
                // (a gap in the hour->minute->second->nano chain)
                if ((moh == null && (som != null || nos != null)) ||
                        (moh != null && som == null && nos != null)) {
                    return;
                }

                // default as necessary and build time
                long mohVal = (moh != null ? moh : 0);
                long somVal = (som != null ? som : 0);
                long nosVal = (nos != null ? nos : 0);
                resolveTime(hod, mohVal, somVal, nosVal);
                fieldValues.remove(HOUR_OF_DAY);
                fieldValues.remove(MINUTE_OF_HOUR);
                fieldValues.remove(SECOND_OF_MINUTE);
                fieldValues.remove(NANO_OF_SECOND);
            }
        }

        // validate remaining
        if (resolverStyle != ResolverStyle.LENIENT && fieldValues.size() > 0) {
            for (Entry<TemporalField, Long> entry : fieldValues.entrySet()) {
                TemporalField field = entry.getKey();
                if (field instanceof ChronoField && field.isTimeBased()) {
                    ((ChronoField) field).checkValidValue(entry.getValue());
                }
            }
        }
    }
    /**
     * Stores the time built from hour/minute/second/nano according to the
     * resolver style: LENIENT wraps any overflow into excess days, SMART
     * additionally maps 24:00 to midnight of the next day, STRICT requires
     * every component in range.
     */
    private void resolveTime(long hod, long moh, long som, long nos) {
        if (resolverStyle == ResolverStyle.LENIENT) {
            // overflow-checked arithmetic; out-of-range totals become excess days
            long totalNanos = Math.multiplyExact(hod, 3600_000_000_000L);
            totalNanos = Math.addExact(totalNanos, Math.multiplyExact(moh, 60_000_000_000L));
            totalNanos = Math.addExact(totalNanos, Math.multiplyExact(som, 1_000_000_000L));
            totalNanos = Math.addExact(totalNanos, nos);
            int excessDays = (int) Math.floorDiv(totalNanos, 86400_000_000_000L);  // safe int cast
            long nod = Math.floorMod(totalNanos, 86400_000_000_000L);
            updateCheckConflict(LocalTime.ofNanoOfDay(nod), Period.ofDays(excessDays));
        } else {  // STRICT or SMART
            int mohVal = MINUTE_OF_HOUR.checkValidIntValue(moh);
            int nosVal = NANO_OF_SECOND.checkValidIntValue(nos);
            // handle 24:00 end of day
            if (resolverStyle == ResolverStyle.SMART && hod == 24 && mohVal == 0 && som == 0 && nosVal == 0) {
                updateCheckConflict(LocalTime.MIDNIGHT, Period.ofDays(1));
            } else {
                int hodVal = HOUR_OF_DAY.checkValidIntValue(hod);
                int somVal = SECOND_OF_MINUTE.checkValidIntValue(som);
                updateCheckConflict(LocalTime.of(hodVal, mohVal, somVal, nosVal), Period.ZERO);
            }
        }
    }
private void resolvePeriod() {
// add whole days if we have both date and time
if (date != null && time != null && excessDays.isZero() == false) {
date = date.plus(excessDays);
excessDays = Period.ZERO;
}
}
private void resolveFractional() {
// ensure fractional seconds available as ChronoField requires
// resolveTimeLenient() will have merged MICRO_OF_SECOND/MILLI_OF_SECOND to NANO_OF_SECOND
if (time == null &&
(fieldValues.containsKey(INSTANT_SECONDS) ||
fieldValues.containsKey(SECOND_OF_DAY) ||
fieldValues.containsKey(SECOND_OF_MINUTE))) {
if (fieldValues.containsKey(NANO_OF_SECOND)) {
long nos = fieldValues.get(NANO_OF_SECOND);
fieldValues.put(MICRO_OF_SECOND, nos / 1000);
fieldValues.put(MILLI_OF_SECOND, nos / 1000000);
} else {
fieldValues.put(NANO_OF_SECOND, 0L);
fieldValues.put(MICRO_OF_SECOND, 0L);
fieldValues.put(MILLI_OF_SECOND, 0L);
}
}
}
private void resolveInstant() {
// add instant seconds if we have date, time and zone
if (date != null && time != null) {
if (zone != null) {
long instant = date.atTime(time).atZone(zone).getLong(ChronoField.INSTANT_SECONDS);
fieldValues.put(INSTANT_SECONDS, instant);
} else {
Long offsetSecs = fieldValues.get(OFFSET_SECONDS);
if (offsetSecs != null) {
ZoneOffset offset = ZoneOffset.ofTotalSeconds(offsetSecs.intValue());
long instant = date.atTime(time).atZone(offset).getLong(ChronoField.INSTANT_SECONDS);
fieldValues.put(INSTANT_SECONDS, instant);
}
}
}
}
    /**
     * Merges a resolved time and excess-day period into this object,
     * rejecting a time that conflicts with a previously resolved one.
     */
    private void updateCheckConflict(LocalTime timeToSet, Period periodToSet) {
        if (time != null) {
            if (time.equals(timeToSet) == false) {
                throw new DateTimeException("Conflict found: Fields resolved to different times: " + time + " " + timeToSet);
            }
            // periods only conflict when both are non-zero and differ;
            // otherwise the incoming period wins (note: an incoming zero
            // period overwrites a previously stored non-zero excess)
            if (excessDays.isZero() == false && periodToSet.isZero() == false && excessDays.equals(periodToSet) == false) {
                throw new DateTimeException("Conflict found: Fields resolved to different excess periods: " + excessDays + " " + periodToSet);
            } else {
                excessDays = periodToSet;
            }
        } else {
            time = timeToSet;
            excessDays = periodToSet;
        }
    }
    //-----------------------------------------------------------------------
    /**
     * Cross-checks the remaining parsed fields against the resolved date,
     * time, and — only when fields still remain after the cheaper checks —
     * the combined date-time.
     */
    private void crossCheck() {
        // only cross-check date, time and date-time
        // avoid object creation if possible
        if (date != null) {
            crossCheck(date);
        }
        if (time != null) {
            crossCheck(time);
            // size() is re-read here because the checks above consume fields
            if (date != null && fieldValues.size() > 0) {
                crossCheck(date.atTime(time));
            }
        }
    }
    /**
     * Compares each remaining parsed field that the target supports against
     * the target's value, removing matching entries and failing on a
     * mismatch. Fields the target cannot actually supply (getLong throws)
     * are skipped and left in the map.
     */
    private void crossCheck(TemporalAccessor target) {
        for (Iterator<Entry<TemporalField, Long>> it = fieldValues.entrySet().iterator(); it.hasNext(); ) {
            Entry<TemporalField, Long> entry = it.next();
            TemporalField field = entry.getKey();
            if (target.isSupported(field)) {
                long val1;
                try {
                    val1 = target.getLong(field);
                } catch (RuntimeException ex) {
                    // isSupported was optimistic; this field cannot be derived
                    continue;
                }
                long val2 = entry.getValue();
                if (val1 != val2) {
                    throw new DateTimeException("Conflict found: Field " + field + " " + val1 +
                            " differs from " + field + " " + val2 + " derived from " + target);
                }
                it.remove();  // cross-checked fields are consumed
            }
        }
    }
//-----------------------------------------------------------------------
@Override
public String toString() {
StringBuilder buf = new StringBuilder(64);
buf.append(fieldValues).append(',').append(chrono);
if (zone != null) {
buf.append(',').append(zone);
}
if (date != null || time != null) {
buf.append(" resolved to ");
if (date != null) {
buf.append(date);
if (time != null) {
buf.append('T').append(time);
}
} else {
buf.append(time);
}
}
return buf.toString();
}
}
| apache-2.0 |
baishuo/hadoop-2.6.0-cdh5.4.7_baishuo | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java | 20886 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.rmcontainer;
import java.util.EnumSet;
import java.util.List;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
import org.apache.commons.lang.time.DateUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerReport;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppRunningOnNodeEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptContainerAllocatedEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptContainerFinishedEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeCleanContainerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.ContainerRescheduledEvent;
import org.apache.hadoop.yarn.state.InvalidStateTransitonException;
import org.apache.hadoop.yarn.state.MultipleArcTransition;
import org.apache.hadoop.yarn.state.SingleArcTransition;
import org.apache.hadoop.yarn.state.StateMachine;
import org.apache.hadoop.yarn.state.StateMachineFactory;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
@SuppressWarnings({"unchecked", "rawtypes"})
public class RMContainerImpl implements RMContainer {
private static final Log LOG = LogFactory.getLog(RMContainerImpl.class);
  /**
   * Declarative transition table for the container lifecycle:
   * NEW -> ALLOCATED/RESERVED -> ACQUIRED -> RUNNING, with terminal states
   * COMPLETED, EXPIRED, RELEASED and KILLED. Terminal states self-loop on
   * late events so stragglers are ignored rather than rejected.
   */
  private static final StateMachineFactory<RMContainerImpl, RMContainerState,
                                           RMContainerEventType, RMContainerEvent>
   stateMachineFactory = new StateMachineFactory<RMContainerImpl,
       RMContainerState, RMContainerEventType, RMContainerEvent>(
      RMContainerState.NEW)

    // Transitions from NEW state
    .addTransition(RMContainerState.NEW, RMContainerState.ALLOCATED,
        RMContainerEventType.START, new ContainerStartedTransition())
    .addTransition(RMContainerState.NEW, RMContainerState.KILLED,
        RMContainerEventType.KILL)
    .addTransition(RMContainerState.NEW, RMContainerState.RESERVED,
        RMContainerEventType.RESERVED, new ContainerReservedTransition())
    .addTransition(RMContainerState.NEW,
        EnumSet.of(RMContainerState.RUNNING, RMContainerState.COMPLETED),
        RMContainerEventType.RECOVER, new ContainerRecoveredTransition())

    // Transitions from RESERVED state
    .addTransition(RMContainerState.RESERVED, RMContainerState.RESERVED,
        RMContainerEventType.RESERVED, new ContainerReservedTransition())
    .addTransition(RMContainerState.RESERVED, RMContainerState.ALLOCATED,
        RMContainerEventType.START, new ContainerStartedTransition())
    .addTransition(RMContainerState.RESERVED, RMContainerState.KILLED,
        RMContainerEventType.KILL) // nothing to do
    .addTransition(RMContainerState.RESERVED, RMContainerState.RELEASED,
        RMContainerEventType.RELEASED) // nothing to do

    // Transitions from ALLOCATED state
    .addTransition(RMContainerState.ALLOCATED, RMContainerState.ACQUIRED,
        RMContainerEventType.ACQUIRED, new AcquiredTransition())
    .addTransition(RMContainerState.ALLOCATED, RMContainerState.EXPIRED,
        RMContainerEventType.EXPIRE, new FinishedTransition())
    .addTransition(RMContainerState.ALLOCATED, RMContainerState.KILLED,
        RMContainerEventType.KILL, new ContainerRescheduledTransition())

    // Transitions from ACQUIRED state
    .addTransition(RMContainerState.ACQUIRED, RMContainerState.RUNNING,
        RMContainerEventType.LAUNCHED, new LaunchedTransition())
    .addTransition(RMContainerState.ACQUIRED, RMContainerState.COMPLETED,
        RMContainerEventType.FINISHED, new ContainerFinishedAtAcquiredState())
    .addTransition(RMContainerState.ACQUIRED, RMContainerState.RELEASED,
        RMContainerEventType.RELEASED, new KillTransition())
    .addTransition(RMContainerState.ACQUIRED, RMContainerState.EXPIRED,
        RMContainerEventType.EXPIRE, new KillTransition())
    .addTransition(RMContainerState.ACQUIRED, RMContainerState.KILLED,
        RMContainerEventType.KILL, new KillTransition())

    // Transitions from RUNNING state
    .addTransition(RMContainerState.RUNNING, RMContainerState.COMPLETED,
        RMContainerEventType.FINISHED, new FinishedTransition())
    .addTransition(RMContainerState.RUNNING, RMContainerState.KILLED,
        RMContainerEventType.KILL, new KillTransition())
    .addTransition(RMContainerState.RUNNING, RMContainerState.RELEASED,
        RMContainerEventType.RELEASED, new KillTransition())
    .addTransition(RMContainerState.RUNNING, RMContainerState.RUNNING,
        RMContainerEventType.EXPIRE)

    // Transitions from COMPLETED state
    .addTransition(RMContainerState.COMPLETED, RMContainerState.COMPLETED,
        EnumSet.of(RMContainerEventType.EXPIRE, RMContainerEventType.RELEASED,
            RMContainerEventType.KILL))

    // Transitions from EXPIRED state
    .addTransition(RMContainerState.EXPIRED, RMContainerState.EXPIRED,
        EnumSet.of(RMContainerEventType.RELEASED, RMContainerEventType.KILL))

    // Transitions from RELEASED state
    .addTransition(RMContainerState.RELEASED, RMContainerState.RELEASED,
        EnumSet.of(RMContainerEventType.EXPIRE, RMContainerEventType.RELEASED,
            RMContainerEventType.KILL, RMContainerEventType.FINISHED))

    // Transitions from KILLED state
    .addTransition(RMContainerState.KILLED, RMContainerState.KILLED,
        EnumSet.of(RMContainerEventType.EXPIRE, RMContainerEventType.RELEASED,
            RMContainerEventType.KILL, RMContainerEventType.FINISHED))

    // create the topology tables
    .installTopology();
  /** State machine instance driven by the transition table above. */
  private final StateMachine<RMContainerState, RMContainerEventType,
                                                 RMContainerEvent> stateMachine;
  private final ReadLock readLock;   // guards reads of mutable state
  private final WriteLock writeLock; // guards transitions and setters
  private final ContainerId containerId;
  private final ApplicationAttemptId appAttemptId;
  private final NodeId nodeId;
  private final Container container;
  private final RMContext rmContext;
  private final EventHandler eventHandler;
  private final ContainerAllocationExpirer containerAllocationExpirer;
  private final String user;

  // Reservation details recorded by ContainerReservedTransition.
  private Resource reservedResource;
  private NodeId reservedNode;
  private Priority reservedPriority;

  private long creationTime;              // set at construction
  private long finishTime;                // set by FinishedTransition
  private ContainerStatus finishedStatus; // terminal status; null until finished
  private boolean isAMContainer;
  private List<ResourceRequest> resourceRequests; // cleared when ACQUIRED
  /**
   * Creates a container record stamped with the current wall-clock time.
   */
  public RMContainerImpl(Container container,
      ApplicationAttemptId appAttemptId, NodeId nodeId, String user,
      RMContext rmContext) {
    this(container, appAttemptId, nodeId, user, rmContext, System
      .currentTimeMillis());
  }

  /**
   * Creates a container record with an explicit creation time, then
   * notifies the application history writer and metrics publisher.
   */
  public RMContainerImpl(Container container,
      ApplicationAttemptId appAttemptId, NodeId nodeId,
      String user, RMContext rmContext, long creationTime) {
    this.stateMachine = stateMachineFactory.make(this);
    this.containerId = container.getId();
    this.nodeId = nodeId;
    this.container = container;
    this.appAttemptId = appAttemptId;
    this.user = user;
    this.creationTime = creationTime;
    this.rmContext = rmContext;
    this.eventHandler = rmContext.getDispatcher().getEventHandler();
    this.containerAllocationExpirer = rmContext.getContainerAllocationExpirer();
    this.isAMContainer = false;
    this.resourceRequests = null;

    ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    this.readLock = lock.readLock();
    this.writeLock = lock.writeLock();

    // external notifications happen last, after the object is fully built
    rmContext.getRMApplicationHistoryWriter().containerStarted(this);
    rmContext.getSystemMetricsPublisher().containerCreated(
        this, this.creationTime);
  }
  /** Returns the immutable container id. */
  @Override
  public ContainerId getContainerId() {
    return this.containerId;
  }

  /** Returns the attempt this container was allocated to. */
  @Override
  public ApplicationAttemptId getApplicationAttemptId() {
    return this.appAttemptId;
  }

  /** Returns the underlying Container record. */
  @Override
  public Container getContainer() {
    return this.container;
  }

  /** Returns the current state machine state (read-locked). */
  @Override
  public RMContainerState getState() {
    this.readLock.lock();

    try {
      return this.stateMachine.getCurrentState();
    } finally {
      this.readLock.unlock();
    }
  }

  // Reservation accessors: meaningful only while RESERVED; unsynchronized.
  @Override
  public Resource getReservedResource() {
    return reservedResource;
  }

  @Override
  public NodeId getReservedNode() {
    return reservedNode;
  }

  @Override
  public Priority getReservedPriority() {
    return reservedPriority;
  }

  /** Returns the resource of the allocated container. */
  @Override
  public Resource getAllocatedResource() {
    return container.getResource();
  }

  /** Returns the node the container was allocated on. */
  @Override
  public NodeId getAllocatedNode() {
    return container.getNodeId();
  }

  /** Returns the priority of the allocated container. */
  @Override
  public Priority getAllocatedPriority() {
    return container.getPriority();
  }

  /** Returns the creation timestamp (set once in the constructor). */
  @Override
  public long getCreationTime() {
    return creationTime;
  }

  /** Returns the finish timestamp, or 0 while the container is live. */
  @Override
  public long getFinishTime() {
    try {
      readLock.lock();
      return finishTime;
    } finally {
      readLock.unlock();
    }
  }
@Override
public String getDiagnosticsInfo() {
try {
readLock.lock();
if (getFinishedStatus() != null) {
return getFinishedStatus().getDiagnostics();
} else {
return null;
}
} finally {
readLock.unlock();
}
}
  /**
   * Builds the URL of the container's logs on its node's web UI.
   */
  @Override
  public String getLogURL() {
    try {
      readLock.lock();
      // protocol-relative URL so it works for both http and https
      return WebAppUtils.getRunningLogURL("//" + container.getNodeHttpAddress(),
          ConverterUtils.toString(containerId), user);
    } finally {
      readLock.unlock();
    }
  }
@Override
public int getContainerExitStatus() {
try {
readLock.lock();
if (getFinishedStatus() != null) {
return getFinishedStatus().getExitStatus();
} else {
return 0;
}
} finally {
readLock.unlock();
}
}
@Override
public ContainerState getContainerState() {
try {
readLock.lock();
if (getFinishedStatus() != null) {
return getFinishedStatus().getState();
} else {
return ContainerState.RUNNING;
}
} finally {
readLock.unlock();
}
}
  /**
   * Returns the ResourceRequests that produced this allocation; cleared
   * (set to null) once the container is ACQUIRED.
   */
  @Override
  public List<ResourceRequest> getResourceRequests() {
    try {
      readLock.lock();
      return resourceRequests;
    } finally {
      readLock.unlock();
    }
  }

  /** Replaces the stored ResourceRequests (write-locked). */
  public void setResourceRequests(List<ResourceRequest> requests) {
    try {
      writeLock.lock();
      this.resourceRequests = requests;
    } finally {
      writeLock.unlock();
    }
  }
  /** Returns the container id string. */
  @Override
  public String toString() {
    return containerId.toString();
  }
  
  /** Returns whether this container hosts the ApplicationMaster. */
  @Override
  public boolean isAMContainer() {
    try {
      readLock.lock();
      return isAMContainer;
    } finally {
      readLock.unlock();
    }
  }

  /** Marks/unmarks this container as the AM container (write-locked). */
  public void setAMContainer(boolean isAMContainer) {
    try {
      writeLock.lock();
      this.isAMContainer = isAMContainer;
    } finally {
      writeLock.unlock();
    }
  }
@Override
public void handle(RMContainerEvent event) {
LOG.debug("Processing " + event.getContainerId() + " of type " + event.getType());
try {
writeLock.lock();
RMContainerState oldState = getState();
try {
stateMachine.doTransition(event.getType(), event);
} catch (InvalidStateTransitonException e) {
LOG.error("Can't handle this event at current state", e);
LOG.error("Invalid event " + event.getType() +
" on container " + this.containerId);
}
if (oldState != getState()) {
LOG.info(event.getContainerId() + " Container Transitioned from "
+ oldState + " to " + getState());
}
}
finally {
writeLock.unlock();
}
}
  /** Returns the terminal status, or null while the container is live. */
  public ContainerStatus getFinishedStatus() {
    return finishedStatus;
  }
  /** No-op base transition; subclasses override to add side effects. */
  private static class BaseTransition implements
      SingleArcTransition<RMContainerImpl, RMContainerEvent> {

    @Override
    public void transition(RMContainerImpl cont, RMContainerEvent event) {

    }
  }
  /**
   * NEW -> RUNNING/COMPLETED on recovery: replays the container's state
   * from the NM-reported status after an RM restart.
   */
  private static final class ContainerRecoveredTransition
      implements
      MultipleArcTransition<RMContainerImpl, RMContainerEvent, RMContainerState> {
    @Override
    public RMContainerState transition(RMContainerImpl container,
        RMContainerEvent event) {
      NMContainerStatus report =
          ((RMContainerRecoverEvent) event).getContainerReport();
      if (report.getContainerState().equals(ContainerState.COMPLETE)) {
        // already finished on the node: replay the finish handling
        ContainerStatus status =
            ContainerStatus.newInstance(report.getContainerId(),
              report.getContainerState(), report.getDiagnostics(),
              report.getContainerExitStatus());

        new FinishedTransition().transition(container,
          new RMContainerFinishedEvent(container.containerId, status,
            RMContainerEventType.FINISHED));
        return RMContainerState.COMPLETED;
      } else if (report.getContainerState().equals(ContainerState.RUNNING)) {
        // Tell the app
        container.eventHandler.handle(new RMAppRunningOnNodeEvent(container
            .getApplicationAttemptId().getApplicationId(), container.nodeId));
        return RMContainerState.RUNNING;
      } else {
        // This can never happen.
        LOG.warn("RMContainer received unexpected recover event with container"
            + " state " + report.getContainerState() + " while recovering.");
        return RMContainerState.RUNNING;
      }
    }
  }
private static final class ContainerReservedTransition extends
BaseTransition {
@Override
public void transition(RMContainerImpl container, RMContainerEvent event) {
RMContainerReservedEvent e = (RMContainerReservedEvent)event;
container.reservedResource = e.getReservedResource();
container.reservedNode = e.getReservedNode();
container.reservedPriority = e.getReservedPriority();
}
}
  /**
   * NEW/RESERVED -> ALLOCATED: notifies the app attempt that a container
   * has been allocated to it.
   */
  private static final class ContainerStartedTransition extends
      BaseTransition {

    @Override
    public void transition(RMContainerImpl container, RMContainerEvent event) {
      container.eventHandler.handle(new RMAppAttemptContainerAllocatedEvent(
          container.appAttemptId));
    }
  }
  /**
   * ALLOCATED -> ACQUIRED: the AM has picked up the container. Drops the
   * stored ResourceRequests, starts the allocation-expiry clock, and tells
   * the app it is running on this node.
   */
  private static final class AcquiredTransition extends BaseTransition {

    @Override
    public void transition(RMContainerImpl container, RMContainerEvent event) {
      // Clear ResourceRequest stored in RMContainer
      container.setResourceRequests(null);
      // Register with containerAllocationExpirer.
      container.containerAllocationExpirer.register(container.getContainerId());

      // Tell the app
      container.eventHandler.handle(new RMAppRunningOnNodeEvent(container
          .getApplicationAttemptId().getApplicationId(), container.nodeId));
    }
  }
  /**
   * ACQUIRED -> RUNNING: the container launched in time, so stop the
   * allocation-expiry clock.
   */
  private static final class LaunchedTransition extends BaseTransition {

    @Override
    public void transition(RMContainerImpl container, RMContainerEvent event) {
      // Unregister from containerAllocationExpirer.
      container.containerAllocationExpirer.unregister(container
          .getContainerId());
    }
  }
  /**
   * ALLOCATED -> KILLED: asks the scheduler to restore the container's
   * request to the app before running the normal finish handling.
   */
  private static final class ContainerRescheduledTransition extends
      FinishedTransition {

    @Override
    public void transition(RMContainerImpl container, RMContainerEvent event) {
      // Tell scheduler to recover request of this container to app
      container.eventHandler.handle(new ContainerRescheduledEvent(container));
      super.transition(container, event);
    }
  }
private static class FinishedTransition extends BaseTransition {

  /**
   * Common bookkeeping for a container reaching a finished state: records
   * the finish time and final status, updates attempt-level metrics,
   * notifies the application attempt, and publishes the finish to the
   * history writer and metrics publisher.
   */
  @Override
  public void transition(RMContainerImpl container, RMContainerEvent event) {
    RMContainerFinishedEvent finishedEvent = (RMContainerFinishedEvent) event;

    container.finishTime = System.currentTimeMillis();
    container.finishedStatus = finishedEvent.getRemoteContainerStatus();

    // Update preemption/usage metrics before informing anyone else.
    updateAttemptMetrics(container);

    // Inform AppAttempt
    // container.getContainer() can return null when a RMContainer is a
    // reserved container
    container.eventHandler.handle(new RMAppAttemptContainerFinishedEvent(
        container.appAttemptId, finishedEvent.getRemoteContainerStatus(),
        container.getAllocatedNode()));

    container.rmContext.getRMApplicationHistoryWriter().containerFinished(
        container);
    container.rmContext.getSystemMetricsPublisher().containerFinished(
        container, container.finishTime);
  }

  /**
   * Updates the owning attempt's preemption and aggregate resource-usage
   * metrics for this finished container.
   *
   * Fixes two NPE hazards in the original code:
   * 1) {@code rmAttempt} was dereferenced in the PREEMPTED branch BEFORE
   *    the {@code rmAttempt != null} check — now all uses are inside the
   *    guard.
   * 2) {@code container.getContainer()} can be null for a reserved
   *    container (per the comment in transition() above), so we skip the
   *    metrics update rather than crash in that case.
   */
  private static void updateAttemptMetrics(RMContainerImpl container) {
    if (container.getContainer() == null) {
      // Reserved container — no concrete resource to account for.
      return;
    }
    Resource resource = container.getContainer().getResource();
    RMAppAttempt rmAttempt = container.rmContext.getRMApps()
        .get(container.getApplicationAttemptId().getApplicationId())
        .getCurrentAppAttempt();
    if (rmAttempt == null) {
      // The application may already have been removed from the RM.
      return;
    }

    // If this is a preempted container, update preemption metrics.
    if (ContainerExitStatus.PREEMPTED == container.finishedStatus
        .getExitStatus()) {
      rmAttempt.getRMAppAttemptMetrics().updatePreemptionInfo(resource,
          container);
    }

    // Accumulate resource-seconds for the container's lifetime.
    long usedMillis = container.finishTime - container.creationTime;
    long memorySeconds = resource.getMemory()
        * usedMillis / DateUtils.MILLIS_PER_SECOND;
    long vcoreSeconds = resource.getVirtualCores()
        * usedMillis / DateUtils.MILLIS_PER_SECOND;
    rmAttempt.getRMAppAttemptMetrics()
        .updateAggregateAppResourceUsage(memorySeconds, vcoreSeconds);
  }
}
private static final class ContainerFinishedAtAcquiredState extends
    FinishedTransition {

  /**
   * The container finished while still only acquired (never launched):
   * stop the allocation-expiry timer, then run the shared finished
   * bookkeeping, which informs the application attempt.
   */
  @Override
  public void transition(RMContainerImpl container, RMContainerEvent event) {
    container.containerAllocationExpirer
        .unregister(container.getContainerId());
    super.transition(container, event);
  }
}
private static final class KillTransition extends FinishedTransition {

  /**
   * The container is being killed: stop watching for allocation expiry,
   * ask the node to clean the container up, then run the shared finished
   * bookkeeping (which informs the application attempt).
   */
  @Override
  public void transition(RMContainerImpl container, RMContainerEvent event) {
    container.containerAllocationExpirer
        .unregister(container.getContainerId());

    RMNodeCleanContainerEvent cleanEvent = new RMNodeCleanContainerEvent(
        container.nodeId, container.containerId);
    container.eventHandler.handle(cleanEvent);

    super.transition(container, event);
  }
}
@Override
public ContainerReport createContainerReport() {
  // Snapshot all fields under the read lock so the report is consistent.
  this.readLock.lock();
  try {
    return ContainerReport.newInstance(this.getContainerId(),
        this.getAllocatedResource(), this.getAllocatedNode(),
        this.getAllocatedPriority(), this.getCreationTime(),
        this.getFinishTime(), this.getDiagnosticsInfo(), this.getLogURL(),
        this.getContainerExitStatus(), this.getContainerState());
  } finally {
    this.readLock.unlock();
  }
}
}
| apache-2.0 |