repo_name
stringlengths
5
108
path
stringlengths
6
333
size
stringlengths
1
6
content
stringlengths
4
977k
license
stringclasses
15 values
krasa/DevSupportApp
src/main/java/krasa/release/service/TokenizationFacade.java
4750
package krasa.release.service;

import java.io.File;
import java.util.Date;
import java.util.List;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;

import com.google.gson.Gson;

import krasa.build.backend.domain.Status;
import krasa.build.backend.facade.EventService;
import krasa.build.backend.facade.UsernameException;
import krasa.core.backend.config.MainConfig;
import krasa.core.backend.dao.GenericDAO;
import krasa.core.backend.dao.GenericDaoBuilder;
import krasa.core.frontend.pages.FileSystemLogUtils;
import krasa.core.frontend.web.CookieUtils;
import krasa.release.domain.TokenizationJob;
import krasa.release.domain.TokenizationPageModel;
import krasa.release.tokenization.TokenizationJobParameters;
import krasa.release.tokenization.TokenizationJobProcess;
import krasa.release.tokenization.TokenizationResult;
import krasa.svn.backend.domain.Repository;
import krasa.svn.backend.facade.SvnFacade;

/**
 * Facade for running SVN "tokenization" jobs, either synchronously on the calling
 * thread or asynchronously via {@link TokenizationExecutor}. Job status transitions
 * (PENDING / RUNNING / SUCCESS / EXCEPTION) and the job's log-file name are persisted
 * through a {@link GenericDAO} under the HSQLDB transaction manager.
 *
 * @author Vojtech Krasa
 */
@Service
@Transactional(value = MainConfig.HSQLDB_TX_MANAGER)
public class TokenizationFacade {

	private static final Logger log = LoggerFactory.getLogger(TokenizationFacade.class);

	// Built in setGenericDAO() from the injected builder rather than injected directly.
	protected GenericDAO<TokenizationJob> tokenizationJobGenericDAO;

	// Working directory passed to TokenizationJob.prepareProcess (checkouts / log files).
	@Value("${tempDir}")
	String tempDir;

	// Passed through to prepareProcess; presumably toggles committing results back to
	// SVN vs. a dry run — NOTE(review): confirm semantics in TokenizationJob.prepareProcess.
	@Value("${tokenization.commit}")
	Boolean commit;

	@Autowired
	SvnFacade facade;
	@Autowired
	TokenizationExecutor tokenizationExecutor;
	@Autowired
	EventService eventService;

	/** Builds the job DAO from the injected builder. */
	@Autowired
	public void setGenericDAO(GenericDaoBuilder genericDAO) {
		tokenizationJobGenericDAO = genericDAO.build(TokenizationJob.class);
	}

	/**
	 * Creates a job from the page model and runs it on the calling thread.
	 * Any failure (Throwable is caught, including Errors) is logged and recorded on the
	 * job as {@link Status#EXCEPTION}; the method still returns normally with a result
	 * pointing at the job's log file, so callers must inspect the returned status.
	 *
	 * @throws UsernameException propagated from createJob (no valid username cookie)
	 */
	public TokenizationResult tokenizeSynchronously(TokenizationPageModel json) throws UsernameException {
		TokenizationJob tokenizationJob = createJob(json);
		TokenizationJobProcess jobCommand = tokenizationJob.prepareProcess(new File(tempDir), commit);
		String logName = jobCommand.getLogName();
		try {
			tokenizationJob.setLogName(logName);
			tokenizationJob.setStatus(Status.RUNNING);
			save(tokenizationJob);
			jobCommand.run();
			tokenizationJob.setStatus(Status.SUCCESS);
			tokenizationJob.setEnd(new Date());
			save(tokenizationJob);
		} catch (Throwable e) {
			log.error(String.valueOf(e.getMessage()), e);
			tokenizationJob.setStatus(Status.EXCEPTION);
			tokenizationJob.setEnd(new Date());
			save(tokenizationJob);
		}
		File logFileByName = FileSystemLogUtils.getLogFileByName(logName);
		return new TokenizationResult(logFileByName, tokenizationJob.getStatus());
	}

	private TokenizationJob save(TokenizationJob tokenizationJob) {
		return tokenizationJobGenericDAO.save(tokenizationJob);
	}

	/**
	 * Creates a job, persists it as {@link Status#PENDING} and hands it to the executor.
	 *
	 * @return the log file the scheduled job will write to
	 * @throws UsernameException propagated from createJob (no valid username cookie)
	 */
	public File tokenizeAsync(TokenizationPageModel json) throws UsernameException {
		TokenizationJob tokenizationJob = createJob(json);
		TokenizationJobProcess jobCommand = tokenizationJob.prepareProcess(new File(tempDir), commit);
		String logName = jobCommand.getLogName();
		tokenizationJob.setLogName(logName);
		tokenizationJob.setStatus(Status.PENDING);
		save(tokenizationJob);
		tokenizationExecutor.schedule(tokenizationJob);
		return FileSystemLogUtils.getLogFileByName(logName);
	}

	/**
	 * Builds and persists a new job: parameters are parsed from the page model's JSON,
	 * the SVN URL comes from the default repository, and the username from the cookie.
	 *
	 * @throws UsernameException presumably when CookieUtils.getValidUsername() finds no
	 *             valid username — TODO confirm in CookieUtils
	 */
	protected TokenizationJob createJob(TokenizationPageModel json) throws UsernameException {
		String svnUrl = getSvnUrl();
		TokenizationJobParameters jobParameters = new Gson().fromJson(json.getJson(),
				TokenizationJobParameters.class);
		TokenizationJob tokenizationJobCommand = new TokenizationJob(jobParameters, svnUrl,
				json.getBranchesPatterns(), CookieUtils.getValidUsername(), json.getCommitMessage());
		save(tokenizationJobCommand);
		return tokenizationJobCommand;
	}

	/** @return the 10 most recently stored jobs */
	public List<TokenizationJob> getJobs() {
		return tokenizationJobGenericDAO.findLast(10);
	}

	private String getSvnUrl() {
		Repository defaultRepository = facade.getGlobalSettings().getDefaultRepository();
		return defaultRepository.getUrl();
	}

	/** Persists the job and broadcasts a TokenizationEvent to listeners. */
	public void update(TokenizationJob tokenizationJob) {
		save(tokenizationJob);
		eventService.sendEvent(new TokenizationEvent());
	}

	// protected TokenizationJobCommand prepareJob2(String branchName, Integer fromVersion, Integer toVersion, String
	// json) {
	// Repository defaultRepository = facade.getGlobalSettings().getDefaultRepository();
	// final String svnUrl = defaultRepository.getUrl();
	// final TokenizationJobParameters jobParameters = new Gson().fromJson(json, TokenizationJobParameters.class);
	// jobParameters.generatePlaceholdersReplacements(fromVersion, toVersion);
	//
	// return new TokenizationJobCommand(jobParameters, svnUrl, new File(tempDir), branchName);
	// }
}
apache-2.0
alexmao86/swing-rcp
src/main/java/net/sf/anpr/rcp/widget/OpenPerspectiveDialog.java
6161
package net.sf.anpr.rcp.widget;

import java.awt.BorderLayout;
import java.awt.FlowLayout;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import java.awt.image.BufferedImage;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.util.List;

import javax.imageio.ImageIO;
import javax.swing.DefaultListModel;
import javax.swing.JButton;
import javax.swing.JDialog;
import javax.swing.JList;
import javax.swing.JOptionPane;
import javax.swing.JPanel;
import javax.swing.JScrollPane;
import javax.swing.JSplitPane;
import javax.swing.JTextArea;
import javax.swing.ListSelectionModel;
import javax.swing.SwingUtilities;
import javax.swing.event.ListSelectionEvent;
import javax.swing.event.ListSelectionListener;

import org.apache.commons.codec.binary.Base64;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import net.sf.anpr.rcp.Platform;
import net.sf.anpr.rcp.model.PerspectiveBean;
import net.sf.anpr.rcp.service.Services;
import net.sf.anpr.rcp.util.SwingResourceManager;
import net.sf.anpr.rcp.util.SwingUtil;

/**
 * Modal dialog that lists saved docking perspectives. Selecting an entry previews
 * its Base64-encoded snapshot image and description; OK applies the perspective's
 * XML to the docking desktop and marks it as the default; Delete removes it.
 */
public class OpenPerspectiveDialog extends JDialog{

	/**
	 *
	 */
	private static final long serialVersionUID = 1L;

	private final static Logger LOGGER=LoggerFactory.getLogger(OpenPerspectiveDialog.class);

	private DefaultListModel model;
	private JButton okButton;
	private JButton cancelButton;
	private JList perspectiveJList;
	private JTextArea descLabel;       // read-only description of the selected perspective
	private JButton deleteButton;
	private JImagePanel imagePanel;    // snapshot preview of the selected perspective

	/** Builds the widget tree, wires listeners and asynchronously loads the list. */
	public OpenPerspectiveDialog() {
		super(Platform.current().getApplicationWindow());
		this.setTitle("Open perspective");
		setDefaultCloseOperation(JDialog.DISPOSE_ON_CLOSE);
		this.setIconImage(SwingResourceManager.getImage(OpenPerspectiveDialog.class, "perspective.png"));
		this.setResizable(true);
		setSize(800, 600);
		setModal(true);
		getContentPane().setLayout(new BorderLayout(0, 0));

		JSplitPane splitPane = new JSplitPane();
		getContentPane().add(splitPane, BorderLayout.CENTER);
		splitPane.setDividerLocation(0.3);

		// Left side: the list of saved perspectives.
		JScrollPane scrollPane = new JScrollPane();
		splitPane.setLeftComponent(scrollPane);
		model=new DefaultListModel();
		perspectiveJList = new JList(model);
		perspectiveJList.setSelectionMode( ListSelectionModel.SINGLE_SELECTION);
		scrollPane.setViewportView(perspectiveJList);

		// Right side: snapshot preview on top, description below.
		JPanel panel = new JPanel();
		splitPane.setRightComponent(panel);
		panel.setLayout(new BorderLayout(0, 0));
		imagePanel = new JImagePanel();
		panel.add(imagePanel, BorderLayout.CENTER);
		descLabel = new JTextArea();
		descLabel.setRows(8);
		descLabel.setEditable(false);
		panel.add(SwingUtil.scrollView(descLabel), BorderLayout.SOUTH);

		// Bottom button bar: Delete Selected / OK / Cancel.
		{
			JPanel buttonPane = new JPanel();
			buttonPane.setLayout(new FlowLayout(FlowLayout.RIGHT));
			getContentPane().add(buttonPane, BorderLayout.SOUTH);
			{
				deleteButton = new JButton("Delete Selected");
				buttonPane.add(deleteButton);
			}
			{
				okButton = new JButton("OK");
				okButton.setActionCommand("OK");
				buttonPane.add(okButton);
				getRootPane().setDefaultButton(okButton);
			}
			{
				cancelButton = new JButton("Cancel");
				cancelButton.setActionCommand("Cancel");
				buttonPane.add(cancelButton);
			}
		}
		addListeners();
		initData();
	}

	/** Repopulates the list model from the perspective service on the EDT. */
	private void initData() {
		SwingUtilities.invokeLater(new Runnable() {
			public void run() {
				model.clear();
				List<PerspectiveBean> beanList=Services.perspectiveService.listAll();
				for(PerspectiveBean b:beanList){
					model.addElement(b);
				}
			}
		});
	}

	/** Wires OK/Cancel/Delete actions and the list selection preview. */
	private void addListeners() {
		okButton.addActionListener(new ActionListener() {
			public void actionPerformed(ActionEvent e) {
				SwingUtilities.invokeLater(new Runnable() {
					public void run() {
						PerspectiveBean bean=(PerspectiveBean)perspectiveJList.getSelectedValue();
						if(bean==null){
							JOptionPane.showMessageDialog(null, "Please select one saved prespective",
									"Error", JOptionPane.WARNING_MESSAGE);
							return ;
						}
						dispose();
						try {
							// Apply the stored layout XML to the docking desktop.
							String xml=bean.getXml();
							ByteArrayInputStream in=new ByteArrayInputStream(xml.getBytes());
							Platform.current().getApplicationWindow().getDockingDesktop().readXML(in);
							in.close();
							// Set the selected perspective as current so it opens as
							// the default next time.
							bean.setFlag("current,");
							Services.perspectiveService.setAsDefault(bean);
						} catch (Exception e) {
							e.printStackTrace();
						}
					}
				});
			}
		});
		cancelButton.addActionListener(new ActionListener() {
			public void actionPerformed(ActionEvent e) {
				dispose();
			}
		});
		deleteButton.addActionListener(new ActionListener() {
			public void actionPerformed(ActionEvent e) {
				PerspectiveBean bean=(PerspectiveBean)perspectiveJList.getSelectedValue();
				if(bean==null){
					return ;
				}
				// Remove from the UI model first, then from the backing store.
				model.removeElementAt(perspectiveJList.getSelectedIndex());
				Services.perspectiveService.delete(bean);
				descLabel.setText("");
				imagePanel.setImage(null);
			}
		});
		perspectiveJList.addListSelectionListener(new ListSelectionListener() {
			public void valueChanged(ListSelectionEvent e) {
				if(e.getValueIsAdjusting()){
					return ;
				}
				final PerspectiveBean bean=(PerspectiveBean)perspectiveJList.getSelectedValue();
				if(bean==null){
					return ;
				}
				SwingUtilities.invokeLater(new Runnable() {
					public void run() {
						try {
							LOGGER.debug("{}", bean);
							descLabel.setText(bean.getDescription());
							// Snapshot is stored as a Base64 string; decode and preview it.
							if(bean.getSnapshot()!=null&&bean.getSnapshot().length()>0){
								byte[] bytes=Base64.decodeBase64(bean.getSnapshot());
								ByteArrayInputStream in=new ByteArrayInputStream(bytes);
								BufferedImage image = ImageIO.read(in);
								imagePanel.setImage(image);
							} else {
								imagePanel.setImage(null);
							}
						} catch (IOException e) {
							e.printStackTrace();
						}
					}
				});
			}
		});
	}
}
apache-2.0
EsupPortail/esup-catapp-admin
src/main/java/org/esupportail/catapp/admin/domain/services/mocks/MockApplicationService.java
3528
package org.esupportail.catapp.admin.domain.services.mocks;

import com.fasterxml.jackson.databind.ObjectMapper;
import fj.F;
import fj.F2;
import fj.Ordering;
import fj.data.List;
import org.esupportail.catapp.admin.domain.beans.ApplicationDTO;
import org.esupportail.catapp.admin.domain.exceptions.CrudException;
import org.esupportail.catapp.admin.domain.services.IApplicationService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;

import static fj.Function.curry;
import static fj.Ord.ord;
import static fj.Ord.stringOrd;
import static fj.data.Array.array;
import static java.lang.String.format;

/**
 * In-memory mock of {@link IApplicationService} backed by a fixed JSON payload.
 * Reads always reflect the same three applications; mutating operations only
 * validate and log — nothing is actually persisted.
 */
public class MockApplicationService implements IApplicationService {

    private final Logger log = LoggerFactory.getLogger(getClass());

    // ObjectMapper is thread-safe and expensive to construct: share one instance
    // instead of building a new one on every getList() call.
    private static final ObjectMapper MAPPER = new ObjectMapper();

    private final String jsonApplications = "[{\"code\":\"SIFAC-P04\",\"title\":\"Clients légers - Sifac production\",\"caption\":\"mandant 500\",\"description\":\"\",\"url\":\"\",\"group\":\"\",\"domains\":[\"GFC\"],\"activation\":\"Activated\"},{\"code\":\"SIFAC-T04\",\"title\":\"Clients légers - Sifac test\",\"caption\":\"mandant 310\",\"description\":\"\",\"url\":\"\",\"group\":\"\",\"domains\":[\"GFC\"],\"activation\":\"Activated\"},{\"code\":\"SIFAC-DOC\",\"title\":\"Documentations et procédures Sifac\",\"caption\":\"\",\"description\":\"\",\"url\":\"\",\"group\":\"\",\"domains\":[\"GFC\"],\"activation\":\"Activated\"}]";

    /** @return true when an application with the given code (case-insensitive) exists */
    @Override
    public boolean exists(final String code) throws InterruptedException {
        return getList().exists(new F<ApplicationDTO, Boolean>() {
            public Boolean f(ApplicationDTO dto) {
                return dto.getCode().equalsIgnoreCase(code);
            }
        });
    }

    /** @return the application with the given code (case-insensitive), or null when absent */
    @Override
    public ApplicationDTO getOne(final String code) throws InterruptedException {
        return getList().find(new F<ApplicationDTO, Boolean>() {
            public Boolean f(ApplicationDTO dto) {
                return dto.getCode().equalsIgnoreCase(code);
            }
        }).toNull();
    }

    /**
     * Deserializes the fixed JSON payload and returns the applications sorted by
     * "title caption" (case-insensitive). Returns an empty list if the payload
     * cannot be parsed (logged — should never happen with the hard-coded JSON).
     */
    @Override
    public List<ApplicationDTO> getList() throws InterruptedException {
        try {
            return array(MAPPER.readValue(jsonApplications, ApplicationDTO[].class))
                    .toList().sort(ord(curry(ordering)));
        } catch (IOException e) {
            // Don't swallow silently: make parse failures visible in the logs.
            log.warn("Failed to parse mock applications JSON", e);
            return List.nil();
        }
    }

    /**
     * Mock add: rejects duplicate codes, otherwise does nothing.
     *
     * @throws CrudException when an application with the same code already exists
     */
    @Override
    public void add(final ApplicationDTO dto) throws InterruptedException, CrudException {
        // Fixed copy-paste bug: this is the add method, not update.
        log.info("ApplicationDTO in add method {}", dto);
        if (exists(dto.getCode())) {
            throw new CrudException("ERROR.VALIDATION.UNIQUE");
        }
        // do nothing else
    }

    /** Mock update: logs and does nothing. */
    @Override
    public void update(final ApplicationDTO dto) throws InterruptedException, CrudException {
        log.info("ApplicationDTO in update method {}", dto);
        //do nothing
    }

    /** Mock delete: does nothing. */
    @Override
    public void delete(final String code) throws InterruptedException, CrudException {
        //do nothing
    }

    /** Case-insensitive ordering on "title caption", used to sort getList() results. */
    public static final F2<ApplicationDTO, ApplicationDTO, Ordering> ordering =
            new F2<ApplicationDTO, ApplicationDTO, Ordering>() {
                public Ordering f(final ApplicationDTO a, final ApplicationDTO b) {
                    final String s1 = format("%s %s", a.getTitle(), a.getCaption()).trim().toLowerCase();
                    final String s2 = format("%s %s", b.getTitle(), b.getCaption()).trim().toLowerCase();
                    return stringOrd.compare(s1, s2);
                }
            };
}
apache-2.0
folio-org/okapi
okapi-core/src/main/java/org/folio/okapi/service/DeploymentStore.java
345
package org.folio.okapi.service; import io.vertx.core.Future; import java.util.List; import org.folio.okapi.bean.DeploymentDescriptor; public interface DeploymentStore { Future<Void> insert(DeploymentDescriptor dd); Future<Boolean> delete(String id); Future<Void> init(boolean reset); Future<List<DeploymentDescriptor>> getAll(); }
apache-2.0
partouf/Chatty-Twitch-Client
src/chatty/TwitchCommands.java
11388
package chatty;

import chatty.gui.MainGui;
import chatty.util.DateTime;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.Timer;
import java.util.TimerTask;
import java.util.logging.Logger;

/**
 * Twitch Chat commands. All the Twitch specific commands like /mod, /timeout..
 *
 * Command methods validate their parameters (printing a usage line on failure)
 * and then send the corresponding dot-command (".ban", ".timeout", ...) to the
 * channel via the client, echoing a status message in the GUI.
 *
 * @author tduva
 */
public class TwitchCommands {

    private static final Logger LOGGER = Logger.getLogger(TwitchCommands.class.getName());

    /**
     * The delay between /mods requests. This is the delay in between each
     * request, not how often it is requested for one channel (it is currently
     * only requested once for each channel).
     */
    private static final int REQUEST_MODS_DELAY = 30*1000;

    /**
     * Channels which currently wait for a /mods response that should be silent
     * (no message output).
     */
    private final Set<String> silentModsRequestChannel
            = Collections.synchronizedSet(new HashSet<String>());

    /**
     * Channels for which the /mods list has already been requested.
     */
    private final Set<String> modsAlreadyRequested
            = Collections.synchronizedSet(new HashSet<String>());

    private MainGui g;
    private TwitchClient c;

    public TwitchCommands(MainGui g, TwitchClient c) {
        this.g = g;
        this.c = c;
    }

    // Delegates to the client; when message is true, presumably an error is shown
    // to the user if not on the channel — confirm in TwitchClient.onChannel.
    private boolean onChannel(String channel, boolean message) {
        return c.onChannel(channel, message);
    }

    // Sends a command message to the channel, echoing the given text in the GUI.
    private void sendMessage(String channel, String message, String echo) {
        c.sendCommandMessage(channel, message, echo);
    }

    /**
     * Parses "/to &lt;nick&gt; [time]" and issues the timeout; without a time
     * argument a default timeout (time 0) is sent.
     */
    protected void commandTimeout(String channel, String parameter) {
        parameter = prepareAndCheckParameters(Helper.USERNAME_REGEX+"( [0-9]+)?", parameter);
        if (parameter == null) {
            g.printLine("Usage: /to <nick> [time]");
            return;
        }
        String[] parts = parameter.split(" ");
        if (parts.length < 2) {
            timeout(channel, parts[0], 0);
        } else {
            try {
                long time = Long.parseLong(parts[1]);
                timeout(channel, parts[0], time);
            } catch (NumberFormatException ex) {
                // If the regex is correct, this may never happen
                g.printLine("Usage: /to <nick> [time] (no valid time specified)");
            }
        }
    }

    /** Parses "/slow [time]" and turns on slowmode (default time when omitted). */
    protected void commandSlowmodeOn(String channel, String parameter) {
        if (parameter == null || parameter.isEmpty()) {
            slowmodeOn(channel, 0);
        } else {
            try {
                int time = Integer.parseInt(parameter);
                slowmodeOn(channel, time);
            } catch (NumberFormatException ex) {
                g.printLine("Usage: /slow [time] (invalid time specified)");
            }
        }
    }

    /** Validates the nick and issues /unban. */
    protected void commandUnban(String channel, String parameter) {
        if (prepareAndCheckParameters(Helper.USERNAME_REGEX, parameter) == null) {
            g.printLine("Usage: /unban <nick>");
            return;
        }
        unban(channel, parameter);
    }

    /** Validates the nick and issues /ban. */
    protected void commandBan(String channel, String parameter) {
        if (prepareAndCheckParameters(Helper.USERNAME_REGEX, parameter) == null) {
            g.printLine("Usage: /ban <nick>");
        } else {
            ban(channel, parameter);
        }
    }

    /** Validates the nick and issues /mod. */
    protected void commandMod(String channel, String parameter) {
        if (prepareAndCheckParameters(Helper.USERNAME_REGEX, parameter) == null) {
            g.printLine("Usage: /mod <nick>");
        } else {
            mod(channel, parameter);
        }
    }

    /** Validates the nick and issues /unmod. */
    protected void commandUnmod(String channel, String parameter) {
        if (prepareAndCheckParameters(Helper.USERNAME_REGEX, parameter) == null) {
            g.printLine("Usage: /unmod <nick>");
        } else {
            unmod(channel, parameter);
        }
    }

    /** Validates the stream name and issues /host. */
    protected void commandHostmode(String channel, String parameter) {
        if (parameter == null) {
            g.printLine("Usage: /host <stream>");
        } else {
            hostmode(channel, parameter);
        }
    }

    /** Sends the ".host" command for the given target stream. */
    public void hostmode(String channel, String target) {
        if (onChannel(channel, true)) {
            sendMessage(channel, ".host "+target, "Trying to host "+target+"..");
        }
    }

    /** Sends ".unhost" to turn off host mode. */
    public void hostmodeOff(String channel) {
        if (onChannel(channel, true)) {
            sendMessage(channel, ".unhost", "Trying to turn off host mode..");
        }
    }

    /**
     * Turn on slowmode with the given amount of seconds or the default time
     * (without specifying a time).
     *
     * @param channel The name of the channel
     * @param time The time in seconds, 0 or negative numbers will make it give
     * not time at all
     */
    public void slowmodeOn(String channel, int time) {
        if (onChannel(channel, true)) {
            if (time <= 0) {
                sendMessage(channel,".slow", "Trying to turn on slowmode..");
            } else {
                sendMessage(channel,".slow "+time, "Trying to turn on slowmode ("+time+"s)");
            }
        }
    }

    /**
     * Turns off slowmode in the given channel.
     *
     * @param channel The name of the channel.
     */
    public void slowmodeOff(String channel) {
        if (onChannel(channel, true)) {
            sendMessage(channel,".slowoff", "Trying to turn off slowmode..");
        }
    }

    /**
     * Turns on subscriber only mode in the given channel.
     *
     * @param channel The name of the channel.
     */
    public void subscribersOn(String channel) {
        if (onChannel(channel, true)) {
            sendMessage(channel,".subscribers", "Trying to turn on subscribers mode..");
        }
    }

    /** Turns off subscriber only mode in the given channel. */
    public void subscribersOff(String channel) {
        if (onChannel(channel, true)) {
            sendMessage(channel,".subscribersoff", "Trying to turn off subscribers mode..");
        }
    }

    /** Clears the chat in the given channel. */
    public void clearChannel(String channel) {
        if (onChannel(channel, true)) {
            sendMessage(channel,".clear", "Trying to clear channel..");
        }
    }

    /** Bans the given user from the channel. */
    public void ban(String channel, String name) {
        if (onChannel(channel, true)) {
            sendMessage(channel,".ban "+name, "Trying to ban "+name+"..");
        }
    }

    /** Grants moderator status to the given user. */
    public void mod(String channel, String name) {
        if (onChannel(channel, true)) {
            sendMessage(channel,".mod "+name, "Trying to mod "+name+"..");
        }
    }

    /** Removes moderator status from the given user. */
    public void unmod(String channel, String name) {
        if (onChannel(channel, true)) {
            sendMessage(channel,".unmod "+name, "Trying to unmod "+name+"..");
        }
    }

    /**
     * Sends a timeout command to the server. For a positive time, the echo shows
     * the duration both in seconds and (when different) human-readable form.
     *
     * @param channel
     * @param name
     * @param time timeout length in seconds; &lt;= 0 sends a timeout without a time
     */
    public void timeout(String channel, String name, long time) {
        if (onChannel(channel, true)) {
            if (time <= 0) {
                sendMessage(channel,".timeout "+name, "Trying to timeout "+name+"..");
            } else {
                String formatted = DateTime.duration(time, true, false);
                String onlySeconds = time+"s";
                String timeString = formatted.equals(onlySeconds)
                        ? onlySeconds : onlySeconds+"/"+formatted;
                sendMessage(channel,".timeout "+name+" "+time,
                        "Trying to timeout "+name+" ("+timeString+")");
            }
        }
    }

    /** Unbans the given user from the channel. */
    public void unban(String channel, String name) {
        if (onChannel(channel, true)) {
            sendMessage(channel,".unban "+name, "Trying to unban "+name+"..");
        }
    }

    /** Requests the moderator list with normal message output. */
    public void mods(String channel) {
        if (onChannel(channel, true)) {
            sendMessage(channel,".mods", "Requesting moderator list..");
        }
    }

    /** Requests the moderator list silently, but prints a one-line notice first. */
    public void modsSilent(String channel) {
        if (onChannel(channel, true)) {
            g.printLine(channel, "Trying to fix moderators..");
            requestModsSilent(channel);
        }
    }

    /**
     * Requests the moderator list without any output; the channel is remembered
     * so the eventual response can be suppressed (see removeModsSilent).
     */
    public void requestModsSilent(String channel) {
        if (onChannel(channel, false)) {
            silentModsRequestChannel.add(channel);
            c.sendSpamProtectedMessage(channel, ".mods");
        }
    }

    /**
     * Marks a silent /mods request for this channel as answered.
     *
     * @return true when the channel was waiting for a silent response
     */
    public boolean removeModsSilent(String channel) {
        return silentModsRequestChannel.remove(channel);
    }

    /** @return true when any channel still waits for a silent /mods response */
    public boolean waitingForModsSilent() {
        return !silentModsRequestChannel.isEmpty();
    }

    /**
     * Parse the list of mods as returned from the Twitch Chat. The
     * comma-separated list should start after the first colon ("The moderators
     * of this room are: ..").
     *
     * @param text The text as received from the Twitch Chat
     * @return A List of moderator names
     */
    public static List<String> parseModsList(String text) {
        int start = text.indexOf(":") + 1;
        List<String> modsList = new ArrayList<>();
        if (start > 1 && text.length() > start) {
            String mods = text.substring(start);
            if (!mods.trim().isEmpty()) {
                String[] modsArray = mods.split(",");
                for (String mod : modsArray) {
                    modsList.add(mod.trim());
                }
            }
        }
        return modsList;
    }

    /**
     * Starts the timer which requests the /mods list for joined channels.
     */
    public void startAutoRequestMods() {
        // Daemon timer: does not keep the JVM alive.
        Timer timer = new Timer(true);
        timer.schedule(new TimerTask() {
            @Override
            public void run() {
                autoRequestMods();
            }
        }, 1000, REQUEST_MODS_DELAY);
    }

    /**
     * If enabled in the settings, requests /mods for one currently joined
     * channel (and only one), ignoring the ones it was already requested for.
     */
    private void autoRequestMods() {
        if (!c.settings.getBoolean("autoRequestMods")) {
            return;
        }
        Set<String> joinedChannels = c.getJoinedChannels();
        for (String channel : joinedChannels) {
            if (!modsAlreadyRequested.contains(channel)) {
                LOGGER.info("Auto-requesting mods for "+channel);
                modsAlreadyRequested.add(channel);
                requestModsSilent(channel);
                return;
            }
        }
    }

    /**
     * Removes one or all entries from the list of channels the /mods list was
     * already requested for. This can be used on part/disconnect, since users
     * are removed then.
     *
     * @param channel The name of the channel to remove, or null to remove all
     * entries
     */
    public void clearModsAlreadyRequested(String channel) {
        if (channel == null) {
            modsAlreadyRequested.clear();
        } else {
            modsAlreadyRequested.remove(channel);
        }
    }

    /**
     * Trims the parameters and checks them against the given regex.
     *
     * @return the trimmed parameters when they match, otherwise null
     */
    private String prepareAndCheckParameters(String regex, String parameters) {
        if (parameters == null) {
            return null;
        }
        parameters = parameters.trim();
        return parameters.matches(regex) ? parameters : null;
    }
}
apache-2.0
floodlight/loxigen-artifacts
openflowj/gen-src/main/java/org/projectfloodlight/openflow/protocol/OFBsnControllerConnectionsRequest.java
2008
// Copyright (c) 2008 The Board of Trustees of The Leland Stanford Junior University // Copyright (c) 2011, 2012 Open Networking Foundation // Copyright (c) 2012, 2013 Big Switch Networks, Inc. // This library was generated by the LoxiGen Compiler. // See the file LICENSE.txt which should have been included in the source distribution // Automatically generated by LOXI from template of_interface.java // Do not modify package org.projectfloodlight.openflow.protocol; import org.projectfloodlight.openflow.protocol.*; import org.projectfloodlight.openflow.protocol.action.*; import org.projectfloodlight.openflow.protocol.actionid.*; import org.projectfloodlight.openflow.protocol.bsntlv.*; import org.projectfloodlight.openflow.protocol.errormsg.*; import org.projectfloodlight.openflow.protocol.meterband.*; import org.projectfloodlight.openflow.protocol.instruction.*; import org.projectfloodlight.openflow.protocol.instructionid.*; import org.projectfloodlight.openflow.protocol.match.*; import org.projectfloodlight.openflow.protocol.stat.*; import org.projectfloodlight.openflow.protocol.oxm.*; import org.projectfloodlight.openflow.protocol.oxs.*; import org.projectfloodlight.openflow.protocol.queueprop.*; import org.projectfloodlight.openflow.types.*; import org.projectfloodlight.openflow.util.*; import org.projectfloodlight.openflow.exceptions.*; import io.netty.buffer.ByteBuf; public interface OFBsnControllerConnectionsRequest extends OFObject, OFBsnHeader, OFRequest<OFBsnControllerConnectionsReply> { OFVersion getVersion(); OFType getType(); long getXid(); long getExperimenter(); long getSubtype(); void writeTo(ByteBuf channelBuffer); Builder createBuilder(); public interface Builder extends OFBsnHeader.Builder { OFBsnControllerConnectionsRequest build(); OFVersion getVersion(); OFType getType(); long getXid(); Builder setXid(long xid); long getExperimenter(); long getSubtype(); } }
apache-2.0
cyberdrcarr/optaplanner
drools-planner-examples/src/main/java/org/drools/planner/examples/curriculumcourse/domain/Curriculum.java
1157
/* * Copyright 2010 JBoss Inc * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.drools.planner.examples.curriculumcourse.domain; import com.thoughtworks.xstream.annotations.XStreamAlias; import org.apache.commons.lang.builder.CompareToBuilder; import org.drools.planner.examples.common.domain.AbstractPersistable; @XStreamAlias("Curriculum") public class Curriculum extends AbstractPersistable { private String code; public String getCode() { return code; } public void setCode(String code) { this.code = code; } @Override public String toString() { return code; } }
apache-2.0
xfournet/intellij-community
platform/platform-impl/src/com/intellij/openapi/wm/impl/WindowManagerImpl.java
25838
// Copyright 2000-2018 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file. package com.intellij.openapi.wm.impl; import com.intellij.ide.DataManager; import com.intellij.ide.RecentProjectsManagerBase; import com.intellij.ide.impl.DataManagerImpl; import com.intellij.openapi.actionSystem.ex.ActionManagerEx; import com.intellij.openapi.application.Application; import com.intellij.openapi.application.ApplicationManager; import com.intellij.openapi.components.*; import com.intellij.openapi.diagnostic.Logger; import com.intellij.openapi.project.Project; import com.intellij.openapi.project.ProjectManager; import com.intellij.openapi.ui.popup.JBPopup; import com.intellij.openapi.util.Disposer; import com.intellij.openapi.util.SystemInfo; import com.intellij.openapi.util.text.StringUtil; import com.intellij.openapi.wm.IdeFrame; import com.intellij.openapi.wm.StatusBar; import com.intellij.openapi.wm.WindowManagerListener; import com.intellij.openapi.wm.ex.WindowManagerEx; import com.intellij.openapi.wm.impl.welcomeScreen.WelcomeFrame; import com.intellij.ui.FrameState; import com.intellij.ui.ScreenUtil; import com.intellij.util.EventDispatcher; import com.intellij.util.ui.JBInsets; import com.intellij.util.ui.JBUI; import com.intellij.util.ui.UIUtil; import com.sun.jna.platform.WindowUtils; import org.jdom.Element; import org.jetbrains.annotations.NonNls; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import javax.swing.*; import java.awt.*; import java.awt.event.*; import java.awt.geom.Point2D; import java.awt.geom.Rectangle2D; import java.awt.peer.ComponentPeer; import java.awt.peer.FramePeer; import java.util.*; /** * @author Anton Katilin * @author Vladimir Kondratyev */ @State( name = "WindowManager", defaultStateAsResource = true, storages = @Storage(value = "window.manager.xml", roamingType = RoamingType.DISABLED) ) public final class WindowManagerImpl extends 
WindowManagerEx implements NamedComponent, PersistentStateComponent<Element> { private static final Logger LOG = Logger.getInstance("#com.intellij.openapi.wm.impl.WindowManagerImpl"); @NonNls public static final String FULL_SCREEN = "ide.frame.full.screen"; @NonNls private static final String FOCUSED_WINDOW_PROPERTY_NAME = "focusedWindow"; @NonNls private static final String FRAME_ELEMENT = "frame"; @NonNls private static final String EXTENDED_STATE_ATTR = "extended-state"; static { try { System.loadLibrary("jawt"); } catch (Throwable t) { LOG.info("jawt failed to load", t); } } private Boolean myAlphaModeSupported; private final EventDispatcher<WindowManagerListener> myEventDispatcher = EventDispatcher.create(WindowManagerListener.class); private final CommandProcessor myCommandProcessor = new CommandProcessor(); private final WindowWatcher myWindowWatcher = new WindowWatcher(); /** * That is the default layout. */ private final DesktopLayout myLayout = new DesktopLayout(); // null keys must be supported private final Map<Project, IdeFrameImpl> myProjectToFrame = new HashMap<>(); private final Map<Project, Set<JDialog>> myDialogsToDispose = new HashMap<>(); @NotNull final FrameInfo myDefaultFrameInfo = new FrameInfo(); private final WindowAdapter myActivationListener; private final DataManager myDataManager; private final ActionManagerEx myActionManager; /** * invoked by reflection */ public WindowManagerImpl(DataManager dataManager, ActionManagerEx actionManager) { myDataManager = dataManager; myActionManager = actionManager; if (myDataManager instanceof DataManagerImpl) { ((DataManagerImpl)myDataManager).setWindowManager(this); } final Application application = ApplicationManager.getApplication(); if (!application.isUnitTestMode()) { Disposer.register(application, this::disposeRootFrame); } final KeyboardFocusManager keyboardFocusManager = KeyboardFocusManager.getCurrentKeyboardFocusManager(); 
keyboardFocusManager.addPropertyChangeListener(FOCUSED_WINDOW_PROPERTY_NAME, myWindowWatcher); myActivationListener = new WindowAdapter() { @Override public void windowActivated(WindowEvent e) { Window activeWindow = e.getWindow(); if (activeWindow instanceof IdeFrameImpl) { // must be proceedDialogDisposalQueue(((IdeFrameImpl)activeWindow).getProject()); } } }; if (UIUtil.hasLeakingAppleListeners()) { UIUtil.addAwtListener(event -> { if (event.getID() == ContainerEvent.COMPONENT_ADDED) { if (((ContainerEvent)event).getChild() instanceof JViewport) { UIUtil.removeLeakingAppleListeners(); } } }, AWTEvent.CONTAINER_EVENT_MASK, application); } } @Override @NotNull public IdeFrameImpl[] getAllProjectFrames() { final Collection<IdeFrameImpl> ideFrames = myProjectToFrame.values(); return ideFrames.toArray(new IdeFrameImpl[0]); } @Override public JFrame findVisibleFrame() { IdeFrameImpl[] frames = getAllProjectFrames(); return frames.length > 0 ? frames[0] : (JFrame)WelcomeFrame.getInstance(); } @Override public void addListener(final WindowManagerListener listener) { myEventDispatcher.addListener(listener); } @Override public void removeListener(final WindowManagerListener listener) { myEventDispatcher.removeListener(listener); } @Override public final Rectangle getScreenBounds() { return ScreenUtil.getAllScreensRectangle(); } @Override public Rectangle getScreenBounds(@NotNull Project project) { final GraphicsEnvironment environment = GraphicsEnvironment.getLocalGraphicsEnvironment(); final Point onScreen = getFrame(project).getLocationOnScreen(); final GraphicsDevice[] devices = environment.getScreenDevices(); for (final GraphicsDevice device : devices) { final Rectangle bounds = device.getDefaultConfiguration().getBounds(); if (bounds.contains(onScreen)) { return bounds; } } return null; } @Override public final boolean isInsideScreenBounds(final int x, final int y, final int width) { return ScreenUtil.getAllScreensShape().contains(x, y, width, 1); } @Override public 
final boolean isInsideScreenBounds(final int x, final int y) { return ScreenUtil.getAllScreensShape().contains(x, y); } @Override public final boolean isAlphaModeSupported() { if (myAlphaModeSupported == null) { myAlphaModeSupported = calcAlphaModelSupported(); } return myAlphaModeSupported.booleanValue(); } private static boolean calcAlphaModelSupported() { if (AWTUtilitiesWrapper.isTranslucencyAPISupported()) { return AWTUtilitiesWrapper.isTranslucencySupported(AWTUtilitiesWrapper.TRANSLUCENT); } try { return WindowUtils.isWindowAlphaSupported(); } catch (Throwable e) { return false; } } @Override public final void setAlphaModeRatio(final Window window, final float ratio) { if (!window.isDisplayable() || !window.isShowing()) { throw new IllegalArgumentException("window must be displayable and showing. window=" + window); } if (ratio < 0.0f || ratio > 1.0f) { throw new IllegalArgumentException("ratio must be in [0..1] range. ratio=" + ratio); } if (!isAlphaModeSupported() || !isAlphaModeEnabled(window)) { return; } setAlphaMode(window, ratio); } private static void setAlphaMode(Window window, float ratio) { try { if (SystemInfo.isMacOSLeopard) { if (window instanceof JWindow) { ((JWindow)window).getRootPane().putClientProperty("Window.alpha", 1.0f - ratio); } else if (window instanceof JDialog) { ((JDialog)window).getRootPane().putClientProperty("Window.alpha", 1.0f - ratio); } else if (window instanceof JFrame) { ((JFrame)window).getRootPane().putClientProperty("Window.alpha", 1.0f - ratio); } } else if (AWTUtilitiesWrapper.isTranslucencySupported(AWTUtilitiesWrapper.TRANSLUCENT)) { AWTUtilitiesWrapper.setWindowOpacity(window, 1.0f - ratio); } else { WindowUtils.setWindowAlpha(window, 1.0f - ratio); } } catch (Throwable e) { LOG.debug(e); } } @Override public void setWindowMask(final Window window, @Nullable final Shape mask) { try { if (AWTUtilitiesWrapper.isTranslucencySupported(AWTUtilitiesWrapper.PERPIXEL_TRANSPARENT)) { 
AWTUtilitiesWrapper.setWindowShape(window, mask); } else { WindowUtils.setWindowMask(window, mask); } } catch (Throwable e) { LOG.debug(e); } } @Override public void setWindowShadow(Window window, WindowShadowMode mode) { if (window instanceof JWindow) { JRootPane root = ((JWindow)window).getRootPane(); root.putClientProperty("Window.shadow", mode == WindowShadowMode.DISABLED ? Boolean.FALSE : Boolean.TRUE); root.putClientProperty("Window.style", mode == WindowShadowMode.SMALL ? "small" : null); } } @Override public void resetWindow(final Window window) { try { if (!isAlphaModeSupported()) return; setWindowMask(window, null); setAlphaMode(window, 0f); setWindowShadow(window, WindowShadowMode.NORMAL); } catch (Throwable e) { LOG.debug(e); } } @Override public final boolean isAlphaModeEnabled(final Window window) { if (!window.isDisplayable() || !window.isShowing()) { throw new IllegalArgumentException("window must be displayable and showing. window=" + window); } return isAlphaModeSupported(); } @Override public final void setAlphaModeEnabled(final Window window, final boolean state) { if (!window.isDisplayable() || !window.isShowing()) { throw new IllegalArgumentException("window must be displayable and showing. 
window=" + window); } } @Override public void hideDialog(JDialog dialog, Project project) { if (project == null) { dialog.dispose(); } else { IdeFrameImpl frame = getFrame(project); if (frame.isActive()) { dialog.dispose(); } else { queueForDisposal(dialog, project); dialog.setVisible(false); } } } @Override public void adjustContainerWindow(Component c, Dimension oldSize, Dimension newSize) { if (c == null) return; Window wnd = SwingUtilities.getWindowAncestor(c); if (wnd instanceof JWindow) { JBPopup popup = (JBPopup)((JWindow)wnd).getRootPane().getClientProperty(JBPopup.KEY); if (popup != null) { if (oldSize.height < newSize.height) { Dimension size = popup.getSize(); size.height += newSize.height - oldSize.height; popup.setSize(size); popup.moveToFitScreen(); } } } } @Override public final void doNotSuggestAsParent(final Window window) { myWindowWatcher.doNotSuggestAsParent(window); } @Override public final void dispatchComponentEvent(final ComponentEvent e) { myWindowWatcher.dispatchComponentEvent(e); } @Override @Nullable public final Window suggestParentWindow(@Nullable final Project project) { return myWindowWatcher.suggestParentWindow(project); } @Override public final StatusBar getStatusBar(final Project project) { IdeFrameImpl frame = myProjectToFrame.get(project); return frame == null ? null : frame.getStatusBar(); } @Override public StatusBar getStatusBar(@NotNull Component c) { return getStatusBar(c, null); } @Override public StatusBar getStatusBar(@NotNull Component c, @Nullable Project project) { Component parent = UIUtil.findUltimateParent(c); if (parent instanceof IdeFrame) { return ((IdeFrame)parent).getStatusBar().findChild(c); } IdeFrame frame = findFrameFor(project); if (frame != null) { return frame.getStatusBar().findChild(c); } assert false : "Cannot find status bar for " + c; return null; } @Override public IdeFrame findFrameFor(@Nullable final Project project) { IdeFrame frame = null; if (project != null) { frame = project.isDefault() ? 
WelcomeFrame.getInstance() : getFrame(project); if (frame == null) { frame = myProjectToFrame.get(null); } } else { Container eachParent = getMostRecentFocusedWindow(); while(eachParent != null) { if (eachParent instanceof IdeFrame) { frame = (IdeFrame)eachParent; break; } eachParent = eachParent.getParent(); } if (frame == null) { frame = tryToFindTheOnlyFrame(); } } return frame; } private static IdeFrame tryToFindTheOnlyFrame() { IdeFrame candidate = null; final Frame[] all = Frame.getFrames(); for (Frame each : all) { if (each instanceof IdeFrame) { if (candidate == null) { candidate = (IdeFrame)each; } else { candidate = null; break; } } } return candidate; } @Override public final IdeFrameImpl getFrame(@Nullable final Project project) { // no assert! otherwise WindowWatcher.suggestParentWindow fails for default project //LOG.assertTrue(myProject2Frame.containsKey(project)); return myProjectToFrame.get(project); } @Override public IdeFrame getIdeFrame(@Nullable final Project project) { if (project != null) { return getFrame(project); } final Window window = KeyboardFocusManager.getCurrentKeyboardFocusManager().getActiveWindow(); final Component parent = UIUtil.findUltimateParent(window); if (parent instanceof IdeFrame) return (IdeFrame)parent; final Frame[] frames = Frame.getFrames(); for (Frame each : frames) { if (each instanceof IdeFrame) { return (IdeFrame)each; } } return null; } // this method is called when there is some opened project (IDE will not open Welcome Frame, but project) public void showFrame() { final IdeFrameImpl frame = new IdeFrameImpl(myActionManager, myDataManager, ApplicationManager.getApplication()); myProjectToFrame.put(null, frame); Rectangle frameBounds = myDefaultFrameInfo.getBounds(); // set bounds even if maximized because on unmaximize we must restore previous frame bounds // avoid situations when IdeFrame is out of all screens if (frameBounds == null || !ScreenUtil.isVisible(frameBounds)) { frameBounds = 
ScreenUtil.getMainScreenBounds(); int xOff = frameBounds.width / 8; int yOff = frameBounds.height / 8; //noinspection UseDPIAwareInsets JBInsets.removeFrom(frameBounds, new Insets(yOff, xOff, yOff, xOff)); myDefaultFrameInfo.setBounds(frameBounds); } frame.setBounds(frameBounds); frame.setExtendedState(myDefaultFrameInfo.getExtendedState()); frame.setVisible(true); addFrameStateListener(frame); } @Override public final IdeFrameImpl allocateFrame(@NotNull Project project) { LOG.assertTrue(!myProjectToFrame.containsKey(project)); IdeFrameImpl frame = myProjectToFrame.remove(null); if (frame == null) { frame = new IdeFrameImpl(myActionManager, myDataManager, ApplicationManager.getApplication()); } final FrameInfo frameInfo = ProjectFrameBounds.getInstance(project).getRawFrameInfo(); boolean addComponentListener = frameInfo == null; if (frameInfo != null && frameInfo.getBounds() != null) { // update default frame info - newly created project frame should be the same as last opened myDefaultFrameInfo.copyFrom(frameInfo); Rectangle rawBounds = frameInfo.getBounds(); myDefaultFrameInfo.setBounds(FrameBoundsConverter.convertFromDeviceSpace(rawBounds)); } if (!(FrameState.isMaximized(frame.getExtendedState()) || FrameState.isFullScreen(frame)) || !FrameState.isMaximized(myDefaultFrameInfo.getExtendedState())) // going to quit maximized { Rectangle bounds = myDefaultFrameInfo.getBounds(); if (bounds != null) { frame.setBounds(bounds); } } frame.setExtendedState(myDefaultFrameInfo.getExtendedState()); frame.setProject(project); myProjectToFrame.put(project, frame); frame.setVisible(true); frame.addWindowListener(myActivationListener); if (addComponentListener) { if (RecentProjectsManagerBase.getInstanceEx().isBatchOpening()) { frame.toBack(); } addFrameStateListener(frame); } myEventDispatcher.getMulticaster().frameCreated(frame); return frame; } private void addFrameStateListener(@NotNull IdeFrameImpl frame) { frame.addComponentListener(new ComponentAdapter() { @Override 
public void componentMoved(@NotNull ComponentEvent e) { updateFrameBounds(frame); } }); } private void proceedDialogDisposalQueue(Project project) { Set<JDialog> dialogs = myDialogsToDispose.get(project); if (dialogs == null) return; for (JDialog dialog : dialogs) { dialog.dispose(); } myDialogsToDispose.put(project, null); } private void queueForDisposal(JDialog dialog, Project project) { Set<JDialog> dialogs = myDialogsToDispose.computeIfAbsent(project, k -> new HashSet<>()); dialogs.add(dialog); } @Override public final void releaseFrame(final IdeFrameImpl frame) { myEventDispatcher.getMulticaster().beforeFrameReleased(frame); final Project project = frame.getProject(); LOG.assertTrue(project != null); frame.removeWindowListener(myActivationListener); proceedDialogDisposalQueue(project); frame.setProject(null); frame.setTitle(null); frame.setFileTitle(null, null); myProjectToFrame.remove(project); if (myProjectToFrame.isEmpty()) { myProjectToFrame.put(null, frame); } else { Disposer.dispose(frame.getStatusBar()); frame.dispose(); } } public final void disposeRootFrame() { if (myProjectToFrame.size() == 1) { final IdeFrameImpl rootFrame = myProjectToFrame.remove(null); if (rootFrame != null) { // disposing last frame if quitting rootFrame.dispose(); } } } @Override public final Window getMostRecentFocusedWindow() { return myWindowWatcher.getFocusedWindow(); } @Override public final Component getFocusedComponent(@NotNull final Window window) { return myWindowWatcher.getFocusedComponent(window); } @Override @Nullable public final Component getFocusedComponent(@Nullable final Project project) { return myWindowWatcher.getFocusedComponent(project); } /** * Private part */ @Override @NotNull public final CommandProcessor getCommandProcessor() { return myCommandProcessor; } @Override public void loadState(@NotNull Element state) { final Element frameElement = state.getChild(FRAME_ELEMENT); if (frameElement != null) { int frameExtendedState = 
StringUtil.parseInt(frameElement.getAttributeValue(EXTENDED_STATE_ATTR), Frame.NORMAL); if ((frameExtendedState & Frame.ICONIFIED) > 0) { frameExtendedState = Frame.NORMAL; } myDefaultFrameInfo.setBounds(loadFrameBounds(frameElement)); myDefaultFrameInfo.setExtendedState(frameExtendedState); } final Element desktopElement = state.getChild(DesktopLayout.TAG); if (desktopElement != null) { myLayout.readExternal(desktopElement); } } @Nullable private static Rectangle loadFrameBounds(@NotNull Element frameElement) { Rectangle bounds = ProjectFrameBoundsKt.deserializeBounds(frameElement); return bounds == null ? null : FrameBoundsConverter.convertFromDeviceSpace(bounds); } @Nullable @Override public Element getState() { Element frameState = getFrameState(); if (frameState == null) { return null; } Element state = new Element("state"); state.addContent(frameState); // Save default layout Element layoutElement = myLayout.writeExternal(DesktopLayout.TAG); if (layoutElement != null) { state.addContent(layoutElement); } return state; } @Nullable private Element getFrameState() { // Save frame bounds final Project[] projects = ProjectManager.getInstance().getOpenProjects(); if (projects.length == 0) { return null; } Project project = projects[0]; FrameInfo frameInfo = ProjectFrameBoundsKt.getFrameInfoInDeviceSpace(this, project); if (frameInfo == null) { return null; } final Element frameElement = new Element(FRAME_ELEMENT); Rectangle rectangle = frameInfo.getBounds(); if (rectangle != null) { ProjectFrameBoundsKt.serializeBounds(rectangle, frameElement); } if (frameInfo.getExtendedState() != Frame.NORMAL) { frameElement.setAttribute(EXTENDED_STATE_ATTR, Integer.toString(frameInfo.getExtendedState())); } return frameElement; } int updateFrameBounds(@NotNull IdeFrameImpl frame) { int extendedState = frame.getExtendedState(); if (SystemInfo.isMacOSLion) { ComponentPeer peer = frame.getPeer(); if (peer instanceof FramePeer) { // frame.state is not updated by jdk so get it 
directly from peer extendedState = ((FramePeer)peer).getState(); } } boolean isMaximized = FrameState.isMaximized(extendedState) || isFullScreenSupportedInCurrentOS() && frame.isInFullScreen(); Rectangle frameBounds = myDefaultFrameInfo.getBounds(); boolean usePreviousBounds = isMaximized && frameBounds != null && frame.getBounds().contains(new Point((int)frameBounds.getCenterX(), (int)frameBounds.getCenterY())); if (!usePreviousBounds) { myDefaultFrameInfo.setBounds(frame.getBounds()); } return extendedState; } @Override public final DesktopLayout getLayout() { return myLayout; } @Override public final void setLayout(final DesktopLayout layout) { myLayout.copyFrom(layout); } @Override @NotNull public final String getComponentName() { return "WindowManager"; } public WindowWatcher getWindowWatcher() { return myWindowWatcher; } @Override public boolean isFullScreenSupportedInCurrentOS() { return SystemInfo.isMacOSLion || SystemInfo.isWindows || SystemInfo.isXWindow && X11UiUtil.isFullScreenSupported(); } static boolean isFloatingMenuBarSupported() { return !SystemInfo.isMac && getInstance().isFullScreenSupportedInCurrentOS(); } /** * Converts the frame bounds b/w the user space (JRE-managed HiDPI mode) and the device space (IDE-managed HiDPI mode). 
* See {@link UIUtil#isJreHiDPIEnabled()} */ static class FrameBoundsConverter { /** * @param bounds the bounds in the device space * @return the bounds in the user space */ @NotNull static Rectangle convertFromDeviceSpace(@NotNull Rectangle bounds) { Rectangle b = bounds.getBounds(); if (!shouldConvert()) return b; try { for (GraphicsDevice gd : GraphicsEnvironment.getLocalGraphicsEnvironment().getScreenDevices()) { Rectangle devBounds = gd.getDefaultConfiguration().getBounds(); // in user space scaleUp(devBounds, gd.getDefaultConfiguration()); // to device space Rectangle2D.Float devBounds2D = new Rectangle2D.Float(devBounds.x, devBounds.y, devBounds.width, devBounds.height); Point2D.Float center2d = new Point2D.Float(b.x + b.width / 2, b.y + b.height / 2); if (devBounds2D.contains(center2d)) { scaleDown(b, gd.getDefaultConfiguration()); break; } } } catch (HeadlessException ignore) { } return b; } /** * @param gc the graphics config * @param bounds the bounds in the user space * @return the bounds in the device space */ public static Rectangle convertToDeviceSpace(GraphicsConfiguration gc, @NotNull Rectangle bounds) { Rectangle b = bounds.getBounds(); if (!shouldConvert()) return b; try { scaleUp(b, gc); } catch (HeadlessException ignore) { } return b; } private static boolean shouldConvert() { if (SystemInfo.isLinux || // JRE-managed HiDPI mode is not yet implemented (pending) SystemInfo.isMac) // JRE-managed HiDPI mode is permanent { return false; } // device space equals user space return UIUtil.isJreHiDPIEnabled(); } private static void scaleUp(@NotNull Rectangle bounds, @NotNull GraphicsConfiguration gc) { scale(bounds, gc.getBounds(), JBUI.sysScale(gc)); } private static void scaleDown(@NotNull Rectangle bounds, @NotNull GraphicsConfiguration gc) { float scale = JBUI.sysScale(gc); assert scale != 0; scale(bounds, gc.getBounds(), 1 / scale); } private static void scale(@NotNull Rectangle bounds, @NotNull Rectangle deviceBounds, float scale) { // On Windows, 
JB SDK transforms the screen bounds to the user space as follows: // [x, y, width, height] -> [x, y, width / scale, height / scale] // xy are not transformed in order to avoid overlapping of the screen bounds in multi-dpi env. // scale the delta b/w xy and deviceBounds.xy int x = (int)Math.floor(deviceBounds.x + (bounds.x - deviceBounds.x) * scale); int y = (int)Math.floor(deviceBounds.y + (bounds.y - deviceBounds.y) * scale); bounds.setBounds(x, y, (int)Math.ceil(bounds.width * scale), (int)Math.ceil(bounds.height * scale)); } } }
apache-2.0
mentlerd/JDA
src/main/java/net/dv8tion/jda/core/managers/GuildController.java
122809
/* * Copyright 2015-2017 Austin Keener & Michael Ritter & Florian Spieß * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package net.dv8tion.jda.core.managers; import net.dv8tion.jda.core.AccountType; import net.dv8tion.jda.core.JDA; import net.dv8tion.jda.core.Permission; import net.dv8tion.jda.core.entities.*; import net.dv8tion.jda.core.entities.impl.EmoteImpl; import net.dv8tion.jda.core.entities.impl.GuildImpl; import net.dv8tion.jda.core.entities.impl.MemberImpl; import net.dv8tion.jda.core.exceptions.AccountTypeException; import net.dv8tion.jda.core.exceptions.GuildUnavailableException; import net.dv8tion.jda.core.exceptions.PermissionException; import net.dv8tion.jda.core.requests.Request; import net.dv8tion.jda.core.requests.Response; import net.dv8tion.jda.core.requests.RestAction; import net.dv8tion.jda.core.requests.Route; import net.dv8tion.jda.core.requests.restaction.AuditableRestAction; import net.dv8tion.jda.core.requests.restaction.ChannelAction; import net.dv8tion.jda.core.requests.restaction.RoleAction; import net.dv8tion.jda.core.requests.restaction.WebhookAction; import net.dv8tion.jda.core.requests.restaction.order.ChannelOrderAction; import net.dv8tion.jda.core.requests.restaction.order.RoleOrderAction; import net.dv8tion.jda.core.utils.MiscUtil; import net.dv8tion.jda.core.utils.PermissionUtil; import net.dv8tion.jda.core.utils.Checks; import org.json.JSONArray; import org.json.JSONObject; import javax.annotation.CheckReturnValue; import java.util.*; 
import java.util.stream.Collectors; import java.util.stream.Stream; /** * A controller that allows to utilize moderation * permissions and create new channels and roles. * * @since 3.0 */ public class GuildController { protected final GuildImpl guild; /** * Creates a new GuildController instance * for the specified Guild instance * * @param guild * The {@link net.dv8tion.jda.core.entities.Guild Guild} * that will be modified */ public GuildController(Guild guild) { this.guild = (GuildImpl) guild; } /** * The underlying {@link net.dv8tion.jda.core.entities.Guild Guild} instance * * @return The underlying {@link net.dv8tion.jda.core.entities.Guild Guild} instance */ public Guild getGuild() { return guild; } /** * The {@link net.dv8tion.jda.core.JDA JDA} instance of this GuildController * * @return the corresponding JDA instance */ public JDA getJDA() { return guild.getJDA(); } /** * Used to move a {@link net.dv8tion.jda.core.entities.Member Member} from one {@link net.dv8tion.jda.core.entities.VoiceChannel VoiceChannel} * to another {@link net.dv8tion.jda.core.entities.VoiceChannel VoiceChannel}. * <br>As a note, you cannot move a Member that isn't already in a VoiceChannel. Also they must be in a VoiceChannel * in the same Guild as the one that you are moving them to. 
* * <p>Possible {@link net.dv8tion.jda.core.requests.ErrorResponse ErrorResponses} caused by * the returned {@link net.dv8tion.jda.core.requests.RestAction RestAction} include the following: * <ul> * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_PERMISSIONS MISSING_PERMISSIONS} * <br>The target Member cannot be moved due to a permission discrepancy</li> * * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_ACCESS MISSING_ACCESS} * <br>We were removed from the Guild before finishing the task</li> * * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#UNKNOWN_MEMBER UNKNOWN_MEMBER} * <br>The specified Member was removed from the Guild before finishing the task</li> * * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#UNKNOWN_CHANNEL UNKNOWN_CHANNEL} * <br>The specified channel was deleted before finishing the task</li> * </ul> * * @param member * The {@link net.dv8tion.jda.core.entities.Member Member} that you are moving. * @param voiceChannel * The destination {@link net.dv8tion.jda.core.entities.VoiceChannel VoiceChannel} to which the member is being * moved to. * * @throws IllegalStateException * If the Member isn't currently in a VoiceChannel in this Guild. 
* @throws IllegalArgumentException * <ul> * <li>If any of the provided arguments is {@code null}</li> * <li>If the provided Member isn't part of this {@link net.dv8tion.jda.core.entities.Guild Guild}</li> * <li>If the provided VoiceChannel isn't part of this {@link net.dv8tion.jda.core.entities.Guild Guild}</li> * </ul> * @throws net.dv8tion.jda.core.exceptions.PermissionException * <ul> * <li>If this account doesn't have {@link net.dv8tion.jda.core.Permission#VOICE_MOVE_OTHERS} * in the VoiceChannel that the Member is currently in.</li> * <li>If this account <b>AND</b> the Member being moved don't have * {@link net.dv8tion.jda.core.Permission#VOICE_CONNECT} for the destination VoiceChannel.</li> * </ul> * @throws net.dv8tion.jda.core.exceptions.GuildUnavailableException * If the guild is temporarily not {@link net.dv8tion.jda.core.entities.Guild#isAvailable() available} * * @return {@link net.dv8tion.jda.core.requests.RestAction RestAction} */ @CheckReturnValue public RestAction<Void> moveVoiceMember(Member member, VoiceChannel voiceChannel) { checkAvailable(); Checks.notNull(member, "member"); Checks.notNull(member, "voiceChannel"); checkGuild(member.getGuild(), "member"); checkGuild(voiceChannel.getGuild(), "voiceChannel"); GuildVoiceState vState = member.getVoiceState(); if (!vState.inVoiceChannel()) throw new IllegalStateException("You cannot move a Member who isn't in a VoiceChannel!"); if (!PermissionUtil.checkPermission(vState.getChannel(), guild.getSelfMember(), Permission.VOICE_MOVE_OTHERS)) throw new PermissionException(Permission.VOICE_MOVE_OTHERS, "This account does not have Permission to MOVE_OTHERS out of the channel that the Member is currently in."); if (!PermissionUtil.checkPermission(voiceChannel, guild.getSelfMember(), Permission.VOICE_CONNECT) && !PermissionUtil.checkPermission(voiceChannel, member, Permission.VOICE_CONNECT)) throw new PermissionException(Permission.VOICE_CONNECT, "Neither this account nor the Member that is attempting to be 
moved have the VOICE_CONNECT permission " + "for the destination VoiceChannel, so the move cannot be done."); JSONObject body = new JSONObject().put("channel_id", voiceChannel.getId()); Route.CompiledRoute route = Route.Guilds.MODIFY_MEMBER.compile(guild.getId(), member.getUser().getId()); return new RestAction<Void>(guild.getJDA(), route, body) { @Override protected void handleResponse(Response response, Request<Void> request) { if (response.isOk()) request.onSuccess(null); else request.onFailure(response); } }; } /** * Changes a Member's nickname in this guild. * The nickname is visible to all members of this guild. * * <p>To change the nickname for the currently logged in account * only the Permission {@link net.dv8tion.jda.core.Permission#NICKNAME_CHANGE NICKNAME_CHANGE} is required. * <br>To change the nickname of <b>any</b> {@link net.dv8tion.jda.core.entities.Member Member} for this {@link net.dv8tion.jda.core.entities.Guild Guild} * the Permission {@link net.dv8tion.jda.core.Permission#NICKNAME_MANAGE NICKNAME_MANAGE} is required. * * <p>Possible {@link net.dv8tion.jda.core.requests.ErrorResponse ErrorResponses} caused by * the returned {@link net.dv8tion.jda.core.requests.RestAction RestAction} include the following: * <ul> * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_PERMISSIONS MISSING_PERMISSIONS} * <br>The nickname of the target Member is not modifiable due to a permission discrepancy</li> * * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_ACCESS MISSING_ACCESS} * <br>We were removed from the Guild before finishing the task</li> * * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#UNKNOWN_MEMBER UNKNOWN_MEMBER} * <br>The specified Member was removed from the Guild before finishing the task</li> * </ul> * * @param member * The {@link net.dv8tion.jda.core.entities.Member Member} for which the nickname should be changed. 
* @param nickname
*        The new nickname of the {@link net.dv8tion.jda.core.entities.Member Member}, provide {@code null} or an
*        empty String to reset the nickname
*
* @throws IllegalArgumentException
*         If the specified {@link net.dv8tion.jda.core.entities.Member Member}
*         is not from the same {@link net.dv8tion.jda.core.entities.Guild Guild}.
*         Or if the provided member is {@code null}
* @throws net.dv8tion.jda.core.exceptions.PermissionException
*         <ul>
*             <li>If attempting to set nickname for self and the logged in account has neither {@link net.dv8tion.jda.core.Permission#NICKNAME_CHANGE}
*                 nor {@link net.dv8tion.jda.core.Permission#NICKNAME_MANAGE}</li>
*             <li>If attempting to set nickname for another member and the logged in account does not have {@link net.dv8tion.jda.core.Permission#NICKNAME_MANAGE}</li>
*             <li>If attempting to set nickname for another member and the logged in account cannot manipulate the other user due to permission hierarchy position.
*                 <br>See {@link net.dv8tion.jda.core.utils.PermissionUtil#canInteract(Member, Member) PermissionUtil.canInteract(Member, Member)}</li>
*         </ul>
* @throws net.dv8tion.jda.core.exceptions.GuildUnavailableException
*         If the guild is temporarily not {@link net.dv8tion.jda.core.entities.Guild#isAvailable() available}
*
* @return {@link net.dv8tion.jda.core.requests.restaction.AuditableRestAction AuditableRestAction}
*/
@CheckReturnValue
public AuditableRestAction<Void> setNickname(Member member, String nickname)
{
    checkAvailable();
    Checks.notNull(member, "member");
    checkGuild(member.getGuild(), "member");

    if(member.equals(guild.getSelfMember()))
    {
        if(!member.hasPermission(Permission.NICKNAME_CHANGE) && !member.hasPermission(Permission.NICKNAME_MANAGE))
            throw new PermissionException(Permission.NICKNAME_CHANGE, "You neither have NICKNAME_CHANGE nor NICKNAME_MANAGE permission!");
    }
    else
    {
        checkPermission(Permission.NICKNAME_MANAGE);
        checkPosition(member);
    }

    // No-op shortcut: nickname is unchanged, so skip the REST request entirely.
    if (Objects.equals(nickname, member.getNickname()))
        return new AuditableRestAction.EmptyRestAction<>(getJDA(), null);

    if (nickname == null)
        nickname = "";

    JSONObject body = new JSONObject().put("nick", nickname);

    // Discord uses a dedicated endpoint for changing the bot's own nickname.
    Route.CompiledRoute route;
    if (member.equals(guild.getSelfMember()))
        route = Route.Guilds.MODIFY_SELF_NICK.compile(guild.getId());
    else
        route = Route.Guilds.MODIFY_MEMBER.compile(guild.getId(), member.getUser().getId());

    return new AuditableRestAction<Void>(guild.getJDA(), route, body)
    {
        @Override
        protected void handleResponse(Response response, Request<Void> request)
        {
            if (response.isOk())
                request.onSuccess(null);
            else
                request.onFailure(response);
        }
    };
}

/**
 * This method will prune (kick) all members who were offline for at least <i>days</i> days.
 * <br>The RestAction returned from this method will return the amount of Members that were pruned.
 * <br>You can use {@link Guild#getPrunableMemberCount(int)} to determine how many Members would be pruned if you were to
 * call this method.
 *
 * <p>Possible {@link net.dv8tion.jda.core.requests.ErrorResponse ErrorResponses} caused by
 * the returned {@link net.dv8tion.jda.core.requests.RestAction RestAction} include the following:
 * <ul>
 *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_PERMISSIONS MISSING_PERMISSIONS}
 *     <br>The prune cannot be finished due to a permission discrepancy</li>
 *
 *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_ACCESS MISSING_ACCESS}
 *     <br>We were removed from the Guild before finishing the task</li>
 * </ul>
 *
 * @param days
 *        Minimum number of days since a member has been offline to get affected.
 *
 * @throws net.dv8tion.jda.core.exceptions.PermissionException
 *         If the account doesn't have {@link net.dv8tion.jda.core.Permission#KICK_MEMBERS KICK_MEMBERS} Permission.
* @throws net.dv8tion.jda.core.exceptions.GuildUnavailableException
*         If the guild is temporarily not {@link net.dv8tion.jda.core.entities.Guild#isAvailable() available}
* @throws IllegalArgumentException
*         If the provided days are less than {@code 1}
*
* @return {@link net.dv8tion.jda.core.requests.restaction.AuditableRestAction AuditableRestAction} - Type: Integer
*         <br>The amount of Members that were pruned from the Guild.
*/
@CheckReturnValue
public AuditableRestAction<Integer> prune(int days)
{
    checkAvailable();
    checkPermission(Permission.KICK_MEMBERS);

    if (days < 1)
        throw new IllegalArgumentException("Days amount must be at minimum 1 day.");

    Route.CompiledRoute route = Route.Guilds.PRUNE_MEMBERS.compile(guild.getId()).withQueryParams("days", Integer.toString(days));
    return new AuditableRestAction<Integer>(guild.getJDA(), route)
    {
        @Override
        protected void handleResponse(Response response, Request<Integer> request)
        {
            if (response.isOk())
                request.onSuccess(response.getObject().getInt("pruned"));
            else
                request .onFailure(response);
        }
    };
}

/**
 * Kicks a {@link net.dv8tion.jda.core.entities.Member Member} from the {@link net.dv8tion.jda.core.entities.Guild Guild}.
 *
 * <p><b>Note:</b> {@link net.dv8tion.jda.core.entities.Guild#getMembers()} will still contain the {@link net.dv8tion.jda.core.entities.User User}
 * until Discord sends the {@link net.dv8tion.jda.core.events.guild.member.GuildMemberLeaveEvent GuildMemberLeaveEvent}.
 *
 * <p>Possible {@link net.dv8tion.jda.core.requests.ErrorResponse ErrorResponses} caused by
 * the returned {@link net.dv8tion.jda.core.requests.RestAction RestAction} include the following:
 * <ul>
 *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_PERMISSIONS MISSING_PERMISSIONS}
 *     <br>The target Member cannot be kicked due to a permission discrepancy</li>
 *
 *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_ACCESS MISSING_ACCESS}
 *     <br>We were removed from the Guild before finishing the task</li>
 *
 *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#UNKNOWN_MEMBER UNKNOWN_MEMBER}
 *     <br>The specified Member was removed from the Guild before finishing the task</li>
 * </ul>
 *
 * @param member
 *        The {@link net.dv8tion.jda.core.entities.Member Member} to kick
 *        from the {@link net.dv8tion.jda.core.entities.Guild Guild}.
 * @param reason
 *        The reason for this action or {@code null} if there is no specified reason
 *
 * @throws java.lang.IllegalArgumentException
 *         If the provided member is not a Member of this Guild or is {@code null}
 * @throws net.dv8tion.jda.core.exceptions.GuildUnavailableException
 *         If the guild is temporarily not {@link net.dv8tion.jda.core.entities.Guild#isAvailable() available}
 * @throws net.dv8tion.jda.core.exceptions.PermissionException
 *         <ul>
 *             <li>If the logged in account does not have the {@link net.dv8tion.jda.core.Permission#KICK_MEMBERS} permission.</li>
 *             <li>If the logged in account cannot kick the other member due to permission hierarchy position.
*                 <br>See {@link net.dv8tion.jda.core.utils.PermissionUtil#canInteract(Member, Member) PermissionUtil.canInteract(Member, Member)}</li>
*         </ul>
*
* @return {@link net.dv8tion.jda.core.requests.restaction.AuditableRestAction AuditableRestAction}
*         Kicks the provided Member from the current Guild
*/
@CheckReturnValue
public AuditableRestAction<Void> kick(Member member, String reason)
{
    checkAvailable();
    Checks.notNull(member, "member");
    checkGuild(member.getGuild(), "member");
    checkPermission(Permission.KICK_MEMBERS);
    checkPosition(member);

    final String userId = member.getUser().getId();
    final String guildId = guild.getId();

    Route.CompiledRoute route = Route.Guilds.KICK_MEMBER.compile(guildId, userId);
    if (reason != null && !reason.isEmpty())
        route = route.withQueryParams("reason", MiscUtil.encodeUTF8(reason));

    return new AuditableRestAction<Void>(guild.getJDA(), route)
    {
        @Override
        protected void handleResponse(Response response, Request<Void> request)
        {
            if (response.isOk())
                request.onSuccess(null);
            else
                request.onFailure(response);
        }
    };
}

/**
 * Kicks the {@link net.dv8tion.jda.core.entities.Member Member} specified by the userId from the {@link net.dv8tion.jda.core.entities.Guild Guild}.
 *
 * <p><b>Note:</b> {@link net.dv8tion.jda.core.entities.Guild#getMembers()} will still contain the {@link net.dv8tion.jda.core.entities.User User}
 * until Discord sends the {@link net.dv8tion.jda.core.events.guild.member.GuildMemberLeaveEvent GuildMemberLeaveEvent}.
 *
 * <p>Possible {@link net.dv8tion.jda.core.requests.ErrorResponse ErrorResponses} caused by
 * the returned {@link net.dv8tion.jda.core.requests.RestAction RestAction} include the following:
 * <ul>
 *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_PERMISSIONS MISSING_PERMISSIONS}
 *     <br>The target Member cannot be kicked due to a permission discrepancy</li>
 *
 *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_ACCESS MISSING_ACCESS}
 *     <br>We were removed from the Guild before finishing the task</li>
 *
 *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#UNKNOWN_MEMBER UNKNOWN_MEMBER}
 *     <br>The specified Member was removed from the Guild before finishing the task</li>
 * </ul>
 *
 * @param userId
 *        The id of the {@link net.dv8tion.jda.core.entities.User User} to kick
 *        from the {@link net.dv8tion.jda.core.entities.Guild Guild}.
 * @param reason
 *        The reason for this action or {@code null} if there is no specified reason
 *
 * @throws net.dv8tion.jda.core.exceptions.PermissionException
 *         <ul>
 *             <li>If the logged in account does not have the {@link net.dv8tion.jda.core.Permission#KICK_MEMBERS} permission.</li>
 *             <li>If the logged in account cannot kick the other member due to permission hierarchy position.
 *                 <br>See {@link net.dv8tion.jda.core.utils.PermissionUtil#canInteract(Member, Member) PermissionUtil.canInteract(Member, Member)}</li>
 *         </ul>
 * @throws java.lang.IllegalArgumentException
 *         If the userId provided does not correspond to a Member in this Guild or the provided {@code userId} is blank/null.
* @throws net.dv8tion.jda.core.exceptions.GuildUnavailableException
*         If the guild is temporarily not {@link net.dv8tion.jda.core.entities.Guild#isAvailable() available}
*
* @return {@link net.dv8tion.jda.core.requests.restaction.AuditableRestAction AuditableRestAction}
*/
@CheckReturnValue
public AuditableRestAction<Void> kick(String userId, String reason)
{
    Checks.notBlank(userId, "userId");

    Member member = guild.getMemberById(userId);
    if (member == null)
        throw new IllegalArgumentException("The provided userId does not correspond to a member in this guild! Provided userId: " + userId);

    return kick(member, reason);
}

/**
 * Kicks a {@link net.dv8tion.jda.core.entities.Member Member} from the {@link net.dv8tion.jda.core.entities.Guild Guild}.
 *
 * <p><b>Note:</b> {@link net.dv8tion.jda.core.entities.Guild#getMembers()} will still contain the {@link net.dv8tion.jda.core.entities.User User}
 * until Discord sends the {@link net.dv8tion.jda.core.events.guild.member.GuildMemberLeaveEvent GuildMemberLeaveEvent}.
 *
 * <p>Possible {@link net.dv8tion.jda.core.requests.ErrorResponse ErrorResponses} caused by
 * the returned {@link net.dv8tion.jda.core.requests.RestAction RestAction} include the following:
 * <ul>
 *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_PERMISSIONS MISSING_PERMISSIONS}
 *     <br>The target Member cannot be kicked due to a permission discrepancy</li>
 *
 *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_ACCESS MISSING_ACCESS}
 *     <br>We were removed from the Guild before finishing the task</li>
 *
 *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#UNKNOWN_MEMBER UNKNOWN_MEMBER}
 *     <br>The specified Member was removed from the Guild before finishing the task</li>
 * </ul>
 *
 * @param member
 *        The {@link net.dv8tion.jda.core.entities.Member Member} to kick from the {@link net.dv8tion.jda.core.entities.Guild Guild}.
 *
 * @throws java.lang.IllegalArgumentException
 *         If the provided member is not a Member of this Guild or is {@code null}
 * @throws net.dv8tion.jda.core.exceptions.GuildUnavailableException
 *         If the guild is temporarily not {@link net.dv8tion.jda.core.entities.Guild#isAvailable() available}
 * @throws net.dv8tion.jda.core.exceptions.PermissionException
 *         <ul>
 *             <li>If the logged in account does not have the {@link net.dv8tion.jda.core.Permission#KICK_MEMBERS} permission.</li>
 *             <li>If the logged in account cannot kick the other member due to permission hierarchy position.
 *                 <br>See {@link net.dv8tion.jda.core.utils.PermissionUtil#canInteract(Member, Member) PermissionUtil.canInteract(Member, Member)}</li>
 *         </ul>
 *
 * @return {@link net.dv8tion.jda.core.requests.restaction.AuditableRestAction AuditableRestAction}
 *         Kicks the provided Member from the current Guild
 */
@CheckReturnValue
public AuditableRestAction<Void> kick(Member member)
{
    return kick(member, null);
}

/**
 * Kicks the {@link net.dv8tion.jda.core.entities.Member Member} specified by the userId from the {@link net.dv8tion.jda.core.entities.Guild Guild}.
 *
 * <p><b>Note:</b> {@link net.dv8tion.jda.core.entities.Guild#getMembers()} will still contain the {@link net.dv8tion.jda.core.entities.User User}
 * until Discord sends the {@link net.dv8tion.jda.core.events.guild.member.GuildMemberLeaveEvent GuildMemberLeaveEvent}.
*
* <p>Possible {@link net.dv8tion.jda.core.requests.ErrorResponse ErrorResponses} caused by
* the returned {@link net.dv8tion.jda.core.requests.RestAction RestAction} include the following:
* <ul>
*     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_PERMISSIONS MISSING_PERMISSIONS}
*     <br>The target Member cannot be kicked due to a permission discrepancy</li>
*
*     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_ACCESS MISSING_ACCESS}
*     <br>We were removed from the Guild before finishing the task</li>
*
*     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#UNKNOWN_MEMBER UNKNOWN_MEMBER}
*     <br>The specified Member was removed from the Guild before finishing the task</li>
* </ul>
*
* @param userId
*        The id of the {@link net.dv8tion.jda.core.entities.User User} to kick from the {@link net.dv8tion.jda.core.entities.Guild Guild}.
*
* @throws net.dv8tion.jda.core.exceptions.PermissionException
*         <ul>
*             <li>If the logged in account does not have the {@link net.dv8tion.jda.core.Permission#KICK_MEMBERS} permission.</li>
*             <li>If the logged in account cannot kick the other member due to permission hierarchy position.
*                 <br>See {@link net.dv8tion.jda.core.utils.PermissionUtil#canInteract(Member, Member) PermissionUtil.canInteract(Member, Member)}</li>
*         </ul>
* @throws java.lang.IllegalArgumentException
*         If the userId provided does not correspond to a Member in this Guild or the provided {@code userId} is blank/null.
* @throws net.dv8tion.jda.core.exceptions.GuildUnavailableException
*         If the guild is temporarily not {@link net.dv8tion.jda.core.entities.Guild#isAvailable() available}
*
* @return {@link net.dv8tion.jda.core.requests.restaction.AuditableRestAction AuditableRestAction}
*/
@CheckReturnValue
public AuditableRestAction<Void> kick(String userId)
{
    return kick(userId, null);
}

/**
 * Bans a {@link net.dv8tion.jda.core.entities.Member Member} and deletes messages sent by the user
 * based on the amount of delDays.
 * <br>If you wish to ban a member without deleting any messages, provide delDays with a value of 0.
 * This change will be applied immediately.
 *
 * <p><b>Note:</b> {@link net.dv8tion.jda.core.entities.Guild#getMembers()} will still contain the
 * {@link net.dv8tion.jda.core.entities.Member Member} until Discord sends the
 * {@link net.dv8tion.jda.core.events.guild.member.GuildMemberLeaveEvent GuildMemberLeaveEvent}.
 *
 * <p>Possible {@link net.dv8tion.jda.core.requests.ErrorResponse ErrorResponses} caused by
 * the returned {@link net.dv8tion.jda.core.requests.RestAction RestAction} include the following:
 * <ul>
 *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_PERMISSIONS MISSING_PERMISSIONS}
 *     <br>The target Member cannot be banned due to a permission discrepancy</li>
 *
 *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_ACCESS MISSING_ACCESS}
 *     <br>We were removed from the Guild before finishing the task</li>
 *
 *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#UNKNOWN_MEMBER UNKNOWN_MEMBER}
 *     <br>The specified Member was removed from the Guild before finishing the task</li>
 * </ul>
 *
 * @param member
 *        The {@link net.dv8tion.jda.core.entities.Member Member} to ban.
 * @param delDays
 *        The history of messages, in days, that will be deleted.
 * @param reason
 *        The reason for this action or {@code null} if there is no specified reason
 *
 * @throws net.dv8tion.jda.core.exceptions.PermissionException
 *         <ul>
 *             <li>If the logged in account does not have the {@link net.dv8tion.jda.core.Permission#BAN_MEMBERS} permission.</li>
 *             <li>If the logged in account cannot ban the other user due to permission hierarchy position.
* <br>See {@link net.dv8tion.jda.core.utils.PermissionUtil#canInteract(Member, Member) PermissionUtil.canInteract(Member, Member)}</li> * </ul> * @throws java.lang.IllegalArgumentException * <ul> * <li>If the provided amount of days (delDays) is less than 0.</li> * <li>If the provided member is {@code null}</li> * </ul> * * @throws net.dv8tion.jda.core.exceptions.GuildUnavailableException * If the guild is temporarily not {@link net.dv8tion.jda.core.entities.Guild#isAvailable() available} * * @return {@link net.dv8tion.jda.core.requests.restaction.AuditableRestAction AuditableRestAction} */ @CheckReturnValue public AuditableRestAction<Void> ban(Member member, int delDays, String reason) { checkAvailable(); Checks.notNull(member, "member"); //Don't check if the provided member is from this guild. It doesn't matter if they are or aren't. return ban(member.getUser(), delDays, reason); } /** * Bans a {@link net.dv8tion.jda.core.entities.User User} and deletes messages sent by the user * based on the amount of delDays. * <br>If you wish to ban a user without deleting any messages, provide delDays with a value of 0. * This change will be applied immediately. * * <p><b>Note:</b> {@link net.dv8tion.jda.core.entities.Guild#getMembers()} will still contain the {@link net.dv8tion.jda.core.entities.User User's} * {@link net.dv8tion.jda.core.entities.Member Member} object (if the User was in the Guild) * until Discord sends the {@link net.dv8tion.jda.core.events.guild.member.GuildMemberLeaveEvent GuildMemberLeaveEvent}. 
* * <p>Possible {@link net.dv8tion.jda.core.requests.ErrorResponse ErrorResponses} caused by * the returned {@link net.dv8tion.jda.core.requests.RestAction RestAction} include the following: * <ul> * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_PERMISSIONS MISSING_PERMISSIONS} * <br>The target Member cannot be banned due to a permission discrepancy</li> * * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_ACCESS MISSING_ACCESS} * <br>We were removed from the Guild before finishing the task</li> * * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#UNKNOWN_MEMBER UNKNOWN_MEMBER} * <br>The specified Member was removed from the Guild before finishing the task</li> * </ul> * * @param user * The {@link net.dv8tion.jda.core.entities.User User} to ban. * @param delDays * The history of messages, in days, that will be deleted. * @param reason * The reason for this action or {@code null} if there is no specified reason * * @throws net.dv8tion.jda.core.exceptions.PermissionException * <ul> * <li>If the logged in account does not have the {@link net.dv8tion.jda.core.Permission#BAN_MEMBERS} permission.</li> * <li>If the logged in account cannot ban the other user due to permission hierarchy position. 
* <br>See {@link net.dv8tion.jda.core.utils.PermissionUtil#canInteract(Member, Member) PermissionUtil.canInteract(Member, Member)}</li> * </ul> * @throws java.lang.IllegalArgumentException * <ul> * <li>If the provided amount of days (delDays) is less than 0.</li> * <li>If the provided user is null</li> * </ul> * @throws net.dv8tion.jda.core.exceptions.GuildUnavailableException * If the guild is temporarily not {@link net.dv8tion.jda.core.entities.Guild#isAvailable() available} * * @return {@link net.dv8tion.jda.core.requests.restaction.AuditableRestAction AuditableRestAction} */ @CheckReturnValue public AuditableRestAction<Void> ban(User user, int delDays, String reason) { checkAvailable(); Checks.notNull(user, "user"); checkPermission(Permission.BAN_MEMBERS); if (guild.isMember(user)) // If user is in guild. Check if we are able to ban. checkPosition(guild.getMember(user)); if (delDays < 0) throw new IllegalArgumentException("Provided delDays cannot be less that 0. How can you delete messages that are -1 days old?"); final String userId = user.getId(); Route.CompiledRoute route = Route.Guilds.BAN.compile(guild.getId(), userId); if (reason != null && !reason.isEmpty()) route = route.withQueryParams("reason", MiscUtil.encodeUTF8(reason)); if (delDays > 0) route = route.withQueryParams("delete-message-days", Integer.toString(delDays)); return new AuditableRestAction<Void>(guild.getJDA(), route) { @Override protected void handleResponse(Response response, Request<Void> request) { if (response.isOk()) request.onSuccess(null); else request.onFailure(response); } }; } /** * Bans the a user specified by the userId and deletes messages sent by the user * based on the amount of delDays. * <br>If you wish to ban a user without deleting any messages, provide delDays with a value of 0. * This change will be applied immediately. 
* * <p><b>Note:</b> {@link net.dv8tion.jda.core.entities.Guild#getMembers()} will still contain the {@link net.dv8tion.jda.core.entities.User User's} * {@link net.dv8tion.jda.core.entities.Member Member} object (if the User was in the Guild) * until Discord sends the {@link net.dv8tion.jda.core.events.guild.member.GuildMemberLeaveEvent GuildMemberLeaveEvent}. * * <p>Possible {@link net.dv8tion.jda.core.requests.ErrorResponse ErrorResponses} caused by * the returned {@link net.dv8tion.jda.core.requests.RestAction RestAction} include the following: * <ul> * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_PERMISSIONS MISSING_PERMISSIONS} * <br>The target Member cannot be banned due to a permission discrepancy</li> * * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_ACCESS MISSING_ACCESS} * <br>We were removed from the Guild before finishing the task</li> * * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#UNKNOWN_MEMBER UNKNOWN_MEMBER} * <br>The specified Member was removed from the Guild before finishing the task</li> * </ul> * * @param userId * The id of the {@link net.dv8tion.jda.core.entities.User User} to ban. * @param delDays * The history of messages, in days, that will be deleted. * @param reason * The reason for this action or {@code null} if there is no specified reason * * @throws net.dv8tion.jda.core.exceptions.PermissionException * <ul> * <li>If the logged in account does not have the {@link net.dv8tion.jda.core.Permission#BAN_MEMBERS} permission.</li> * <li>If the logged in account cannot ban the other user due to permission hierarchy position. * <br>See {@link net.dv8tion.jda.core.utils.PermissionUtil#canInteract(Member, Member) PermissionUtil.canInteract(Member, Member)}</li> * </ul> * @throws IllegalArgumentException * If the provided amount of days (delDays) is less than 0. 
* @throws net.dv8tion.jda.core.exceptions.GuildUnavailableException * If the guild is temporarily not {@link net.dv8tion.jda.core.entities.Guild#isAvailable() available} * * @return {@link net.dv8tion.jda.core.requests.restaction.AuditableRestAction AuditableRestAction} */ @CheckReturnValue public AuditableRestAction<Void> ban(String userId, int delDays, String reason) { checkAvailable(); Checks.notBlank(userId, "userId"); checkPermission(Permission.BAN_MEMBERS); User user = guild.getJDA().getUserById(userId); if (user != null) // If we have the user cached then we should use the additional information available to use during the ban process. { return ban(user, delDays, reason); } Route.CompiledRoute route = Route.Guilds.BAN.compile(guild.getId(), userId); if (reason != null && !reason.isEmpty()) route = route.withQueryParams("reason", MiscUtil.encodeUTF8(reason)); if (delDays > 0) route = route.withQueryParams("delete-message-days", Integer.toString(delDays)); return new AuditableRestAction<Void>(guild.getJDA(), route) { @Override protected void handleResponse(Response response, Request<Void> request) { if (response.isOk()) request.onSuccess(null); else if (response.code == 404) request.onFailure(new IllegalArgumentException("User with provided id \"" + userId + "\" does not exist! Cannot ban a non-existent user!")); else request.onFailure(response); } }; } /** * Bans a {@link net.dv8tion.jda.core.entities.Member Member} and deletes messages sent by the user * based on the amount of delDays. * <br>If you wish to ban a member without deleting any messages, provide delDays with a value of 0. * This change will be applied immediately. * * <p><b>Note:</b> {@link net.dv8tion.jda.core.entities.Guild#getMembers()} will still contain the * {@link net.dv8tion.jda.core.entities.Member Member} until Discord sends the * {@link net.dv8tion.jda.core.events.guild.member.GuildMemberLeaveEvent GuildMemberLeaveEvent}. 
     *
     * <p>Possible {@link net.dv8tion.jda.core.requests.ErrorResponse ErrorResponses} caused by
     * the returned {@link net.dv8tion.jda.core.requests.RestAction RestAction} include the following:
     * <ul>
     *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_PERMISSIONS MISSING_PERMISSIONS}
     *     <br>The target Member cannot be banned due to a permission discrepancy</li>
     *
     *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_ACCESS MISSING_ACCESS}
     *     <br>We were removed from the Guild before finishing the task</li>
     *
     *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#UNKNOWN_MEMBER UNKNOWN_MEMBER}
     *     <br>The specified Member was removed from the Guild before finishing the task</li>
     * </ul>
     *
     * @param  member
     *         The {@link net.dv8tion.jda.core.entities.Member Member} to ban.
     * @param  delDays
     *         The history of messages, in days, that will be deleted.
     *
     * @throws net.dv8tion.jda.core.exceptions.PermissionException
     *         <ul>
     *             <li>If the logged in account does not have the {@link net.dv8tion.jda.core.Permission#BAN_MEMBERS} permission.</li>
     *             <li>If the logged in account cannot ban the other user due to permission hierarchy position.
     *             <br>See {@link net.dv8tion.jda.core.utils.PermissionUtil#canInteract(Member, Member) PermissionUtil.canInteract(Member, Member)}</li>
     *         </ul>
     * @throws java.lang.IllegalArgumentException
     *         <ul>
     *             <li>If the provided amount of days (delDays) is less than 0.</li>
     *             <li>If the provided member is {@code null}</li>
     *         </ul>
     *
     * @throws net.dv8tion.jda.core.exceptions.GuildUnavailableException
     *         If the guild is temporarily not {@link net.dv8tion.jda.core.entities.Guild#isAvailable() available}
     *
     * @return {@link net.dv8tion.jda.core.requests.restaction.AuditableRestAction AuditableRestAction}
     */
    @CheckReturnValue
    public AuditableRestAction<Void> ban(Member member, int delDays)
    {
        // Convenience overload: delegates to ban(Member, int, String) with no audit-log reason.
        return ban(member, delDays, null);
    }

    /**
     * Bans a {@link net.dv8tion.jda.core.entities.Member Member} and deletes messages sent by the user
     * based on the amount of delDays.
     * <br>If you wish to ban a member without deleting any messages, provide delDays with a value of 0.
     * This change will be applied immediately.
     *
     * <p><b>Note:</b> {@link net.dv8tion.jda.core.entities.Guild#getMembers()} will still contain the
     * {@link net.dv8tion.jda.core.entities.Member Member} until Discord sends the
     * {@link net.dv8tion.jda.core.events.guild.member.GuildMemberLeaveEvent GuildMemberLeaveEvent}.
     *
     * <p>Possible {@link net.dv8tion.jda.core.requests.ErrorResponse ErrorResponses} caused by
     * the returned {@link net.dv8tion.jda.core.requests.RestAction RestAction} include the following:
     * <ul>
     *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_PERMISSIONS MISSING_PERMISSIONS}
     *     <br>The target Member cannot be banned due to a permission discrepancy</li>
     *
     *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_ACCESS MISSING_ACCESS}
     *     <br>We were removed from the Guild before finishing the task</li>
     *
     *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#UNKNOWN_MEMBER UNKNOWN_MEMBER}
     *     <br>The specified Member was removed from the Guild before finishing the task</li>
     * </ul>
     *
     * @param  user
     *         The {@link net.dv8tion.jda.core.entities.User User} to ban.
     * @param  delDays
     *         The history of messages, in days, that will be deleted.
     *
     * @throws net.dv8tion.jda.core.exceptions.PermissionException
     *         <ul>
     *             <li>If the logged in account does not have the {@link net.dv8tion.jda.core.Permission#BAN_MEMBERS} permission.</li>
     *             <li>If the logged in account cannot ban the other user due to permission hierarchy position.
     *             <br>See {@link net.dv8tion.jda.core.utils.PermissionUtil#canInteract(Member, Member) PermissionUtil.canInteract(Member, Member)}</li>
     *         </ul>
     * @throws java.lang.IllegalArgumentException
     *         <ul>
     *             <li>If the provided amount of days (delDays) is less than 0.</li>
     *             <li>If the provided user is {@code null}</li>
     *         </ul>
     *
     * @throws net.dv8tion.jda.core.exceptions.GuildUnavailableException
     *         If the guild is temporarily not {@link net.dv8tion.jda.core.entities.Guild#isAvailable() available}
     *
     * @return {@link net.dv8tion.jda.core.requests.restaction.AuditableRestAction AuditableRestAction}
     */
    @CheckReturnValue
    public AuditableRestAction<Void> ban(User user, int delDays)
    {
        // Convenience overload: delegates to ban(User, int, String) with no audit-log reason.
        return ban(user, delDays, null);
    }

    /**
     * Bans a user specified by the userId and deletes messages sent by the user
     * based on the amount of delDays.
     * <br>If you wish to ban a user without deleting any messages, provide delDays with a value of 0.
     * This change will be applied immediately.
     *
     * <p><b>Note:</b> {@link net.dv8tion.jda.core.entities.Guild#getMembers()} will still contain the {@link net.dv8tion.jda.core.entities.User User's}
     * {@link net.dv8tion.jda.core.entities.Member Member} object (if the User was in the Guild)
     * until Discord sends the {@link net.dv8tion.jda.core.events.guild.member.GuildMemberLeaveEvent GuildMemberLeaveEvent}.
     *
     * <p>Possible {@link net.dv8tion.jda.core.requests.ErrorResponse ErrorResponses} caused by
     * the returned {@link net.dv8tion.jda.core.requests.RestAction RestAction} include the following:
     * <ul>
     *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_PERMISSIONS MISSING_PERMISSIONS}
     *     <br>The target Member cannot be banned due to a permission discrepancy</li>
     *
     *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_ACCESS MISSING_ACCESS}
     *     <br>We were removed from the Guild before finishing the task</li>
     *
     *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#UNKNOWN_MEMBER UNKNOWN_MEMBER}
     *     <br>The specified Member was removed from the Guild before finishing the task</li>
     * </ul>
     *
     * @param  userId
     *         The id of the {@link net.dv8tion.jda.core.entities.User User} to ban.
     * @param  delDays
     *         The history of messages, in days, that will be deleted.
     *
     * @throws net.dv8tion.jda.core.exceptions.PermissionException
     *         <ul>
     *             <li>If the logged in account does not have the {@link net.dv8tion.jda.core.Permission#BAN_MEMBERS} permission.</li>
     *             <li>If the logged in account cannot ban the other user due to permission hierarchy position.
     *             <br>See {@link net.dv8tion.jda.core.utils.PermissionUtil#canInteract(Member, Member) PermissionUtil.canInteract(Member, Member)}</li>
     *         </ul>
     * @throws IllegalArgumentException
     *         If the provided amount of days (delDays) is less than 0.
     * @throws net.dv8tion.jda.core.exceptions.GuildUnavailableException
     *         If the guild is temporarily not {@link net.dv8tion.jda.core.entities.Guild#isAvailable() available}
     *
     * @return {@link net.dv8tion.jda.core.requests.restaction.AuditableRestAction AuditableRestAction}
     */
    @CheckReturnValue
    public AuditableRestAction<Void> ban(String userId, int delDays)
    {
        // Convenience overload: delegates to ban(String, int, String) with no audit-log reason.
        return ban(userId, delDays, null);
    }

    /**
     * Unbans the specified {@link net.dv8tion.jda.core.entities.User User} from this Guild.
     *
     * <p>Possible {@link net.dv8tion.jda.core.requests.ErrorResponse ErrorResponses} caused by
     * the returned {@link net.dv8tion.jda.core.requests.RestAction RestAction} include the following:
     * <ul>
     *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_PERMISSIONS MISSING_PERMISSIONS}
     *     <br>The target Member cannot be unbanned due to a permission discrepancy</li>
     *
     *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_ACCESS MISSING_ACCESS}
     *     <br>We were removed from the Guild before finishing the task</li>
     *
     *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#UNKNOWN_USER UNKNOWN_USER}
     *     <br>The specified User is invalid</li>
     * </ul>
     *
     * @param  user
     *         The {@link net.dv8tion.jda.core.entities.User User} to unban.
     *
     * @throws net.dv8tion.jda.core.exceptions.PermissionException
     *         If the logged in account does not have the {@link net.dv8tion.jda.core.Permission#BAN_MEMBERS} permission.
     * @throws net.dv8tion.jda.core.exceptions.GuildUnavailableException
     *         If the guild is temporarily not {@link net.dv8tion.jda.core.entities.Guild#isAvailable() available}
     * @throws IllegalArgumentException
     *         If the provided user is null
     *
     * @return {@link net.dv8tion.jda.core.requests.restaction.AuditableRestAction AuditableRestAction}
     */
    @CheckReturnValue
    public AuditableRestAction<Void> unban(User user)
    {
        Checks.notNull(user, "user");

        // Delegates to the id-based overload, which performs the permission checks.
        return unban(user.getId());
    }

    /**
     * Unbans a user specified by the userId from this Guild.
     *
     * <p>Possible {@link net.dv8tion.jda.core.requests.ErrorResponse ErrorResponses} caused by
     * the returned {@link net.dv8tion.jda.core.requests.RestAction RestAction} include the following:
     * <ul>
     *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_PERMISSIONS MISSING_PERMISSIONS}
     *     <br>The target Member cannot be unbanned due to a permission discrepancy</li>
     *
     *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_ACCESS MISSING_ACCESS}
     *     <br>We were removed from the Guild before finishing the task</li>
     *
     *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#UNKNOWN_USER UNKNOWN_USER}
     *     <br>The specified User is invalid</li>
     * </ul>
     *
     * @param  userId
     *         The id of the {@link net.dv8tion.jda.core.entities.User User} to unban.
     *
     * @throws net.dv8tion.jda.core.exceptions.PermissionException
     *         If the logged in account does not have the {@link net.dv8tion.jda.core.Permission#BAN_MEMBERS} permission.
     * @throws net.dv8tion.jda.core.exceptions.GuildUnavailableException
     *         If the guild is temporarily not {@link net.dv8tion.jda.core.entities.Guild#isAvailable() available}
     * @throws IllegalArgumentException
     *         If the provided id is null or blank
     *
     * @return {@link net.dv8tion.jda.core.requests.restaction.AuditableRestAction AuditableRestAction}
     */
    @CheckReturnValue
    public AuditableRestAction<Void> unban(String userId)
    {
        checkAvailable();
        Checks.notBlank(userId, "userId");
        checkPermission(Permission.BAN_MEMBERS);

        Route.CompiledRoute route = Route.Guilds.UNBAN.compile(guild.getId(), userId);

        return new AuditableRestAction<Void>(guild.getJDA(), route)
        {
            @Override
            protected void handleResponse(Response response, Request<Void> request)
            {
                if (response.isOk())
                    request.onSuccess(null);
                else if (response.code == 404)
                    // Discord answers 404 when the snowflake does not resolve to any user.
                    request.onFailure(new IllegalArgumentException("User with provided id \"" + userId + "\" does not exist! Cannot unban a non-existent user!"));
                else
                    request.onFailure(response);
            }
        };
    }

    /**
     * Sets the Guild Deafened state of the {@link net.dv8tion.jda.core.entities.Member Member} based on the provided
     * boolean.
     *
     * <p><b>Note:</b> The Member's {@link net.dv8tion.jda.core.entities.GuildVoiceState#isGuildDeafened() GuildVoiceState.isGuildDeafened()} value won't change
     * until JDA receives the {@link net.dv8tion.jda.core.events.guild.voice.GuildVoiceGuildDeafenEvent GuildVoiceGuildDeafenEvent} event related to this change.
     *
     * <p>Possible {@link net.dv8tion.jda.core.requests.ErrorResponse ErrorResponses} caused by
     * the returned {@link net.dv8tion.jda.core.requests.RestAction RestAction} include the following:
     * <ul>
     *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_PERMISSIONS MISSING_PERMISSIONS}
     *     <br>The target Member cannot be deafened due to a permission discrepancy</li>
     *
     *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_ACCESS MISSING_ACCESS}
     *     <br>We were removed from the Guild before finishing the task</li>
     *
     *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#UNKNOWN_MEMBER UNKNOWN_MEMBER}
     *     <br>The specified Member was removed from the Guild before finishing the task</li>
     * </ul>
     *
     * @param  member
     *         The {@link net.dv8tion.jda.core.entities.Member Member} whose {@link GuildVoiceState VoiceState} is being changed.
     * @param  deafen
     *         Whether this {@link net.dv8tion.jda.core.entities.Member Member} should be deafened or undeafened.
     *
     * @throws net.dv8tion.jda.core.exceptions.PermissionException
     *         <ul>
     *             <li>If the logged in account does not have the {@link net.dv8tion.jda.core.Permission#VOICE_DEAF_OTHERS} permission.</li>
     *             <li>If the provided member is the Guild's owner. You cannot modify the owner of a Guild.</li>
     *         </ul>
     * @throws IllegalArgumentException
     *         If the provided member is not from this Guild or null.
* @throws net.dv8tion.jda.core.exceptions.GuildUnavailableException * If the guild is temporarily not {@link net.dv8tion.jda.core.entities.Guild#isAvailable() available} * * @return {@link net.dv8tion.jda.core.requests.restaction.AuditableRestAction AuditableRestAction} */ @CheckReturnValue public AuditableRestAction<Void> setDeafen(Member member, boolean deafen) { checkAvailable(); Checks.notNull(member, "member"); checkGuild(member.getGuild(), "member"); checkPermission(Permission.VOICE_DEAF_OTHERS); //We check the owner instead of Position because, apparently, Discord doesn't care about position for // muting and deafening, only whether the affected Member is the owner. if (guild.getOwner().equals(member)) throw new PermissionException("Cannot modified Guild Deafen status the Owner of the Guild"); if (member.getVoiceState().isGuildDeafened() == deafen) return new AuditableRestAction.EmptyRestAction<>(getJDA(), null); JSONObject body = new JSONObject().put("deaf", deafen); Route.CompiledRoute route = Route.Guilds.MODIFY_MEMBER.compile(guild.getId(), member.getUser().getId()); return new AuditableRestAction<Void>(guild.getJDA(), route, body) { @Override protected void handleResponse(Response response, Request<Void> request) { if (response.isOk()) request.onSuccess(null); else request.onFailure(response); } }; } /** * Sets the Guild Muted state state of the {@link net.dv8tion.jda.core.entities.Member Member} based on the provided * boolean. * * <p><b>Note:</b> The Member's {@link net.dv8tion.jda.core.entities.GuildVoiceState#isGuildMuted() GuildVoiceState.isGuildMuted()} value won't change * until JDA receives the {@link net.dv8tion.jda.core.events.guild.voice.GuildVoiceGuildMuteEvent GuildVoiceGuildMuteEvent} event related to this change. 
* * <p>Possible {@link net.dv8tion.jda.core.requests.ErrorResponse ErrorResponses} caused by * the returned {@link net.dv8tion.jda.core.requests.RestAction RestAction} include the following: * <ul> * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_PERMISSIONS MISSING_PERMISSIONS} * <br>The target Member cannot be muted due to a permission discrepancy</li> * * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_ACCESS MISSING_ACCESS} * <br>We were removed from the Guild before finishing the task</li> * * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#UNKNOWN_MEMBER UNKNOWN_MEMBER} * <br>The specified Member was removed from the Guild before finishing the task</li> * </ul> * * @param member * The {@link net.dv8tion.jda.core.entities.Member Member} who's {@link GuildVoiceState VoiceState} is being changed. * @param mute * Whether this {@link net.dv8tion.jda.core.entities.Member Member} should be muted or unmuted. * * @throws net.dv8tion.jda.core.exceptions.PermissionException * <ul> * <li>If the logged in account does not have the {@link net.dv8tion.jda.core.Permission#VOICE_MUTE_OTHERS} permission.</li> * <li>If the provided member is the Guild's owner. You cannot modify the owner of a Guild.</li> * </ul> * @throws java.lang.IllegalArgumentException * If the provided member is not from this Guild or null. 
* @throws net.dv8tion.jda.core.exceptions.GuildUnavailableException * If the guild is temporarily not {@link net.dv8tion.jda.core.entities.Guild#isAvailable() available} * * @return {@link net.dv8tion.jda.core.requests.restaction.AuditableRestAction AuditableRestAction} */ @CheckReturnValue public AuditableRestAction<Void> setMute(Member member, boolean mute) { checkAvailable(); Checks.notNull(member, "member"); checkGuild(member.getGuild(), "member"); checkPermission(Permission.VOICE_MUTE_OTHERS); //We check the owner instead of Position because, apparently, Discord doesn't care about position for // muting and deafening, only whether the affected Member is the owner. if (guild.getOwner().equals(member)) throw new PermissionException("Cannot modified Guild Mute status the Owner of the Guild"); if (member.getVoiceState().isGuildMuted() == mute) return new AuditableRestAction.EmptyRestAction<>(getJDA(), null); JSONObject body = new JSONObject().put("mute", mute); Route.CompiledRoute route = Route.Guilds.MODIFY_MEMBER.compile(guild.getId(), member.getUser().getId()); return new AuditableRestAction<Void>(guild.getJDA(), route, body) { @Override protected void handleResponse(Response response, Request<Void> request) { if (response.isOk()) request.onSuccess(null); else request.onFailure(response); } }; } /** * Atomically assigns the provided {@link net.dv8tion.jda.core.entities.Role Role} to the specified {@link net.dv8tion.jda.core.entities.Member Member}. * <br><b>This can be used together with other role modification methods as it does not require an updated cache!</b> * * <p>If multiple roles should be added/removed (efficiently) in one request * you may use {@link #modifyMemberRoles(Member, Collection, Collection) modifyMemberRoles(Member, Collection, Collection)} or similar methods. * * <p>If the specified role is already present in the member's set of roles this does nothing. 
     *
     * <p>Possible {@link net.dv8tion.jda.core.requests.ErrorResponse ErrorResponses} caused by
     * the returned {@link net.dv8tion.jda.core.requests.RestAction RestAction} include the following:
     * <ul>
     *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_PERMISSIONS MISSING_PERMISSIONS}
     *     <br>The Members Roles could not be modified due to a permission discrepancy</li>
     *
     *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_ACCESS MISSING_ACCESS}
     *     <br>We were removed from the Guild before finishing the task</li>
     *
     *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#UNKNOWN_MEMBER UNKNOWN_MEMBER}
     *     <br>The target Member was removed from the Guild before finishing the task</li>
     *
     *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#UNKNOWN_ROLE UNKNOWN_ROLE}
     *     <br>If the specified Role does not exist</li>
     * </ul>
     *
     * @param  member
     *         The target member who will receive the new role
     * @param  role
     *         The role which should be assigned atomically
     *
     * @throws java.lang.IllegalArgumentException
     *         <ul>
     *             <li>If the specified member/role are not from the current Guild</li>
     *             <li>Either member or role are {@code null}</li>
     *         </ul>
     * @throws net.dv8tion.jda.core.exceptions.PermissionException
     *         <ul>
     *             <li>If the provided role is above the highest role of the currently logged in account</li>
     *             <li>If the currently logged in account does not have
     *                 the permission {@link net.dv8tion.jda.core.Permission#MANAGE_ROLES Permission.MANAGE_ROLES}</li>
     *         </ul>
     *
     * @return {@link net.dv8tion.jda.core.requests.restaction.AuditableRestAction AuditableRestAction}
     */
    @CheckReturnValue
    public AuditableRestAction<Void> addSingleRoleToMember(Member member, Role role)
    {
        Checks.notNull(member, "Member");
        Checks.notNull(role, "Role");
        checkGuild(member.getGuild(), "Member is not from the same Guild!");
        checkGuild(role.getGuild(), "Role is not from the same Guild!");
        checkPermission(Permission.MANAGE_ROLES);
        // checkPosition enforces the role-hierarchy restriction documented in @throws above.
        checkPosition(role);

        Route.CompiledRoute route = Route.Guilds.ADD_MEMBER_ROLE.compile(guild.getId(), member.getUser().getId(), role.getId());
        return new AuditableRestAction<Void>(getJDA(), route)
        {
            @Override
            protected void handleResponse(Response response, Request<Void> request)
            {
                if (response.isOk())
                    request.onSuccess(null);
                else
                    request.onFailure(response);
            }
        };
    }

    /**
     * Atomically removes the provided {@link net.dv8tion.jda.core.entities.Role Role} from the specified {@link net.dv8tion.jda.core.entities.Member Member}.
     * <br><b>This can be used together with other role modification methods as it does not require an updated cache!</b>
     *
     * <p>If multiple roles should be added/removed (efficiently) in one request
     * you may use {@link #modifyMemberRoles(Member, Collection, Collection) modifyMemberRoles(Member, Collection, Collection)} or similar methods.
     *
     * <p>If the specified role is not present in the member's set of roles this does nothing.
     *
     * <p>Possible {@link net.dv8tion.jda.core.requests.ErrorResponse ErrorResponses} caused by
     * the returned {@link net.dv8tion.jda.core.requests.RestAction RestAction} include the following:
     * <ul>
     *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_PERMISSIONS MISSING_PERMISSIONS}
     *     <br>The Members Roles could not be modified due to a permission discrepancy</li>
     *
     *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_ACCESS MISSING_ACCESS}
     *     <br>We were removed from the Guild before finishing the task</li>
     *
     *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#UNKNOWN_MEMBER UNKNOWN_MEMBER}
     *     <br>The target Member was removed from the Guild before finishing the task</li>
     *
     *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#UNKNOWN_ROLE UNKNOWN_ROLE}
     *     <br>If the specified Role does not exist</li>
     * </ul>
     *
     * @param  member
     *         The target member who will lose the specified role
     * @param  role
     *         The role which should be removed atomically
     *
     * @throws java.lang.IllegalArgumentException
     *         <ul>
     *             <li>If the specified member/role are not from the
                 current Guild</li>
     *             <li>Either member or role are {@code null}</li>
     *         </ul>
     * @throws net.dv8tion.jda.core.exceptions.PermissionException
     *         <ul>
     *             <li>If the provided role is above the highest role of the currently logged in account</li>
     *             <li>If the currently logged in account does not have
     *                 the permission {@link net.dv8tion.jda.core.Permission#MANAGE_ROLES Permission.MANAGE_ROLES}</li>
     *         </ul>
     *
     * @return {@link net.dv8tion.jda.core.requests.restaction.AuditableRestAction AuditableRestAction}
     */
    @CheckReturnValue
    public AuditableRestAction<Void> removeSingleRoleFromMember(Member member, Role role)
    {
        Checks.notNull(member, "Member");
        Checks.notNull(role, "Role");
        checkGuild(member.getGuild(), "Member is not from the same Guild!");
        checkGuild(role.getGuild(), "Role is not from the same Guild!");
        checkPermission(Permission.MANAGE_ROLES);
        // checkPosition enforces the role-hierarchy restriction documented in @throws above.
        checkPosition(role);

        Route.CompiledRoute route = Route.Guilds.REMOVE_MEMBER_ROLE.compile(guild.getId(), member.getUser().getId(), role.getId());
        return new AuditableRestAction<Void>(getJDA(), route)
        {
            @Override
            protected void handleResponse(Response response, Request<Void> request)
            {
                if (response.isOk())
                    request.onSuccess(null);
                else
                    request.onFailure(response);
            }
        };
    }

    /**
     * Adds all provided {@link net.dv8tion.jda.core.entities.Role Roles}
     * to the specified {@link net.dv8tion.jda.core.entities.Member Member}
     *
     * <h1>Warning</h1>
     * <b>This may <u>not</u> be used together with any other role add/remove/modify methods for the same Member
     * within one event listener cycle! The changes made by this require cache updates which are triggered by
     * lifecycle events which are received later. This may only be called again once the specific Member has been updated
     * by a {@link net.dv8tion.jda.core.events.guild.member.GuildMemberRoleAddEvent GuildMemberRoleAddEvent}.
     * <br>To add <u>and</u> remove Roles from a Member you should use {@link #modifyMemberRoles(Member, Collection, Collection)}</b>
     *
     * <p>Possible {@link net.dv8tion.jda.core.requests.ErrorResponse ErrorResponses} caused by
     * the returned {@link net.dv8tion.jda.core.requests.RestAction RestAction} include the following:
     * <ul>
     *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_PERMISSIONS MISSING_PERMISSIONS}
     *     <br>The Members Roles could not be modified due to a permission discrepancy</li>
     *
     *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_ACCESS MISSING_ACCESS}
     *     <br>We were removed from the Guild before finishing the task</li>
     *
     *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#UNKNOWN_MEMBER UNKNOWN_MEMBER}
     *     <br>The target Member was removed from the Guild before finishing the task</li>
     * </ul>
     *
     * @param  member
     *         Not-null {@link net.dv8tion.jda.core.entities.Member Member} that will receive all provided roles
     * @param  roles
     *         Not-null Roles that should be added to the specified Member
     *
     * @throws net.dv8tion.jda.core.exceptions.GuildUnavailableException
     *         If the guild is temporarily not {@link net.dv8tion.jda.core.entities.Guild#isAvailable() available}
     * @throws net.dv8tion.jda.core.exceptions.PermissionException
     *         If the provided roles are higher in the Guild's hierarchy
     *         and thus cannot be modified by the currently logged in account
     * @throws IllegalArgumentException
     *         <ul>
     *             <li>If any of the provided entities is null</li>
     *             <li>If any of the provided entities is not in this Guild</li>
     *             <li>If any of the provided {@link net.dv8tion.jda.core.entities.Role Roles} is the {@code Public Role} of the Guild</li>
     *             <li>If any of the provided {@link net.dv8tion.jda.core.entities.Role Roles} are {@code managed}
     *             <br>Managed Roles can only be applied by the applications that manage them (e.g. Twitch Subscriber Roles)</li>
     *         </ul>
     *
     * @return {@link net.dv8tion.jda.core.requests.restaction.AuditableRestAction AuditableRestAction}
     *
     * @see    #addRolesToMember(Member, Collection)
     * @see    #modifyMemberRoles(Member, Role...)
     */
    @CheckReturnValue
    public AuditableRestAction<Void> addRolesToMember(Member member, Role... roles)
    {
        // Varargs convenience: add-only delegation to modifyMemberRoles with an empty removal set.
        return modifyMemberRoles(member, Arrays.asList(roles), Collections.emptyList());
    }

    /**
     * Adds all provided {@link net.dv8tion.jda.core.entities.Role Roles}
     * to the specified {@link net.dv8tion.jda.core.entities.Member Member}
     *
     * <h1>Warning</h1>
     * <b>This may <u>not</u> be used together with any other role add/remove/modify methods for the same Member
     * within one event listener cycle! The changes made by this require cache updates which are triggered by
     * lifecycle events which are received later. This may only be called again once the specific Member has been updated
     * by a {@link net.dv8tion.jda.core.events.guild.member.GuildMemberRoleAddEvent GuildMemberRoleAddEvent}.
     * <br>To add <u>and</u> remove Roles from a Member you should use {@link #modifyMemberRoles(Member, Collection, Collection)}</b>
     *
     * <p>Possible {@link net.dv8tion.jda.core.requests.ErrorResponse ErrorResponses} caused by
     * the returned {@link net.dv8tion.jda.core.requests.RestAction RestAction} include the following:
     * <ul>
     *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_PERMISSIONS MISSING_PERMISSIONS}
     *     <br>The Members Roles could not be modified due to a permission discrepancy</li>
     *
     *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_ACCESS MISSING_ACCESS}
     *     <br>We were removed from the Guild before finishing the task</li>
     *
     *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#UNKNOWN_MEMBER UNKNOWN_MEMBER}
     *     <br>The target Member was removed from the Guild before finishing the task</li>
     * </ul>
     *
     * @param  member
     *         Not-null {@link net.dv8tion.jda.core.entities.Member Member} that will receive all provided roles
     * @param  roles
     *         Not-null Roles that should be added to the specified Member
     *
     * @throws net.dv8tion.jda.core.exceptions.GuildUnavailableException
     *         If the guild is temporarily not {@link net.dv8tion.jda.core.entities.Guild#isAvailable() available}
     * @throws net.dv8tion.jda.core.exceptions.PermissionException
     *         If the provided roles are higher in the Guild's hierarchy
     *         and thus cannot be modified by the currently logged in account
     * @throws IllegalArgumentException
     *         <ul>
     *             <li>If any of the provided entities is null</li>
     *             <li>If any of the provided entities is not in this Guild</li>
     *             <li>If any of the provided {@link net.dv8tion.jda.core.entities.Role Roles} is the {@code Public Role} of the Guild</li>
     *             <li>If any of the provided {@link net.dv8tion.jda.core.entities.Role Roles} are {@code managed}
     *             <br>Managed Roles can only be applied by the applications that manage them (e.g. Twitch Subscriber Roles)</li>
     *         </ul>
     *
     * @return {@link net.dv8tion.jda.core.requests.restaction.AuditableRestAction AuditableRestAction}
     *
     * @see    #addRolesToMember(Member, Role...)
     * @see    #modifyMemberRoles(Member, Collection)
     */
    @CheckReturnValue
    public AuditableRestAction<Void> addRolesToMember(Member member, Collection<Role> roles)
    {
        // Collection convenience: add-only delegation to modifyMemberRoles with an empty removal set.
        return modifyMemberRoles(member, roles, Collections.emptyList());
    }

    /**
     * Removes all provided {@link net.dv8tion.jda.core.entities.Role Roles}
     * from the specified {@link net.dv8tion.jda.core.entities.Member Member}
     *
     * <h1>Warning</h1>
     * <b>This may <u>not</u> be used together with any other role add/remove/modify methods for the same Member
     * within one event listener cycle! The changes made by this require cache updates which are triggered by
     * lifecycle events which are received later. This may only be called again once the specific Member has been updated
     * by a {@link net.dv8tion.jda.core.events.guild.member.GuildMemberRoleRemoveEvent GuildMemberRoleRemoveEvent}.
     * <br>To add <u>and</u> remove Roles from a Member you should use {@link #modifyMemberRoles(Member, Collection, Collection)}</b>
     *
     * <p>Possible {@link net.dv8tion.jda.core.requests.ErrorResponse ErrorResponses} caused by
     * the returned {@link net.dv8tion.jda.core.requests.RestAction RestAction} include the following:
     * <ul>
     *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_PERMISSIONS MISSING_PERMISSIONS}
     *     <br>The Members Roles could not be modified due to a permission discrepancy</li>
     *
     *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_ACCESS MISSING_ACCESS}
     *     <br>We were removed from the Guild before finishing the task</li>
     *
     *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#UNKNOWN_MEMBER UNKNOWN_MEMBER}
     *     <br>The target Member was removed from the Guild before finishing the task</li>
     * </ul>
     *
     * @param  member
     *         Not-null {@link net.dv8tion.jda.core.entities.Member Member} from which to remove the {@link net.dv8tion.jda.core.entities.Role Roles}
     * @param  roles
     *         Not-null Roles that should be removed from the specified Member
     *
     * @throws net.dv8tion.jda.core.exceptions.GuildUnavailableException
     *         If the guild is temporarily not {@link net.dv8tion.jda.core.entities.Guild#isAvailable() available}
     * @throws net.dv8tion.jda.core.exceptions.PermissionException
     *         If the provided roles are higher in the Guild's hierarchy
     *         and thus cannot be modified by the currently logged in account
     * @throws IllegalArgumentException
     *         <ul>
     *             <li>If any of the provided entities is null</li>
     *             <li>If any of the provided entities is not in this Guild</li>
     *             <li>If any of the provided {@link net.dv8tion.jda.core.entities.Role Roles} is the {@code Public Role} of the Guild</li>
     *             <li>If any of the provided {@link net.dv8tion.jda.core.entities.Role Roles} are {@code managed}
     *             <br>Managed Roles can only be applied by the applications that manage them (e.g. Twitch Subscriber Roles)</li>
     *         </ul>
     *
     * @return {@link net.dv8tion.jda.core.requests.restaction.AuditableRestAction AuditableRestAction}
     *
     * @see    #addRolesToMember(Member, Collection)
     * @see    #modifyMemberRoles(Member, Role...)
     */
    @CheckReturnValue
    public AuditableRestAction<Void> removeRolesFromMember(Member member, Role... roles)
    {
        // Varargs convenience: remove-only delegation to modifyMemberRoles with an empty addition set.
        return modifyMemberRoles(member, Collections.emptyList(), Arrays.asList(roles));
    }

    /**
     * Removes all provided {@link net.dv8tion.jda.core.entities.Role Roles}
     * from the specified {@link net.dv8tion.jda.core.entities.Member Member}
     *
     * <h1>Warning</h1>
     * <b>This may <u>not</u> be used together with any other role add/remove/modify methods for the same Member
     * within one event listener cycle! The changes made by this require cache updates which are triggered by
     * lifecycle events which are received later. This may only be called again once the specific Member has been updated
     * by a {@link net.dv8tion.jda.core.events.guild.member.GuildMemberRoleRemoveEvent GuildMemberRoleRemoveEvent}.
     * <br>To add <u>and</u> remove Roles from a Member you should use {@link #modifyMemberRoles(Member, Collection, Collection)}</b>
     *
     * <p>Possible {@link net.dv8tion.jda.core.requests.ErrorResponse ErrorResponses} caused by
     * the returned {@link net.dv8tion.jda.core.requests.RestAction RestAction} include the following:
     * <ul>
     *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_PERMISSIONS MISSING_PERMISSIONS}
     *     <br>The Members Roles could not be modified due to a permission discrepancy</li>
     *
     *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_ACCESS MISSING_ACCESS}
     *     <br>We were removed from the Guild before finishing the task</li>
     *
     *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#UNKNOWN_MEMBER UNKNOWN_MEMBER}
     *     <br>The target Member was removed from the Guild before finishing the task</li>
     * </ul>
     *
     * @param  member
     *         Not-null {@link net.dv8tion.jda.core.entities.Member Member} from which to remove the {@link net.dv8tion.jda.core.entities.Role Roles}
     * @param  roles
     *         Not-null Roles that should be removed from the specified Member
     *
     * @throws net.dv8tion.jda.core.exceptions.GuildUnavailableException
     *         If the guild is temporarily not {@link net.dv8tion.jda.core.entities.Guild#isAvailable() available}
     * @throws net.dv8tion.jda.core.exceptions.PermissionException
     *         If the provided roles are higher in the Guild's hierarchy
     *         and thus cannot be modified by the currently logged in account
     * @throws IllegalArgumentException
     *         <ul>
     *             <li>If any of the provided entities is null</li>
     *             <li>If any of the provided entities is not in this Guild</li>
     *             <li>If any of the provided {@link net.dv8tion.jda.core.entities.Role Roles} is the {@code Public Role} of the Guild</li>
     *             <li>If any of the provided {@link net.dv8tion.jda.core.entities.Role Roles} are {@code managed}
     *             <br>Managed Roles can only be applied by the applications that manage them (e.g. Twitch Subscriber Roles)</li>
     *         </ul>
     *
     * @return {@link net.dv8tion.jda.core.requests.restaction.AuditableRestAction AuditableRestAction}
     *
     * @see    #addRolesToMember(Member, Role...)
     * @see    #modifyMemberRoles(Member, Collection)
     */
    @CheckReturnValue
    public AuditableRestAction<Void> removeRolesFromMember(Member member, Collection<Role> roles)
    {
        // Collection convenience: remove-only delegation to modifyMemberRoles with an empty addition set.
        return modifyMemberRoles(member, Collections.emptyList(), roles);
    }

    /**
     * Modifies the {@link net.dv8tion.jda.core.entities.Role Roles} of the specified {@link net.dv8tion.jda.core.entities.Member Member}
     * by adding and removing a collection of roles.
     * <br>None of the provided roles may be the <u>Public Role</u> of the current Guild.
     * <br>If a role is both in {@code rolesToAdd} and {@code rolesToRemove} it will be removed.
     *
     * <p>None of the provided collections may be null
     * <br>To only add or remove roles use either {@link #removeRolesFromMember(Member, Collection)} or {@link #addRolesToMember(Member, Collection)}
     *
     * <h1>Warning</h1>
     * <b>This may <u>not</u> be used together with any other role add/remove/modify methods for the same Member
     * within one event listener cycle! The changes made by this require cache updates which are triggered by
     * lifecycle events which are received later.
This may only be called again once the specific Member has been updated * by a {@link net.dv8tion.jda.core.events.guild.member.GenericGuildMemberEvent GenericGuildMemberEvent} targeting the same Member.</b> * * <p>Possible {@link net.dv8tion.jda.core.requests.ErrorResponse ErrorResponses} caused by * the returned {@link net.dv8tion.jda.core.requests.RestAction RestAction} include the following: * <ul> * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_PERMISSIONS MISSING_PERMISSIONS} * <br>The Members Roles could not be modified due to a permission discrepancy</li> * * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_ACCESS MISSING_ACCESS} * <br>We were removed from the Guild before finishing the task</li> * * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#UNKNOWN_MEMBER UNKNOWN_MEMBER} * <br>The target Member was removed from the Guild before finishing the task</li> * </ul> * * @param member * The {@link net.dv8tion.jda.core.entities.Member Member} that should be modified * @param rolesToAdd * A {@link java.util.Collection Collection} of {@link net.dv8tion.jda.core.entities.Role Roles} * to add to the current Roles the specified {@link net.dv8tion.jda.core.entities.Member Member} already has * @param rolesToRemove * A {@link java.util.Collection Collection} of {@link net.dv8tion.jda.core.entities.Role Roles} * to remove from the current Roles the specified {@link net.dv8tion.jda.core.entities.Member Member} already has * * @throws net.dv8tion.jda.core.exceptions.GuildUnavailableException * If the guild is temporarily not {@link net.dv8tion.jda.core.entities.Guild#isAvailable() available} * @throws net.dv8tion.jda.core.exceptions.PermissionException * If the provided roles are higher in the Guild's hierarchy * and thus cannot be modified by the currently logged in account * @throws IllegalArgumentException * <ul> * <li>If any of the provided arguments is {@code null}</li> * <li>If any of the specified Roles is managed or is the 
{@code Public Role} of the Guild</li> * </ul> * * @return {@link net.dv8tion.jda.core.requests.restaction.AuditableRestAction AuditableRestAction} */ @CheckReturnValue public AuditableRestAction<Void> modifyMemberRoles(Member member, Collection<Role> rolesToAdd, Collection<Role> rolesToRemove) { checkAvailable(); Checks.notNull(member, "member"); Checks.notNull(rolesToAdd, "Collection containing roles to be added to the member"); Checks.notNull(rolesToRemove, "Collection containing roles to be removed from the member"); checkGuild(member.getGuild(), "member"); checkPermission(Permission.MANAGE_ROLES); rolesToAdd.forEach(role -> { Checks.notNull(role, "role in rolesToAdd"); checkGuild(role.getGuild(), "role: " + role.toString()); checkPosition(role); if (role.isManaged()) throw new IllegalArgumentException("Cannot add a Managed role to a Member. Role: " + role.toString()); }); rolesToRemove.forEach(role -> { Checks.notNull(role, "role in rolesToRemove"); checkGuild(role.getGuild(), "role: " + role.toString()); checkPosition(role); if (role.isManaged()) throw new IllegalArgumentException("Cannot remove a Managed role from a Member. Role: " + role.toString()); }); Set<Role> currentRoles = new HashSet<>(((MemberImpl) member).getRoleSet()); currentRoles.addAll(rolesToAdd); currentRoles.removeAll(rolesToRemove); if (currentRoles.contains(guild.getPublicRole())) throw new IllegalArgumentException("Cannot add the PublicRole of a Guild to a Member. 
All members have this role by default!"); JSONObject body = new JSONObject() .put("roles", currentRoles.stream().map(Role::getId).collect(Collectors.toList())); Route.CompiledRoute route = Route.Guilds.MODIFY_MEMBER.compile(guild.getId(), member.getUser().getId()); return new AuditableRestAction<Void>(guild.getJDA(), route, body) { @Override protected void handleResponse(Response response, Request<Void> request) { if (response.isOk()) request.onSuccess(null); else request.onFailure(response); } }; } /** * Modifies the complete {@link net.dv8tion.jda.core.entities.Role Role} set of the specified {@link net.dv8tion.jda.core.entities.Member Member} * <br>The provided roles will replace all current Roles of the specified Member. * * <h1>Warning</h1> * <b>This may <u>not</u> be used together with any other role add/remove/modify methods for the same Member * within one event listener cycle! The changes made by this require cache updates which are triggered by * lifecycle events which are received later. 
This may only be called again once the specific Member has been updated * by a {@link net.dv8tion.jda.core.events.guild.member.GenericGuildMemberEvent GenericGuildMemberEvent} targeting the same Member.</b> * * <p><b>The new roles <u>must not</u> contain the Public Role of the Guild</b> * * <p>Possible {@link net.dv8tion.jda.core.requests.ErrorResponse ErrorResponses} caused by * the returned {@link net.dv8tion.jda.core.requests.RestAction RestAction} include the following: * <ul> * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_PERMISSIONS MISSING_PERMISSIONS} * <br>The Members Roles could not be modified due to a permission discrepancy</li> * * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_ACCESS MISSING_ACCESS} * <br>We were removed from the Guild before finishing the task</li> * * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#UNKNOWN_MEMBER UNKNOWN_MEMBER} * <br>The target Member was removed from the Guild before finishing the task</li> * </ul> * * @param member * A {@link net.dv8tion.jda.core.entities.Member Member} of which to override the Roles of * @param roles * New collection of {@link net.dv8tion.jda.core.entities.Role Roles} for the specified Member * * @throws net.dv8tion.jda.core.exceptions.GuildUnavailableException * If the guild is temporarily not {@link net.dv8tion.jda.core.entities.Guild#isAvailable() available} * @throws net.dv8tion.jda.core.exceptions.PermissionException * If the provided roles are higher in the Guild's hierarchy * and thus cannot be modified by the currently logged in account * @throws IllegalArgumentException * <ul> * <li>If any of the provided arguments is {@code null}</li> * <li>If any of the provided arguments is not from this Guild</li> * <li>If any of the specified {@link net.dv8tion.jda.core.entities.Role Roles} is managed</li> * <li>If any of the specified {@link net.dv8tion.jda.core.entities.Role Roles} is the {@code Public Role} of this Guild</li> * </ul> * * @return {@link 
net.dv8tion.jda.core.requests.restaction.AuditableRestAction AuditableRestAction} * * @see #modifyMemberRoles(Member, Collection) */ @CheckReturnValue public AuditableRestAction<Void> modifyMemberRoles(Member member, Role... roles) { return modifyMemberRoles(member, Arrays.asList(roles)); } /** * Modifies the complete {@link net.dv8tion.jda.core.entities.Role Role} set of the specified {@link net.dv8tion.jda.core.entities.Member Member} * <br>The provided roles will replace all current Roles of the specified Member. * * <p><u>The new roles <b>must not</b> contain the Public Role of the Guild</u> * * <h1>Warning</h1> * <b>This may <u>not</u> be used together with any other role add/remove/modify methods for the same Member * within one event listener cycle! The changes made by this require cache updates which are triggered by * lifecycle events which are received later. This may only be called again once the specific Member has been updated * by a {@link net.dv8tion.jda.core.events.guild.member.GenericGuildMemberEvent GenericGuildMemberEvent} targeting the same Member.</b> * * <p>Possible {@link net.dv8tion.jda.core.requests.ErrorResponse ErrorResponses} caused by * the returned {@link net.dv8tion.jda.core.requests.RestAction RestAction} include the following: * <ul> * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_PERMISSIONS MISSING_PERMISSIONS} * <br>The Members Roles could not be modified due to a permission discrepancy</li> * * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_ACCESS MISSING_ACCESS} * <br>We were removed from the Guild before finishing the task</li> * * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#UNKNOWN_MEMBER UNKNOWN_MEMBER} * <br>The target Member was removed from the Guild before finishing the task</li> * </ul> * * @param member * A {@link net.dv8tion.jda.core.entities.Member Member} of which to override the Roles of * @param roles * New collection of {@link net.dv8tion.jda.core.entities.Role Roles} 
for the specified Member * * @throws net.dv8tion.jda.core.exceptions.GuildUnavailableException * If the guild is temporarily not {@link net.dv8tion.jda.core.entities.Guild#isAvailable() available} * @throws net.dv8tion.jda.core.exceptions.PermissionException * If the provided roles are higher in the Guild's hierarchy * and thus cannot be modified by the currently logged in account * @throws IllegalArgumentException * <ul> * <li>If any of the provided arguments is {@code null}</li> * <li>If any of the provided arguments is not from this Guild</li> * <li>If any of the specified {@link net.dv8tion.jda.core.entities.Role Roles} is managed</li> * <li>If any of the specified {@link net.dv8tion.jda.core.entities.Role Roles} is the {@code Public Role} of this Guild</li> * </ul> * * @return {@link net.dv8tion.jda.core.requests.restaction.AuditableRestAction AuditableRestAction} * * @see #modifyMemberRoles(Member, Collection) */ @CheckReturnValue public AuditableRestAction<Void> modifyMemberRoles(Member member, Collection<Role> roles) { checkAvailable(); Checks.notNull(member, "member"); Checks.notNull(roles, "roles"); checkGuild(member.getGuild(), "member"); roles.forEach(role -> { Checks.notNull(role, "role in collection"); checkGuild(role.getGuild(), "role: " + role.toString()); checkPosition(role); }); if (roles.contains(guild.getPublicRole())) throw new IllegalArgumentException("Cannot add the PublicRole of a Guild to a Member. All members have this role by default!"); //Make sure that the current managed roles are preserved and no new ones are added. List<Role> currentManaged = roles.stream().filter(Role::isManaged).collect(Collectors.toList()); List<Role> newManaged = roles.stream().filter(Role::isManaged).collect(Collectors.toList()); if (currentManaged.size() != 0 || newManaged.size() != 0) { currentManaged.removeIf(newManaged::contains); if (currentManaged.size() > 0) throw new IllegalArgumentException("Cannot remove managed roles from a member! 
Roles: " + currentManaged.toString()); if (newManaged.size() > 0) throw new IllegalArgumentException("Cannot add managed roles to a member! Roles: " + newManaged.toString()); } //This is identical to the rest action stuff in #modifyMemberRoles(Member, Collection<Role>, Collection<Role>) JSONObject body = new JSONObject() .put("roles", roles.stream().map(Role::getId).collect(Collectors.toList())); Route.CompiledRoute route = Route.Guilds.MODIFY_MEMBER.compile(guild.getId(), member.getUser().getId()); return new AuditableRestAction<Void>(guild.getJDA(), route, body) { @Override protected void handleResponse(Response response, Request<Void> request) { if (response.isOk()) request.onSuccess(null); else request.onFailure(response); } }; } /** * Transfers the Guild ownership to the specified {@link net.dv8tion.jda.core.entities.Member Member} * <br>Only available if the currently logged in account is the owner of this Guild * * <p>Possible {@link net.dv8tion.jda.core.requests.ErrorResponse ErrorResponses} caused by * the returned {@link net.dv8tion.jda.core.requests.RestAction RestAction} include the following: * <ul> * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_PERMISSIONS MISSING_PERMISSIONS} * <br>The currently logged in account lost ownership before completing the task</li> * * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_ACCESS MISSING_ACCESS} * <br>We were removed from the Guild before finishing the task</li> * * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#UNKNOWN_MEMBER UNKNOWN_MEMBER} * <br>The target Member was removed from the Guild before finishing the task</li> * </ul> * * @param newOwner * Not-null Member to transfer ownership to * * @throws net.dv8tion.jda.core.exceptions.GuildUnavailableException * If the guild is temporarily not {@link net.dv8tion.jda.core.entities.Guild#isAvailable() available} * @throws net.dv8tion.jda.core.exceptions.PermissionException * If the currently logged in account is not the 
owner of this Guild * @throws IllegalArgumentException * <ul> * <li>If the specified Member is {@code null} or not from the same Guild</li> * <li>If the specified Member already is the Guild owner</li> * <li>If the specified Member is a bot account ({@link net.dv8tion.jda.core.AccountType#BOT AccountType.BOT})</li> * </ul> * * @return {@link net.dv8tion.jda.core.requests.restaction.AuditableRestAction AuditableRestAction} */ @CheckReturnValue public AuditableRestAction<Void> transferOwnership(Member newOwner) { checkAvailable(); Checks.notNull(newOwner, "newOwner member"); checkGuild(newOwner.getGuild(), "newOwner member"); if (!guild.getOwner().equals(guild.getSelfMember())) throw new PermissionException("The logged in account must be the owner of this Guild to be able to transfer ownership"); if (guild.getSelfMember().equals(newOwner)) throw new IllegalArgumentException("The member provided as the newOwner is the currently logged in account. Provide a different member to give ownership to."); if (newOwner.getUser().isBot()) throw new IllegalArgumentException("Cannot transfer ownership of a Guild to a Bot!"); JSONObject body = new JSONObject().put("owner_id", newOwner.getUser().getId()); Route.CompiledRoute route = Route.Guilds.MODIFY_GUILD.compile(guild.getId()); return new AuditableRestAction<Void>(guild.getJDA(), route, body) { @Override protected void handleResponse(Response response, Request<Void> request) { if (response.isOk()) request.onSuccess(null); else request.onFailure(response); } }; } /** * Creates a new {@link net.dv8tion.jda.core.entities.TextChannel TextChannel} in this Guild. 
* For this to be successful, the logged in account has to have the {@link net.dv8tion.jda.core.Permission#MANAGE_CHANNEL MANAGE_CHANNEL} Permission * * <p>Possible {@link net.dv8tion.jda.core.requests.ErrorResponse ErrorResponses} caused by * the returned {@link net.dv8tion.jda.core.requests.RestAction RestAction} include the following: * <ul> * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_PERMISSIONS MISSING_PERMISSIONS} * <br>The channel could not be created due to a permission discrepancy</li> * * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_ACCESS MISSING_ACCESS} * <br>We were removed from the Guild before finishing the task</li> * </ul> * * @param name * The name of the TextChannel to create * * @throws net.dv8tion.jda.core.exceptions.PermissionException * If the logged in account does not have the {@link net.dv8tion.jda.core.Permission#MANAGE_CHANNEL} permission * @throws net.dv8tion.jda.core.exceptions.GuildUnavailableException * If the guild is temporarily not {@link net.dv8tion.jda.core.entities.Guild#isAvailable() available} * @throws IllegalArgumentException * If the provided name is {@code null} or less than 2 characters or greater than 100 characters in length * * @return A specific {@link net.dv8tion.jda.core.requests.restaction.ChannelAction ChannelAction} * <br>This action allows to set fields for the new TextChannel before creating it */ @CheckReturnValue public ChannelAction createTextChannel(String name) { checkAvailable(); checkPermission(Permission.MANAGE_CHANNEL); Checks.notNull(name, "name"); if (name.length() < 2 || name.length() > 100) throw new IllegalArgumentException("Provided name must be 2 - 100 characters in length"); Route.CompiledRoute route = Route.Guilds.CREATE_CHANNEL.compile(guild.getId()); return new ChannelAction(route, name, guild, false); } /** * Creates a new {@link net.dv8tion.jda.core.entities.VoiceChannel VoiceChannel} in this Guild. 
* For this to be successful, the logged in account has to have the {@link net.dv8tion.jda.core.Permission#MANAGE_CHANNEL MANAGE_CHANNEL} Permission. * * <p>Possible {@link net.dv8tion.jda.core.requests.ErrorResponse ErrorResponses} caused by * the returned {@link net.dv8tion.jda.core.requests.RestAction RestAction} include the following: * <ul> * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_PERMISSIONS MISSING_PERMISSIONS} * <br>The channel could not be created due to a permission discrepancy</li> * * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_ACCESS MISSING_ACCESS} * <br>We were removed from the Guild before finishing the task</li> * </ul> * * @param name * The name of the VoiceChannel to create * * @throws net.dv8tion.jda.core.exceptions.PermissionException * If the logged in account does not have the {@link net.dv8tion.jda.core.Permission#MANAGE_CHANNEL} permission * @throws net.dv8tion.jda.core.exceptions.GuildUnavailableException * If the guild is temporarily not {@link net.dv8tion.jda.core.entities.Guild#isAvailable() available} * @throws IllegalArgumentException * If the provided name is {@code null} or less than 2 characters or greater than 100 characters in length * * @return A specific {@link net.dv8tion.jda.core.requests.restaction.ChannelAction ChannelAction} * <br>This action allows to set fields for the new VoiceChannel before creating it */ @CheckReturnValue public ChannelAction createVoiceChannel(String name) { checkAvailable(); checkPermission(Permission.MANAGE_CHANNEL); Checks.notNull(name, "name"); if (name.length() < 2 || name.length() > 100) throw new IllegalArgumentException("Provided name must be 2 to 100 characters in length"); Route.CompiledRoute route = Route.Guilds.CREATE_CHANNEL.compile(guild.getId()); return new ChannelAction(route, name, guild, true); } /** * Creates a copy of the specified {@link net.dv8tion.jda.core.entities.Channel Channel} * in this {@link net.dv8tion.jda.core.entities.Guild 
Guild}. * <br>The provided channel need not be in the same Guild for this to work! * * This copies the following elements: * <ol> * <li>Name</li> * <li>Voice Elements (Bitrate, Userlimit)</li> * <li>Text Elements (Topic)</li> * <li>All permission overrides for Members/Roles</li> * </ol> * * <p>Possible {@link net.dv8tion.jda.core.requests.ErrorResponse ErrorResponses} caused by * the returned {@link net.dv8tion.jda.core.requests.RestAction RestAction} include the following: * <ul> * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_PERMISSIONS MISSING_PERMISSIONS} * <br>The channel could not be created due to a permission discrepancy</li> * * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_ACCESS MISSING_ACCESS} * <br>We were removed from the Guild before finishing the task</li> * </ul> * * @param channel * The {@link net.dv8tion.jda.core.entities.Channel Channel} to use for the copy template * * @throws java.lang.IllegalArgumentException * If the provided channel is {@code null} * @throws net.dv8tion.jda.core.exceptions.PermissionException * If the currently logged in account does not have the {@link net.dv8tion.jda.core.Permission#MANAGE_CHANNEL MANAGE_CHANNEL} Permission * * @return A specific {@link net.dv8tion.jda.core.requests.restaction.ChannelAction ChannelAction} * <br>This action allows to set fields for the new Channel before creating it! 
* * @since 3.1 * * @see #createTextChannel(String) * @see #createVoiceChannel(String) * @see net.dv8tion.jda.core.requests.restaction.ChannelAction ChannelAction */ @CheckReturnValue public ChannelAction createCopyOfChannel(Channel channel) { Checks.notNull(channel, "Channel"); checkPermission(Permission.MANAGE_CHANNEL); boolean isVoice = channel instanceof VoiceChannel; Route.CompiledRoute route = Route.Guilds.CREATE_CHANNEL.compile(guild.getId()); final ChannelAction action = new ChannelAction(route, channel.getName(), guild, isVoice); if (isVoice) { VoiceChannel voice = (VoiceChannel) channel; action.setBitrate(voice.getBitrate()) .setUserlimit(voice.getUserLimit()); } else { TextChannel text = (TextChannel) channel; action.setTopic(text.getTopic()); } for (PermissionOverride o : channel.getPermissionOverrides()) { if (o.isMemberOverride()) action.addPermissionOverride(o.getMember(), o.getAllowedRaw(), o.getDeniedRaw()); else action.addPermissionOverride(o.getRole(), o.getAllowedRaw(), o.getDeniedRaw()); } return action; } /** * Creates a new {@link net.dv8tion.jda.core.entities.Webhook Webhook} for the specified * {@link net.dv8tion.jda.core.entities.TextChannel TextChannel}. * * <p>Possible {@link net.dv8tion.jda.core.requests.ErrorResponse ErrorResponses} caused by * the returned {@link net.dv8tion.jda.core.requests.RestAction RestAction} include the following: * <ul> * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_PERMISSIONS MISSING_PERMISSIONS} * <br>The webhook could not be created due to a permission discrepancy</li> * * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_ACCESS MISSING_ACCESS} * <br>We were removed from the Guild before finishing the task</li> * </ul> * * @param channel * The target TextChannel to attach a new Webhook to. * @param name * The default name for the new Webhook. 
* * @throws net.dv8tion.jda.core.exceptions.PermissionException * If you do not hold the permission {@link net.dv8tion.jda.core.Permission#MANAGE_WEBHOOKS Manage Webhooks} * on the selected channel * @throws IllegalArgumentException * <ul> * <li>If any of the provided arguments is {@code null}</li> * <li>If the provided {@link net.dv8tion.jda.core.entities.TextChannel TextChannel} is not from this Guild</li> * </ul> * * @return A specific {@link net.dv8tion.jda.core.requests.restaction.WebhookAction WebhookAction} * <br>This action allows to set fields for the new webhook before creating it */ @CheckReturnValue public WebhookAction createWebhook(TextChannel channel, String name) { Checks.notNull(name, "Webhook name"); Checks.notNull(channel, "TextChannel"); checkGuild(channel.getGuild(), "channel"); if (!guild.getSelfMember().hasPermission(channel, Permission.MANAGE_WEBHOOKS)) throw new PermissionException(Permission.MANAGE_WEBHOOKS); Route.CompiledRoute route = Route.Channels.CREATE_WEBHOOK.compile(channel.getId()); return new WebhookAction(getJDA(), route, name); } /** * Creates a new {@link net.dv8tion.jda.core.entities.Role Role} in this Guild. * <br>It will be placed at the bottom (just over the Public Role) to avoid permission hierarchy conflicts. 
* <br>For this to be successful, the logged in account has to have the {@link net.dv8tion.jda.core.Permission#MANAGE_ROLES MANAGE_ROLES} Permission * * <p>Possible {@link net.dv8tion.jda.core.requests.ErrorResponse ErrorResponses} caused by * the returned {@link net.dv8tion.jda.core.requests.RestAction RestAction} include the following: * <ul> * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_PERMISSIONS MISSING_PERMISSIONS} * <br>The role could not be created due to a permission discrepancy</li> * * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_ACCESS MISSING_ACCESS} * <br>We were removed from the Guild before finishing the task</li> * * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MAX_ROLES_PER_GUILD MAX_ROLES_PER_GUILD} * <br>There are too many roles in this Guild</li> * </ul> * * @throws net.dv8tion.jda.core.exceptions.PermissionException * If the logged in account does not have the {@link net.dv8tion.jda.core.Permission#MANAGE_ROLES} Permission * @throws net.dv8tion.jda.core.exceptions.GuildUnavailableException * If the guild is temporarily not {@link net.dv8tion.jda.core.entities.Guild#isAvailable() available} * * @return {@link net.dv8tion.jda.core.requests.restaction.RoleAction RoleAction} * <br>Creates a new role with previously selected field values */ @CheckReturnValue public RoleAction createRole() { checkAvailable(); checkPermission(Permission.MANAGE_ROLES); Route.CompiledRoute route = Route.Roles.CREATE_ROLE.compile(guild.getId()); return new RoleAction(route, guild); } /** * Creates a new {@link net.dv8tion.jda.core.entities.Role Role} in this {@link net.dv8tion.jda.core.entities.Guild Guild} * with the same settings as the given {@link net.dv8tion.jda.core.entities.Role Role}. * <br>The position of the specified Role does not matter in this case! * * <p>It will be placed at the bottom (just over the Public Role) to avoid permission hierarchy conflicts. 
* <br>For this to be successful, the logged in account has to have the {@link net.dv8tion.jda.core.Permission#MANAGE_ROLES MANAGE_ROLES} Permission * and all {@link net.dv8tion.jda.core.Permission Permissions} the given {@link net.dv8tion.jda.core.entities.Role Role} has. * * <p>Possible {@link net.dv8tion.jda.core.requests.ErrorResponse ErrorResponses} caused by * the returned {@link net.dv8tion.jda.core.requests.RestAction RestAction} include the following: * <ul> * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_PERMISSIONS MISSING_PERMISSIONS} * <br>The role could not be created due to a permission discrepancy</li> * * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_ACCESS MISSING_ACCESS} * <br>We were removed from the Guild before finishing the task</li> * * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MAX_ROLES_PER_GUILD MAX_ROLES_PER_GUILD} * <br>There are too many roles in this Guild</li> * </ul> * * @param role * The {@link net.dv8tion.jda.core.entities.Role Role} that should be copied * * @throws net.dv8tion.jda.core.exceptions.PermissionException * If the logged in account does not have the {@link net.dv8tion.jda.core.Permission#MANAGE_ROLES} Permission and every Permission the provided Role has * @throws net.dv8tion.jda.core.exceptions.GuildUnavailableException * If the guild is temporarily not {@link net.dv8tion.jda.core.entities.Guild#isAvailable() available} * @throws java.lang.IllegalArgumentException * If the specified role is {@code null} or not from this Guild * * @return {@link net.dv8tion.jda.core.requests.restaction.RoleAction RoleAction} * <br>RoleAction with already copied values from the specified {@link net.dv8tion.jda.core.entities.Role Role} */ @CheckReturnValue public RoleAction createCopyOfRole(Role role) { return createRole() .setColor(role.getColor()) .setPermissions(role.getPermissionsRaw()) .setName(role.getName()) .setHoisted(role.isHoisted()) .setMentionable(role.isMentionable()); } /** * 
Creates a new {@link net.dv8tion.jda.core.entities.Emote Emote} in this Guild. * <br>If one or more Roles are specified the new Emote will only be available to Members with any of the specified Roles (see {@link Member#canInteract(Emote)}) * <br>For this to be successful, the logged in account has to have the {@link net.dv8tion.jda.core.Permission#MANAGE_EMOTES MANAGE_EMOTES} Permission. * * <p><b><u>Unicode emojis are not included as {@link net.dv8tion.jda.core.entities.Emote Emote}!</u></b> * <br>Roles may only be available for whitelisted accounts. * * <p>Possible {@link net.dv8tion.jda.core.requests.ErrorResponse ErrorResponses} caused by * the returned {@link net.dv8tion.jda.core.requests.RestAction RestAction} include the following: * <ul> * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_PERMISSIONS MISSING_PERMISSIONS} * <br>The emote could not be created due to a permission discrepancy</li> * * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_ACCESS MISSING_ACCESS} * <br>We were removed from the Guild before finishing the task</li> * </ul> * * @param name * The name for the new Emote * @param icon * The {@link net.dv8tion.jda.core.entities.Icon} for the new Emote * @param roles * The {@link net.dv8tion.jda.core.entities.Role Roles} the new Emote should be restricted to * <br>If no roles are provided the Emote will be available to all Members of this Guild * * @throws net.dv8tion.jda.core.exceptions.PermissionException * If the logged in account does not have the {@link net.dv8tion.jda.core.Permission#MANAGE_EMOTES MANAGE_EMOTES} Permission * @throws net.dv8tion.jda.core.exceptions.GuildUnavailableException * If the guild is temporarily not {@link net.dv8tion.jda.core.entities.Guild#isAvailable() available} * @throws net.dv8tion.jda.core.exceptions.AccountTypeException * If the logged in account is not from {@link net.dv8tion.jda.core.AccountType#CLIENT AccountType.CLIENT} * * @return {@link 
net.dv8tion.jda.core.requests.restaction.AuditableRestAction AuditableRestAction} - Type: {@link net.dv8tion.jda.core.entities.Emote Emote} * <br>The newly created Emote */ @CheckReturnValue public AuditableRestAction<Emote> createEmote(String name, Icon icon, Role... roles) { checkAvailable(); checkPermission(Permission.MANAGE_EMOTES); Checks.notNull(name, "emote name"); Checks.notNull(icon, "emote icon"); if (getJDA().getAccountType() != AccountType.CLIENT) throw new AccountTypeException(AccountType.CLIENT); JSONObject body = new JSONObject(); body.put("name", name); body.put("image", icon.getEncoding()); if (roles.length > 0) // making sure none of the provided roles are null before mapping them to the snowflake id body.put("roles", Stream.of(roles).filter(Objects::nonNull).map(ISnowflake::getId).collect(Collectors.toSet())); Route.CompiledRoute route = Route.Emotes.CREATE_EMOTE.compile(guild.getId()); return new AuditableRestAction<Emote>(getJDA(), route, body) { @Override protected void handleResponse(Response response, Request<Emote> request) { if (response.isOk()) { JSONObject obj = response.getObject(); final long id = obj.getLong("id"); String name = obj.getString("name"); EmoteImpl emote = new EmoteImpl(id, guild).setName(name); // managed is false by default, should always be false for emotes created by client accounts. 
JSONArray rolesArr = obj.getJSONArray("roles"); Set<Role> roleSet = emote.getRoleSet(); for (int i = 0; i < rolesArr.length(); i++) { roleSet.add(guild.getRoleById(rolesArr.getString(i))); } // put emote into cache ((GuildImpl) guild).getEmoteMap().put(id, emote); request.onSuccess(emote); } else request.onFailure(response); } }; } /** * Modifies the positional order of {@link net.dv8tion.jda.core.entities.Guild#getTextChannels() Guild.getTextChannels()} * using a specific {@link net.dv8tion.jda.core.requests.RestAction RestAction} extension to allow moving Channels * {@link net.dv8tion.jda.core.requests.restaction.order.OrderAction#moveUp(int) up}/{@link net.dv8tion.jda.core.requests.restaction.order.OrderAction#moveDown(int) down} * or {@link net.dv8tion.jda.core.requests.restaction.order.OrderAction#moveTo(int) to} a specific position. * <br>This uses <b>ascending</b> order with a 0 based index. * * <p>Possible {@link net.dv8tion.jda.core.requests.ErrorResponse ErrorResponses} include: * <ul> * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#UNKNOWN_CHANNEL UNNKOWN_CHANNEL} * <br>One of the channels has been deleted before the completion of the task</li> * * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_ACCESS MISSING_ACCESS} * <br>The currently logged in account was removed from the Guild</li> * </ul> * * @return {@link net.dv8tion.jda.core.requests.restaction.order.ChannelOrderAction ChannelOrderAction} - Type: {@link net.dv8tion.jda.core.entities.TextChannel TextChannel} */ @CheckReturnValue public ChannelOrderAction<TextChannel> modifyTextChannelPositions() { return new ChannelOrderAction<>(guild, ChannelType.TEXT); } /** * Modifies the positional order of {@link net.dv8tion.jda.core.entities.Guild#getVoiceChannels() Guild.getVoiceChannels()} * using a specific {@link net.dv8tion.jda.core.requests.RestAction RestAction} extension to allow moving Channels * {@link net.dv8tion.jda.core.requests.restaction.order.OrderAction#moveUp(int) 
up}/{@link net.dv8tion.jda.core.requests.restaction.order.OrderAction#moveDown(int) down} * or {@link net.dv8tion.jda.core.requests.restaction.order.OrderAction#moveTo(int) to} a specific position. * <br>This uses <b>ascending</b> order with a 0 based index. * * <p>Possible {@link net.dv8tion.jda.core.requests.ErrorResponse ErrorResponses} include: * <ul> * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#UNKNOWN_CHANNEL UNNKOWN_CHANNEL} * <br>One of the channels has been deleted before the completion of the task</li> * * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_ACCESS MISSING_ACCESS} * <br>The currently logged in account was removed from the Guild</li> * </ul> * * @return {@link net.dv8tion.jda.core.requests.restaction.order.ChannelOrderAction ChannelOrderAction} - Type: {@link net.dv8tion.jda.core.entities.VoiceChannel VoiceChannel} */ @CheckReturnValue public ChannelOrderAction<VoiceChannel> modifyVoiceChannelPositions() { return new ChannelOrderAction<>(guild, ChannelType.VOICE); } /** * Modifies the positional order of {@link net.dv8tion.jda.core.entities.Guild#getRoles() Guild.getRoles()} * using a specific {@link net.dv8tion.jda.core.requests.RestAction RestAction} extension to allow moving Roles * {@link net.dv8tion.jda.core.requests.restaction.order.OrderAction#moveUp(int) up}/{@link net.dv8tion.jda.core.requests.restaction.order.OrderAction#moveDown(int) down} * or {@link net.dv8tion.jda.core.requests.restaction.order.OrderAction#moveTo(int) to} a specific position. * * <p>This uses the ordering defined by Discord, which is <b>descending</b>! * <br>This means the highest role appears at index {@code 0} and the lower role at index {@code n - 1}. * <br>Providing {@code false} to {@link #modifyRolePositions(boolean)} will result in the ordering being * in ascending order, with the lower role at index {@code 0} and the highest at index {@code n - 1}. 
* <br>As a note: {@link net.dv8tion.jda.core.entities.Member#getRoles() Member.getRoles()} * and {@link net.dv8tion.jda.core.entities.Guild#getRoles() Guild.getRoles()} are both in descending order. * * <p>Possible {@link net.dv8tion.jda.core.requests.ErrorResponse ErrorResponses} include: * <ul> * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#UNKNOWN_ROLE UNKNOWN_ROLE} * <br>One of the roles was deleted before the completion of the task</li> * * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_ACCESS MISSING_ACCESS} * <br>The currently logged in account was removed from the Guild</li> * </ul> * * @return {@link net.dv8tion.jda.core.requests.restaction.order.RoleOrderAction RoleOrderAction} */ @CheckReturnValue public RoleOrderAction modifyRolePositions() { return modifyRolePositions(true); } /** * Modifies the positional order of {@link net.dv8tion.jda.core.entities.Guild#getRoles() Guild.getRoles()} * using a specific {@link net.dv8tion.jda.core.requests.RestAction RestAction} extension to allow moving Roles * {@link net.dv8tion.jda.core.requests.restaction.order.OrderAction#moveUp(int) up}/{@link net.dv8tion.jda.core.requests.restaction.order.OrderAction#moveDown(int) down} * or {@link net.dv8tion.jda.core.requests.restaction.order.OrderAction#moveTo(int) to} a specific position. * * <p>Possible {@link net.dv8tion.jda.core.requests.ErrorResponse ErrorResponses} include: * <ul> * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#UNKNOWN_ROLE UNKNOWN_ROLE} * <br>One of the roles was deleted before the completion of the task</li> * * <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_ACCESS MISSING_ACCESS} * <br>The currently logged in account was removed from the Guild</li> * </ul> * * @param useDiscordOrder * Defines the ordering of the OrderAction. If {@code true}, the OrderAction will be in the ordering * defined by Discord for roles, which is Descending. 
This means that the highest role appears at index {@code 0} * and the lowest role at index {@code n - 1}. Providing {@code false} will result in the ordering being * in ascending order, with the lower role at index {@code 0} and the highest at index {@code n - 1}. * <br>As a note: {@link net.dv8tion.jda.core.entities.Member#getRoles() Member.getRoles()} * and {@link net.dv8tion.jda.core.entities.Guild#getRoles() Guild.getRoles()} are both in descending order. * * @return {@link net.dv8tion.jda.core.requests.restaction.order.RoleOrderAction RoleOrderAction} */ @CheckReturnValue public RoleOrderAction modifyRolePositions(boolean useDiscordOrder) { return new RoleOrderAction(guild, useDiscordOrder); } protected void checkAvailable() { if (!guild.isAvailable()) throw new GuildUnavailableException(); } protected void checkGuild(Guild providedGuild, String comment) { if (!guild.equals(providedGuild)) throw new IllegalArgumentException("Provided " + comment + " is not part of this Guild!"); } protected void checkPermission(Permission perm) { if (!guild.getSelfMember().hasPermission(perm)) throw new PermissionException(perm); } protected void checkPosition(Member member) { if(!guild.getSelfMember().canInteract(member)) throw new PermissionException("Can't modify a member with higher or equal highest role than yourself!"); } protected void checkPosition(Role role) { if(!guild.getSelfMember().canInteract(role)) throw new PermissionException("Can't modify a role with higher or equal highest role than yourself! Role: " + role.toString()); } }
apache-2.0
wapalxj/Java
javaworkplace/source/src/com/sun/corba/se/spi/activation/_ServerImplBase.java
2433
package com.sun.corba.se.spi.activation; /** * com/sun/corba/se/spi/activation/_ServerImplBase.java . * Generated by the IDL-to-Java compiler (portable), version "3.2" * from c:/re/workspace/8-2-build-windows-amd64-cygwin/jdk8u112/7884/corba/src/share/classes/com/sun/corba/se/spi/activation/activation.idl * Thursday, September 22, 2016 9:33:08 PM PDT */ /** Server callback API, passed to Activator in active method. */ public abstract class _ServerImplBase extends org.omg.CORBA.portable.ObjectImpl implements Server, org.omg.CORBA.portable.InvokeHandler { // Constructors public _ServerImplBase () { } private static java.util.Hashtable _methods = new java.util.Hashtable (); static { _methods.put ("shutdown", new Integer (0)); _methods.put ("install", new Integer (1)); _methods.put ("uninstall", new Integer (2)); } public org.omg.CORBA.portable.OutputStream _invoke (String $method, org.omg.CORBA.portable.InputStream in, org.omg.CORBA.portable.ResponseHandler $rh) { org.omg.CORBA.portable.OutputStream out = null; Integer __method = (Integer)_methods.get ($method); if (__method == null) throw new org.omg.CORBA.BAD_OPERATION (0, org.omg.CORBA.CompletionStatus.COMPLETED_MAYBE); switch (__method.intValue ()) { /** Shutdown this server. Returns after orb.shutdown() completes. */ case 0: // activation/Server/shutdown { this.shutdown (); out = $rh.createReply(); break; } /** Install the server. Returns after the install hook completes * execution in the server. */ case 1: // activation/Server/install { this.install (); out = $rh.createReply(); break; } /** Uninstall the server. Returns after the uninstall hook * completes execution. 
*/ case 2: // activation/Server/uninstall { this.uninstall (); out = $rh.createReply(); break; } default: throw new org.omg.CORBA.BAD_OPERATION (0, org.omg.CORBA.CompletionStatus.COMPLETED_MAYBE); } return out; } // _invoke // Type-specific CORBA::Object operations private static String[] __ids = { "IDL:activation/Server:1.0"}; public String[] _ids () { return (String[])__ids.clone (); } } // class _ServerImplBase
apache-2.0
DanDits/WhatsThat
app/src/main/java/dan/dit/whatsthat/riddle/control/RiddleController.java
22693
/* * Copyright 2015 Daniel Dittmar * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package dan.dit.whatsthat.riddle.control; import android.content.Context; import android.content.res.Resources; import android.graphics.Canvas; import android.os.Handler; import android.os.HandlerThread; import android.support.annotation.NonNull; import android.util.Log; import android.view.Gravity; import android.view.MotionEvent; import android.view.ViewGroup; import android.view.animation.AccelerateInterpolator; import com.github.johnpersano.supertoasts.SuperToast; import com.plattysoft.leonids.ParticleField; import com.plattysoft.leonids.ParticleSystem; import java.sql.Types; import dan.dit.whatsthat.R; import dan.dit.whatsthat.achievement.AchievementManager; import dan.dit.whatsthat.achievement.AchievementProperties; import dan.dit.whatsthat.riddle.Riddle; import dan.dit.whatsthat.riddle.RiddleInitializer; import dan.dit.whatsthat.riddle.RiddleManager; import dan.dit.whatsthat.riddle.RiddleView; import dan.dit.whatsthat.riddle.achievement.AchievementDataRiddleType; import dan.dit.whatsthat.riddle.types.PracticalRiddleType; import dan.dit.whatsthat.riddle.types.TypesHolder; import dan.dit.whatsthat.testsubject.TestSubject; import dan.dit.whatsthat.testsubject.TestSubjectToast; import dan.dit.whatsthat.util.general.MathFunction; /** * A riddle controller is the class between the RiddleView and the RiddleGame. 
If closed the controller can * no longer be used, it is directly bound to the lifecycle of a RiddleGame. * The controller manages the communication between the different threads involved for keeping * the riddle running. The game thread is always running and a single thread dedicated to process * events like motion events, orientation events and periodic events in a single thread. Also the * periodic game drawing is done on this thread. In the background there can exist a separate * periodic thread that produces periodic events for the riddle (if requested on startup) or for * riddle animations. Keep in mind that starting a ParticleSystem needs to be done over the * RiddleGame.<br>The ui thread is invoked for some drawing, for startup and closing a riddle. * main UI thread is * Created by daniel on 05.04.15. */ public class RiddleController implements RiddleAnimationController.OnAnimationCountChangedListener { private volatile RiddleGame mRiddleGame; private Riddle mRiddle; private ViewGroup mRiddleViewContainer; private volatile RiddleView mRiddleView; private GamePeriodicThread mPeriodicThread; private RiddleAnimationController mRiddleAnimationController; private Handler mMainHandler; private GameHandlerThread mGameThread; private final Runnable mDrawAction; private final Runnable mPeriodicAction; private volatile int mPeriodActionPostedCount; private volatile boolean mIsClosing; /** * Initializes the RiddleController with the RiddleGame that decorates the given Riddle. * @param riddleGame The game that decorates the Riddle parameter. * @param riddle The riddle decorated by the game. 
*/ RiddleController(@NonNull RiddleGame riddleGame, @NonNull Riddle riddle) { mRiddleGame = riddleGame; mRiddle = riddle; mRiddleAnimationController = new RiddleAnimationController(this); mDrawAction = new Runnable() { @Override public void run() { if (mRiddleView != null) { mRiddleView.draw(); } } }; mPeriodicAction = new Runnable() { private long mMissingUpdateTime; // will only be zero at start for first game controlled @Override public void run() { long requiredDrawingTime = mRiddleView.performDrawRiddle(); long updateTime = mMissingUpdateTime + requiredDrawingTime; long periodicEventStartTime = System.nanoTime(); if (updateTime > 0) { mRiddleGame.onPeriodicEvent(updateTime); mRiddleAnimationController.update(updateTime); } mMissingUpdateTime = (System.nanoTime() - periodicEventStartTime) / 1000000; --mPeriodActionPostedCount; } }; } public Riddle getRiddle() { return mRiddle; } public void forbidRiddleBonusScore() { mRiddleGame.setForbidBonus(); } private class GameHandlerThread extends HandlerThread { private static final long MIN_TIME_BETWEEN_MOTION_MOVE_EVENTS = 30L; private Handler mHandler; private long mLastMotionMoveTimestamp; public GameHandlerThread() { super("GameHandlerThread"); start(); Log.d("Riddle", "GameThread started."); mHandler = new Handler(getLooper()); } public void onMotionEvent(MotionEvent event) { if (event.getActionMasked() == MotionEvent.ACTION_MOVE) { long now = System.currentTimeMillis(); if (now - mLastMotionMoveTimestamp < MIN_TIME_BETWEEN_MOTION_MOVE_EVENTS) { return; } mLastMotionMoveTimestamp = now; } final MotionEvent eventCopy = MotionEvent.obtain(event); mHandler.post(new Runnable() { @Override public void run() { if (mRiddleGame != null && mRiddleGame.onMotionEvent(eventCopy)) { mDrawAction.run(); } eventCopy.recycle(); } }); } public void onOrientationEvent(final float azimuth, final float pitch, final float roll) { mHandler.post(new Runnable() { @Override public void run() { if (mRiddleGame != null && 
mRiddleGame.onOrientationEvent(azimuth, pitch, roll)) { mDrawAction.run(); } } }); } public void onPeriodicEvent() { if (mPeriodActionPostedCount == 0 && !mIsClosing) { ++mPeriodActionPostedCount; mHandler.post(mPeriodicAction); } } public Handler getHandler() { return mHandler; } public void onCloseRiddle(final Context context) { mIsClosing = true; mHandler.post(new Runnable() { @Override public void run() { //at this point we can be sure that the looper doesn't currently process a // periodic event which could lead to concurrency issues mGameThread.quit(); // do not process any more actions! Log.d("Riddle", "Game thread quit."); // stop periodic event in a safe way, as soon as it is stopped really close // riddle in the main ui thread stopPeriodicEvent(mMainHandler, new Runnable() { @Override public void run() { if (riddleAvailable()) { Log.d("Riddle", "Executing close riddle!"); onPreRiddleClose(); mRiddleAnimationController.clear(); mRiddleGame.close(); mRiddleGame = null; onRiddleClosed(context); } mRiddleView = null; mRiddleViewContainer = null; } }); } }); } } /** * The controller is closing, close the riddle and make it save its state. After this method * returns the controller is invalid. * @param context A context object required for saving state to permanent storage. */ public final void onCloseRiddle(@NonNull final Context context) { Log.d("Riddle", "On close riddle."); if (mGameThread != null) { mGameThread.onCloseRiddle(context); } } /** * Invoked on closure of the controller before the RiddleGame's onClose method is invoked. */ void onPreRiddleClose() { RiddleManager.addToCache(mRiddle, mRiddleGame.makeSnapshot()); } // this is overwritten if we don't want the manager to know of this riddle and dont want it saved /** * Invoked on closure of the controller after the RiddleGame's onClose method returned. The RiddleGame is not * a valid member anymore. By default saving achievement data and decorated riddle object. 
* @param context The context required to save to permanent storage. */ void onRiddleClosed(final Context context) { Log.d("Riddle", "On riddle closed."); if (mRiddle.isSolved() && (mRiddle.isRemade() || !mRiddle.isCustom())) { mRiddle.getType().getAchievementData(AchievementManager.getInstance()).onSolvedGame(); } mRiddle.saveToDatabase(context); if (mRiddle.isSolved()) { RiddleInitializer.INSTANCE.getRiddleManager().onRiddleSolved(mRiddle); } else { RiddleInitializer.INSTANCE.getRiddleManager().onUnsolvedRiddle(mRiddle); } } /* ************* LAYOUT RELATED METHODS ********************************************************/ /** * Draws the RiddleGame on the given canvas if there is a valid riddle. * @param canvas The canvas to draw onto. */ public void draw(Canvas canvas) { if (riddleAvailable()) { mRiddleAnimationController.draw(canvas, null, RiddleAnimationController .LEVEL_GROUNDING); mRiddleAnimationController.draw(canvas, null, RiddleAnimationController .LEVEL_BACKGROUND); mRiddleGame.draw(canvas); mRiddleAnimationController.draw(canvas, null, RiddleAnimationController .LEVEL_ON_TOP); } } protected void addAnimation(@NonNull RiddleAnimation animation) { mRiddleAnimationController.addAnimation(animation); } protected void addAnimation(@NonNull RiddleAnimation animation, long delay) { mRiddleAnimationController.addAnimation(animation, delay); } /* ************ INPUT RELATED METHODS *********************************************************/ /** * Invoked if there happened some MotionEvent of any kind to the RiddleView. * If possible forwards the event to the RiddleGame, redrawing the game if onMotionEvent suggests so. * @param event The event to forward. */ public void onMotionEvent(MotionEvent event) { if (riddleAvailable()) { mGameThread.onMotionEvent(event); } } /** * Invoked if there happened some OrientationEvent that changed the orientation of the device in the world's * coordinate system and orientation sensor is required. 
* If possible forwards the orientation event to the RiddleGame, redrawing the game if onOrientationEvent suggests so. * Given angles in radians, for specification see Wikipedia. * @param azimuth The new azimuth. * @param pitch The new pitch. * @param roll The new roll. */ public void onOrientationEvent(float azimuth, float pitch, float roll) { if (riddleAvailable() && requiresOrientationSensor()) { mGameThread.onOrientationEvent(azimuth, pitch, roll); } } private boolean riddleAvailable() { return mRiddleGame != null && mRiddleGame.isNotClosed(); } /** * Invoked when the RiddleView got visible and valid. On the UI thread. * @param riddleViewContainer The container that contains the valid RiddleView */ public final void onRiddleVisible(@NonNull ViewGroup riddleViewContainer) { mRiddleViewContainer = riddleViewContainer; mIsClosing = false; mRiddleView = (RiddleView) mRiddleViewContainer.findViewById(R.id.riddle_view); mMainHandler = new Handler(); mGameThread = new GameHandlerThread(); mGameThread.setUncaughtExceptionHandler(Thread.currentThread().getUncaughtExceptionHandler()); mRiddleGame.onGotVisible(); //startRiddleGotVisibleAnimation(); // nice but requires extra periodic thread to be // started and overall extra work for little gain onRiddleGotVisible(); } protected void startRiddleGotVisibleAnimation() { long animationTime = 450; float yDelta = -100f; mRiddleAnimationController.addAnimation(new RiddleCanvasAnimation.Builder() .setInterpolator(new MathFunction.AnimationInterpolator(new AccelerateInterpolator(2.f))) .addTranslate(0, yDelta, 0, -yDelta, animationTime) .addScale(1f, 1f, 0.5f, 0.0f, animationTime) .build()); } // this is overwritten if we don't want the manager to know of this riddle /** * The riddle just got visible, by default tell the manager this happened. 
*/ void onRiddleGotVisible() { RiddleInitializer.INSTANCE.getRiddleManager().onUnsolvedRiddle(mRiddle); // especially important that, if saving this riddle when finished excludes the image from the list since saving is async } /** * Get the id of the currently used riddle. Not necessarily a valid id for newly created riddles! * @return The riddle id. Can be an invalid id! */ public long getRiddleId() { return mRiddle.getId(); } /** * If any valid game, check if its type requires the orientation sensor. * @return If the orientation sensor is required. */ public boolean requiresOrientationSensor() { return riddleAvailable() && mRiddleGame.requiresOrientationSensor(); } /** * Pause the periodic event, stopping future invocations and periodic renderings. */ private synchronized void stopPeriodicEvent(Handler handler, final Runnable toExecute) { if (mPeriodicThread != null && mPeriodicThread.isRunning()) { Log.d("Riddle", "Stopping periodic event that is running."); mPeriodicThread.stopPeriodicEvent(handler, new Runnable() { @Override public void run() { if (toExecute != null) { toExecute.run(); } onPeriodicThreadStopped(); } }); } else if (toExecute != null) { Log.d("Riddle", "Stopping periodic event that was not running."); if (handler != null) { handler.post(toExecute); } else { toExecute.run(); } } } public synchronized void stopPeriodicEvent() { stopPeriodicEvent(null, null); } private void resumePeriodicEventExecute() { mPeriodicThread = new GamePeriodicThread(RiddleController.this); mPeriodicThread.setUncaughtExceptionHandler(Thread.getDefaultUncaughtExceptionHandler()); mPeriodicThread.startPeriodicEvent(); } // invoked on ui thread. 
periodic event stopped completely, any other code to execute after // stopping was executed, so check if there is still a riddle available before doing stuff to // riddle or periodic thread private synchronized void onPeriodicThreadStopped() { onAnimationCountChanged(); // check again if we need to resume the periodic thread, can // be relevant when the thread is about to be stopped when another animation is added } /** * If there is a valid riddle and a positive periodic event period, resume (or restart) the rendering and periodic threads. */ public synchronized void resumePeriodicEventIfRequired() { if (mRiddleGame != null && requiresPeriodicEvent()) { resumePeriodicEvent(); } } private boolean requiresPeriodicEvent() { return mRiddleGame.requiresPeriodicEvent() || mRiddleView.getActiveParticleSystemsCount() > 0 || mRiddleAnimationController.getActiveAnimationsCount() > 0; } private synchronized void resumePeriodicEvent() { if (riddleAvailable() && mRiddleView != null) { if (mPeriodicThread == null || !mPeriodicThread.isRunning()) { // if thread is not running yet or not anymore, (re)start. // use runnable that is posted by previous running thread, if any, to the ui // thread to ensure that no concurrency issues can appear stopPeriodicEvent(mMainHandler, new Runnable() { @Override public void run() { resumePeriodicEventExecute(); } }); } } } /** * If the type requires orientation sensor but the device does not supply (all) required sensors, the game is told so * and can enable an alternative to the orientation sensor if possible. */ public void enableNoOrientationSensorAlternative() { if (riddleAvailable()) { mRiddleGame.enableNoOrientationSensorAlternative(); } } /** * Returns the type of controller's Riddle. * @return The type of the current riddle. */ public PracticalRiddleType getRiddleType() { return mRiddle.getType(); } /** * Returns the image's hash of the controller's Riddle ('s image). * @return The hash of the current riddle. 
*/ public String getImageHash() { return mRiddle.getImageHash(); } /** * The periodic event happened, forward to the RiddleGame if possible. */ void onPeriodicEvent() { if (riddleAvailable()) { mGameThread.onPeriodicEvent(); } } public boolean hasRunningPeriodicThread() { return mPeriodicThread != null && mPeriodicThread.isRunning(); } public void checkParty(@NonNull Resources res, @NonNull RiddleView.PartyCallback callback) { if (!TestSubject.isInitialized()) { return; } TestSubject subject = TestSubject.getInstance(); TestSubjectToast toast = new TestSubjectToast(Gravity.CENTER, 0, 0, mRiddle.getType().getIconResId(), 0, SuperToast.Duration.MEDIUM); toast.mAnimations = SuperToast.Animations.POPUP; toast.mBackgroundColor = res.getColor(R.color.main_background); String[] candies = res.getStringArray(subject.getRiddleSolvedResIds()); RiddleScore riddleScore = mRiddleGame.getGainedScore(false); int score = riddleScore.getTotalScore(); int party = riddleScore.getBonus(); if (riddleScore.hasBonus()) { AchievementProperties data = mRiddle.getType().getAchievementData(null); if (data != null) { data.increment(AchievementDataRiddleType.KEY_BONUS_GAINED_COUNT, 1L, 0L); } } StringBuilder builder = new StringBuilder(); if (candies != null && candies.length > 0) { int candyIndex = score - TypesHolder.SCORE_MINIMAL; if (candyIndex >= candies.length) { float bestFrac = 1f/3f; candyIndex = (int) (candies.length * (1f - bestFrac) + Math.random() * candies.length * bestFrac); } builder.append(candies[candyIndex < 0 ? 0 : candyIndex >= candies.length ? 
candies.length - 1 : candyIndex]); } if (score > 0) { builder.append(" +") .append(score); } // for each multiplier add an exclamation mark for (int i = 0; i < riddleScore.getMultiplicator() - 1; i++) { builder.append("!"); } toast.mText = builder.toString(); toast.mTextSize = 40; callback.giveCandy(toast); if (party > 0) { callback.doParty(party); } if (score > 0) { callback.showMoneyEarned(score); } } @Override public void onAnimationCountChanged() { if (!riddleAvailable() || mRiddleGame.requiresPeriodicEvent()) { return; } int count = mRiddleAnimationController.getActiveAnimationsCount(); handlePeriodicEventForCount(count); } public void onParticleSystemCountChanged() { if (!riddleAvailable() || mRiddleGame.requiresPeriodicEvent()) { return; } int count = mRiddleView.getActiveParticleSystemsCount(); handlePeriodicEventForCount(count); } private void handlePeriodicEventForCount(int count) { if (mRiddleView != null && mRiddleView.isPaused()) { return; } // ensure the following actions take place on ui thread if (count == 0) { mMainHandler.post(new Runnable() { @Override public void run() { stopPeriodicEvent(mGameThread.getHandler(), mDrawAction); } }); } else if (count > 0) { mMainHandler.post(new Runnable() { @Override public void run() { resumePeriodicEvent(); } }); } } public ParticleSystem makeParticleSystem(Resources res, int maxParticles, int drawableResId, long timeToLive) { ParticleField field = mRiddleView; if (field == null) { return null; } ParticleSystem system = new ParticleSystem(field, res, maxParticles, timeToLive); system.initParticles(res.getDrawable(drawableResId)); system.setIgnorePositionInParent(); return system; } }
apache-2.0
rahulmadhavan/clippy
android/src/com/rahulmadhavan/clippy/Message.java
627
package com.rahulmadhavan.clippy; public class Message { private String value; public Message(){ } public Message(String _value){ this.value = _value; } public synchronized boolean isValueSame(String data){ boolean result = false; if( null == data || null == value ){ }else if(data.compareTo(value) != 0){ }else{ result = true; } return result; } public synchronized String getValue() { return value; } public synchronized void setValue(String value) { this.value = value; } @Override public synchronized String toString(){ return this.value; } }
apache-2.0
Espenhh/panopticon
backend/src/main/java/no/panopticon/api/external/UpdatedStatus.java
1855
package no.panopticon.api.external; import no.panopticon.storage.RunningUnit; import no.panopticon.storage.StatusSnapshot; import java.time.LocalDateTime; import java.util.List; import static java.util.stream.Collectors.toList; public class UpdatedStatus { public String environment; public String system; public String component; public String server; public List<UpdatedMeasurement> measurements; @Override public String toString() { return "UpdatedStatus{" + "environment='" + environment + '\'' + ", system='" + system + '\'' + ", component='" + component + '\'' + ", server='" + server + '\'' + ", measurements=" + measurements + '}'; } public RunningUnit toRunningUnit() { return new RunningUnit(environment, system, component, server); } public StatusSnapshot toStatusSnapshot() { return new StatusSnapshot(LocalDateTime.now(), measurements.stream() .map(m -> new StatusSnapshot.Measurement(m.key, m.status, m.displayValue, m.numericValue, m.description)) .collect(toList()) ); } public static class UpdatedMeasurement { public String key; public String status; public String displayValue; public long numericValue; public String description; @Override public String toString() { return "UpdatedMeasurement{" + "key='" + key + '\'' + ", status='" + status + '\'' + ", displayValue='" + displayValue + '\'' + ", numericValue=" + numericValue + '\'' + ", description=" + description + '}'; } } }
apache-2.0
florinpatrascu/jpublish
java/src/org/jpublish/util/PathUtilities.java
5804
/*
 * Copyright 2004-2007 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jpublish.util;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import java.io.File;

/**
 * Utility class for working with request paths.
 *
 * @author Anthony Eden
 */
public final class PathUtilities {

    /** Wildcard token recognized by {@link #match(String, String)}. */
    public static final String WILDCARD = "*";

    /** Protocol prefix used by {@link #makeTemplateURI(String)}. */
    public static final String TEMPLATE_PROTOCOL = "template";

    /** Protocol prefix used by {@link #makeRepositoryURI(String, String)}. */
    public static final String REPOSITORY_PROTOCOL = "repository";

    private static final Log log = LogFactory.getLog(PathUtilities.class);

    /**
     * Internal constructor: all members are static.
     */
    private PathUtilities() {
        // no op
    }

    /**
     * Match a path which may contain a wildcard.  Only the first '*'
     * in the match path is treated as a wildcard.
     *
     * @param requestPath The request path submitted by the client
     * @param exPath The match path
     * @return true if the request path matches the expression path
     */
    public static boolean match(String requestPath, String exPath) {
        int wildcardIndex = exPath.indexOf(WILDCARD);
        if (wildcardIndex == -1) {
            // No wildcard: an exact match is required.
            return requestPath.equals(exPath);
        }

        if (log.isDebugEnabled())
            log.debug("Wildcard index: " + wildcardIndex);

        if (wildcardIndex == (exPath.length() - 1)) {
            // Trailing wildcard: a prefix match is sufficient.
            String checkString = exPath.substring(0, exPath.length() - 1);
            return requestPath.startsWith(checkString);
        }

        // Interior wildcard: the literal text on both sides must match.
        String preMatch = exPath.substring(0, wildcardIndex);
        String postMatch = exPath.substring(wildcardIndex + 1);
        return requestPath.startsWith(preMatch) && requestPath.endsWith(postMatch);
    }

    /**
     * Extract the page name from the given path.  The page name is the
     * name of the file in the path without its suffix.
     *
     * @param path The request path
     * @return The page name, or null when the file name has no suffix
     */
    public static String extractPageName(String path) {
        File file = new File(path);
        String fileName = file.getName();
        int dotIndex = fileName.lastIndexOf(".");
        if (dotIndex < 0) {
            return null;
        }
        return fileName.substring(0, dotIndex);
    }

    /**
     * Extract the page path from the given request path.  This method
     * will return the path from the page root to the page descriptor
     * file, normalized to forward slashes.
     *
     * @param path The request path
     * @return The page path, or null when the page name cannot be determined
     */
    public static String extractPagePath(String path) {
        File file = new File(path);
        File parentDirectory = file.getParentFile();
        String pagePath = null;
        if (parentDirectory == null) {
            // No directory component: the page path is just the page name.
            pagePath = extractPageName(path);
        } else {
            String pageName = extractPageName(path);
            if (pageName != null) {
                pagePath = new File(parentDirectory.getPath(), pageName).getPath();
                // Normalize to URL-style separators regardless of platform.
                pagePath = pagePath.replace(File.separatorChar, '/');
            }
        }
        return pagePath;
    }

    /**
     * Return the page type extracting it from the path.  For example:
     * index.html would return "html" as the page type.  If the type
     * cannot be determined then this method returns null.
     *
     * @param path The path
     * @return The page type, or null when the file name has no suffix
     */
    public static String extractPageType(String path) {
        File file = new File(path);
        String fileName = file.getName();
        int dotIndex = fileName.lastIndexOf(".");
        if (dotIndex < 0) {
            return null;
        }
        return fileName.substring(dotIndex + 1);
    }

    /**
     * Return a path String which includes a starting slash so that it
     * matches the requirements of the Servlet API's getResource() methods.
     *
     * @param path The path
     * @return The correct resource path
     * @since 2.0
     */
    public static String toResourcePath(String path) {
        if (path.startsWith("/")) {
            return path;
        } else {
            return "/" + path;
        }
    }

    /**
     * Make a URI path for a template.
     *
     * @param path The relative template path
     * @return The URI
     * @since 2.0
     */
    // Note: maybe move this into the TemplateManager?
    public static String makeTemplateURI(String path) {
        InternalURI uri = new InternalURI();
        uri.setProtocol(TEMPLATE_PROTOCOL);
        uri.setPath(path);
        return uri.toURI();
    }

    /**
     * Make a URI path for a repository item, of the form
     * {@code repository:name://path}.
     *
     * @param repositoryName The repository name
     * @param path The relative path
     * @return The URI
     * @since 2.0
     */
    // Note: maybe move this into the Repository?
    public static String makeRepositoryURI(String repositoryName, String path) {
        // Plain concatenation: the compiler generates the buffer append
        // chain itself, so the manual StringBuffer boilerplate is unneeded.
        return REPOSITORY_PROTOCOL + ":" + repositoryName + "://" + path;
    }
}
apache-2.0
objectos/way
way-compiler/src/test/resources/code/ui/login-iter7/LoginAction7.java
904
package br.com.objectos.testing.ui.login.iter7;

import br.com.objectos.way.ui.AbstractUiAction2;
import br.com.objectos.way.ui.Action;
import br.com.objectos.way.ui.Ui;

// NOTE(review): this file lives under src/test/resources/code/... and is
// presumably a fixture consumed by the @Ui annotation-processor tests —
// confirm before restructuring; the generated Ui* companion types
// (UiHomeActivity7, UiLoginForm7, UiUser) are not defined here.
@Ui
abstract class LoginAction7 extends AbstractUiAction2<String, String> {

  // Accessors implemented by the generated subclass.
  abstract UiHomeActivity7 homeActivity();

  abstract UiLoginForm7 form();

  LoginAction7() {
  }

  /**
   * Runs the login flow: on valid credentials, attaches the resolved user to
   * the home activity and starts it; otherwise shows an error on the form.
   */
  @Override
  public Action execute(String login, String password) {
    if (loginValid(login, password)) {
      return homeActivity().setUser(findUserByLogin(login))
          .and(homeActivity().startActivity());
    } else {
      return form().setErrorMessage("Invalid credentials!");
    }
  }

  // Builds a stand-in user whose display name is derived from the login.
  private UiUser findUserByLogin(String login) {
    User user = AutoUser.of("Mr. " + login);
    return UiUser.from(user);
  }

  // Hard-coded test credentials: only foo/bar is accepted.
  private boolean loginValid(String login, String password) {
    return login.equals("foo") && password.equals("bar");
  }
}
apache-2.0
devacfr/commons
commons-util/src/main/java/org/cfr/commons/util/collection/CollectionReorderer.java
4278
package org.cfr.commons.util.collection;

import java.util.Collection;
import java.util.List;
import java.util.Map;

/**
 * Reorders elements of a mutable {@link List} in place.
 * <p>
 * Elements are located via {@code equals}, so duplicates are matched at their
 * first occurrence. Asking to move an element that is not present in the list
 * raises an {@link IllegalArgumentException}.
 *
 * @param <T> the element type of the lists being reordered
 */
public class CollectionReorderer<T> {

    /**
     * Moves the given element to the head (index 0) of the list.
     *
     * @param objects the list to mutate
     * @param toMove the element to relocate; must be present in the list
     */
    public void moveToStart(final List<T> objects, final T toMove) {
        requireMember(objects, toMove);
        objects.remove(toMove);
        objects.add(0, toMove);
    }

    /**
     * Moves the given element to the tail of the list.
     *
     * @param objects the list to mutate
     * @param toMove the element to relocate; must be present in the list
     */
    public void moveToEnd(final List<T> objects, final T toMove) {
        requireMember(objects, toMove);
        objects.remove(toMove);
        objects.add(objects.size(), toMove);
    }

    /**
     * Shifts the element one position toward the end of the list; a no-op
     * when it is already last.
     *
     * @param objects the list to mutate
     * @param toMove the element to shift; must be present in the list
     */
    public void decreasePosition(final List<T> objects, final T toMove) {
        requireMember(objects, toMove);
        final int current = objects.indexOf(toMove);
        final boolean alreadyLast = current == (objects.size() - 1);
        if (current >= 0 && !alreadyLast) {
            moveToPosition(objects, current, current + 1);
        }
    }

    /**
     * Shifts the element one position toward the head of the list; a no-op
     * when it is already first.
     *
     * @param objects the list to mutate
     * @param toMove the element to shift; must be present in the list
     */
    public void increasePosition(final List<T> objects, final T toMove) {
        requireMember(objects, toMove);
        final int current = objects.indexOf(toMove);
        if (current >= 1) {
            moveToPosition(objects, current, current - 1);
        }
    }

    /**
     * Move the 'toMove' to the position after the 'target'.
     * To insert to the head of the list set afterThis to null.
     *
     * @param objects list of objects to look up and order in
     * @param toMove the object to move
     * @param target the position to move to
     */
    public void moveToPositionAfter(final List<T> objects, final T toMove, final T target) {
        requireMember(objects, toMove);
        final int from = objects.indexOf(toMove);

        int to = -1; // default: insert at the head when target is null
        if (target != null) {
            requireMember(objects, target);
            to = objects.indexOf(target);
        }

        if (to < from) {
            // Inserting before the removal point: bump by one so the element
            // lands just after the target object.
            to++;
        }
        moveToPosition(objects, from, to);
    }

    /**
     * Moves multiple objects in the objects list to given destination indexes.
     *
     * @param objects the list of objects
     * @param positionToObjects a naturally sorted map with destination indexes
     *        as keys and the objects to move as values
     */
    public void moveToPosition(final List<T> objects, final Map<Integer, T> positionToObjects) {
        // Pull every moving element out first, then reinsert in key order.
        for (final T moving : positionToObjects.values()) {
            objects.remove(moving);
        }
        for (final Map.Entry<Integer, T> destination : positionToObjects.entrySet()) {
            objects.add(destination.getKey(), destination.getValue());
        }
    }

    /**
     * Moves an object at initialPosition index in the objects list to the
     * targetPosition index. Silently does nothing when either index falls
     * outside the list.
     * <p>
     * If moving more than one object at the same time please use
     * {@link #moveToPosition(java.util.List, java.util.Map)}.
     *
     * @param objects the list of objects to modify
     * @param initialPosition the current index of the object that should be moved
     * @param targetPosition the destination index
     */
    public void moveToPosition(final List<T> objects, final int initialPosition, final int targetPosition) {
        final int size = objects.size();
        final boolean targetInRange = 0 <= targetPosition && targetPosition < size;
        final boolean initialInRange = 0 <= initialPosition && initialPosition < size;
        if (targetInRange && initialInRange) {
            objects.add(targetPosition, objects.remove(initialPosition));
        }
    }

    // Fails fast when the element is not contained in the collection.
    private void requireMember(final Collection<T> objects, final T o) {
        if (!objects.contains(o)) {
            throw new IllegalArgumentException("Object " + o + " not contained in Collection " + objects);
        }
    }
}
apache-2.0
andremanuelbarbosa/intelligent-web-search
build/classes/rcm/awt/PopupDialog.java
11859
/*
 * Copyright (c) 1998-2002 Carnegie Mellon University.  All rights
 * reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY CARNEGIE MELLON UNIVERSITY ``AS IS'' AND
 * ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY
 * NOR ITS EMPLOYEES BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

package rcm.awt;

import java.awt.*;
import java.awt.event.*;
import java.util.Vector;

import rcm.util.Win;

// Note: after creating a PopupDialog (like any other top-level window, it
// seems), the JDK 1.1 runtime won't exit by itself, even if the PopupDialog
// is properly disposed.  Need to force it to exit using System.exit().

/**
 * Modal question/confirmation dialog built on java.awt.Dialog, with static
 * convenience entry points (ask / okcancel / yesno / yesnocancel / warn /
 * askFilename / askDirectory) that block until the user answers.
 * <p>
 * NOTE(review): uses pre-Swing AWT APIs (Dialog.show(), FileDialog.show())
 * that are deprecated on modern JDKs; left as-is because the class predates
 * Swing.  Not thread-safe beyond the synchronized listener/close methods.
 */
public class PopupDialog extends Dialog {

    // Answer codes returned by getAnswer().  YES and OK deliberately share
    // the value 0, so a "Yes" button and an "OK" button are interchangeable.
    public static final int YES = 0;
    public static final int OK = 0;
    public static final int NO = 1;
    public static final int CANCEL = 2;

    Component parent;          // component the dialog is centered over (may be null)
    int answer = -1;           // -1 means "not answered yet"; see close()
    String text;               // text field contents captured at close time
    TextField textfield;       // optional entry field (only when initialEntry != null)
    Button okButton, noButton, cancelButton;

    /**
     * Shows a modal prompt with a text entry field.
     * Returns the entered text on OK, or null on Cancel/close.
     */
    public static String ask (Component comp, String topic, String question,
                              String defaultAnswer) {
        PopupDialog d = new PopupDialog (comp, topic, true,
                                         question, defaultAnswer,
                                         "OK", null, "Cancel");
        d.show ();
        switch (d.getAnswer ()) {
          case OK:
            return d.getText ();
          default:
            return null;
        }
    }

    /** Same as the four-argument ask() with an empty default answer. */
    public static String ask (Component comp, String topic, String question) {
        return ask (comp, topic, question, "");
    }

    /** Shows an OK/Cancel confirmation; true when OK was pressed. */
    public static boolean okcancel (Component comp, String topic,
                                    String question) {
        PopupDialog d = new PopupDialog (comp, topic, true,
                                         question, null,
                                         "OK", null, "Cancel");
        d.show ();
        return (d.getAnswer () == OK);
    }

    /** Shows a Yes/No question; true when Yes was pressed. */
    public static boolean yesno (Component comp, String topic,
                                 String question) {
        PopupDialog d = new PopupDialog (comp, topic, true,
                                         question, null,
                                         "Yes", "No", null);
        d.show ();
        return (d.getAnswer () == YES);
    }

    /** Shows a Yes/No/Cancel question; returns YES, NO, or CANCEL. */
    public static int yesnocancel (Component comp, String topic,
                                   String question) {
        PopupDialog d = new PopupDialog (comp, topic, true,
                                         question, null,
                                         "Yes", "No", "Cancel");
        d.show ();
        return d.getAnswer ();
    }

    /** Shows a warning message with a single OK button. */
    public static void warn (Component comp, String topic, String message) {
        PopupDialog d = new PopupDialog (comp, topic, true,
                                         message, null,
                                         "OK", null, null);
        d.show ();
    }

    // Last directory chosen in a file dialog; shared by all popups so
    // successive dialogs reopen where the user left off.
    public static String currentDirectory = "";

    /**
     * Shows a native file chooser and returns the chosen directory+file
     * path, or null if cancelled.  Falls back to a plain text prompt in
     * environments where FileDialog is unavailable (throws AWTError).
     */
    public static String askFilename (Component comp, String topic,
                                      String defaultFilename, boolean loading) {
        try {
            FileDialog fd = new FileDialog (Win.findFrame(comp), topic,
                                            loading ? FileDialog.LOAD : FileDialog.SAVE);
            if (currentDirectory != null)
                fd.setDirectory (currentDirectory);
            if (defaultFilename != null)
                fd.setFile (defaultFilename);
            fd.show ();
            String dir = fd.getDirectory();
            String file = fd.getFile ();
            if (dir == null || file == null)
                return null;    // user cancelled
            currentDirectory = dir;   // remember for next time
            return dir + file;
        } catch (AWTError e) {
            return ask (comp, topic, "Filename:", defaultFilename);
        }
    }

    /**
     * Shows a native file chooser and returns only the chosen directory,
     * or null if cancelled.  Same AWTError fallback as askFilename().
     */
    public static String askDirectory (Component comp, String topic,
                                       String defaultFilename, boolean loading) {
        try {
            FileDialog fd = new FileDialog (Win.findFrame(comp), topic,
                                            loading ? FileDialog.LOAD : FileDialog.SAVE);
            if (currentDirectory != null)
                fd.setDirectory (currentDirectory);
            if (defaultFilename != null)
                fd.setFile (defaultFilename);
            fd.show ();
            String dir = fd.getDirectory();
            if (dir != null)
                currentDirectory = dir;
            return dir;
        } catch (AWTError e) {
            return ask (comp, topic, "Directory:", defaultFilename);
        }
    }

    /**
     * Creates an empty dialog attached to (or wrapped in) a Frame derived
     * from the parent component.
     */
    public PopupDialog (Component parent, String title, boolean modal) {
        super (Win.findFrameOrMakeFrame (parent), title, modal);
        this.parent = parent;
    }

    /**
     * Creates a fully populated dialog.  Any of initialEntry / okOrYes /
     * no / cancel may be null to omit the corresponding widget.
     */
    public PopupDialog (Component parent, String title, boolean modal,
                        String question, String initialEntry,
                        String okOrYes, String no, String cancel) {
        this (parent, title, modal);

        if (parent != null)
            setFont (parent.getFont ());

        Panel middle = new Panel ();
        add ("Center", BorderPanel.wrap (middle, 10, 10, 10, 5));
        middle.setLayout (new BorderLayout ());
        MultiLineLabel questionLabel = new MultiLineLabel (question, Label.LEFT);
        middle.add ("Center", questionLabel);

        if (initialEntry != null) {
            textfield = new TextField (Math.max (40, initialEntry.length()+1));
            middle.add ("South", textfield);
            textfield.setText (initialEntry);
            textfield.selectAll ();
            // Enter in the text field acts like pressing OK.
            textfield.addActionListener (new ActionListener () {
                public void actionPerformed (ActionEvent event) {
                    answer = OK;
                    close ();
                }
            });
        }

        Panel bottom = new Panel ();
        add ("South", bottom);

        if (okOrYes != null) {
            okButton = new Button (okOrYes);
            okButton.addActionListener (new ActionListener () {
                public void actionPerformed (ActionEvent event) {
                    answer = OK;
                    close ();
                }
            });
            bottom.add (okButton);
        }

        if (no != null) {
            noButton = new Button (no);
            noButton.addActionListener (new ActionListener () {
                public void actionPerformed (ActionEvent event) {
                    answer = NO;
                    close ();
                }
            });
            bottom.add (noButton);
        }

        if (cancel != null) {
            cancelButton = new Button (cancel);
            cancelButton.addActionListener (new ActionListener () {
                public void actionPerformed (ActionEvent event) {
                    answer = CANCEL;
                    close ();
                }
            });
            bottom.add (cancelButton);
        }

        // Closing the window maps to Cancel when a cancel button exists,
        // or to OK when the dialog has only an OK button; otherwise the
        // close box is ignored (user must pick Yes or No).
        addWindowListener (new WindowAdapter () {
            public void windowClosing (WindowEvent event) {
                if (cancelButton != null) {
                    answer = CANCEL;
                    close ();
                }
                else if (noButton == null && cancelButton == null) {
                    answer = OK;
                    close ();
                }
            }
        });

//         if (System.getProperty ("java.vendor").startsWith ("Netscape")) {
//             // pack() doesn't work under Netscape!
//             Dimension d = questionLabel.preferredSize();
//             resize (Math.max (100, d.width), 100 + d.height);
//         }
//         else
            pack ();
    }

    /**
     * Centers the window over the reference component, or over the screen
     * when ref is null.  Never positions the window off the top-left edge.
     */
    public static void centerWindow (Window window, Component ref) {
        Dimension size = window.getSize();
        Dimension refSize = (ref != null)
            ? ref.getSize()
            : Toolkit.getDefaultToolkit().getScreenSize();
        Point origin = (ref != null)
            ? ref.getLocationOnScreen ()
            : new Point (0, 0);

        if (refSize != null) {
            int x = Math.max (0, origin.x + (refSize.width - size.width) / 2);
            int y = Math.max (0, origin.y + (refSize.height - size.height) / 2);
            window.setLocation (x, y);
        }
    }

    /** Centers, shows (blocking while modal), and focuses the text field. */
    public void show () {
        centerWindow (this, parent);
        super.show ();
        if (textfield != null)
            textfield.requestFocus ();
    }

    /** Returns the answer code (YES/OK, NO, CANCEL) or -1 if unanswered. */
    public int getAnswer () {
        return answer;
    }

    public void setAnswer (int answer) {
        this.answer = answer;
    }

    /** Returns the text captured at close time (null unless answered OK). */
    public String getText () {
        return text;
    }

    // Registered PopupListeners, notified from close().
    Vector listeners = new Vector ();

    public synchronized void addPopupListener (PopupListener listener) {
        listeners.addElement (listener);
    }

    public synchronized void removePopupListener (PopupListener listener) {
        listeners.removeElement (listener);
    }

    /**
     * Captures the text field value, disposes the dialog (and its synthetic
     * parent Frame when one was created), and fires yes/no/cancel on all
     * registered listeners.
     * NOTE(review): the explicit finalize() call at the end is unusual and
     * relies on finalizer side effects — confirm before modernizing.
     */
    public synchronized void close () {
        text = (answer == OK && textfield != null) ? textfield.getText () : null;
        dispose ();
        if (parent == null)
            // Dialog was wrapped in a Frame made just for it; tear it down too.
            ((Frame)getParent()).dispose ();
        else
            parent.requestFocus ();

        if (answer != -1) {
            PopupEvent e = new PopupEvent (answer, text);
            for (int i=0; i<listeners.size (); ++i) {
                PopupListener p = (PopupListener) (listeners.elementAt (i));
                switch (e.getID ()) {
                  case YES:     // == OK
                    p.yes (e);
                    break;
                  case NO:
                    p.no (e);
                    break;
                  case CANCEL:
                    p.cancel (e);
                    break;
                }
            }
        }

        try {
            finalize ();
        } catch (Throwable t) {
            throw new RuntimeException (t.toString());
        }
    }

    /*
     * Testing
     *
     */
    public static void main (String[] args) {
        String name = ask (null, "Enter Name", "Enter your full name:");
        if (name != null) {
            switch (yesnocancel (null, "Confirm",
                                 "Hello, " + name + ".\nIs this your name?")) {
              case PopupDialog.YES:
                if (okcancel (null, "Thanks",
                              "Great!\nDo you want to play a game?")) {
                    warn (null, "Sorry",
                          "Too bad, my mommy won't let me out of the house.");
                }
                break;
              case PopupDialog.NO:
                warn (null, "D'oh", "Oops.  My bad.");
                break;
            }
        }
        System.exit (0);
    }
}
apache-2.0
dagnir/aws-sdk-java
aws-java-sdk-cloudsearch/src/main/java/com/amazonaws/services/cloudsearchdomain/model/QueryParser.java
1678
/*
 * Copyright 2012-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
 * the License. A copy of the License is located at
 *
 * http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */
package com.amazonaws.services.cloudsearchdomain.model;

import javax.annotation.Generated;

/**
 * Query-parser options accepted by the CloudSearch domain Search API; each
 * constant carries the lowercase wire value sent to the service.
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public enum QueryParser {

    Simple("simple"),
    Structured("structured"),
    Lucene("lucene"),
    Dismax("dismax");

    // The wire value for this constant, as emitted by toString().
    private String value;

    private QueryParser(String value) {
        this.value = value;
    }

    @Override
    public String toString() {
        return value;
    }

    /**
     * Use this in place of valueOf.
     *
     * @param value
     *        real value
     * @return QueryParser corresponding to the value
     */
    public static QueryParser fromValue(String value) {
        if (value == null || value.isEmpty()) {
            throw new IllegalArgumentException("Value cannot be null or empty!");
        }
        for (QueryParser candidate : values()) {
            if (candidate.toString().equals(value)) {
                return candidate;
            }
        }
        throw new IllegalArgumentException("Cannot create enum from " + value + " value!");
    }
}
apache-2.0
emil-wcislo/sbql4j8
sbql4j8/src/test/openjdk/tools/javac/doctree/LiteralTest.java
3119
/* * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. 
*/ /* * @test * @bug 7021614 * @summary extend sbql4j8.com.sun.source API to support parsing javadoc comments * @build DocCommentTester * @run main DocCommentTester LiteralTest.java */ class LiteralTest { /** {@literal if (a < b) { }} */ void minimal() { } /* DocComment[DOC_COMMENT, pos:0 firstSentence: 1 Literal[LITERAL, pos:0, if_(a_<_b)_{_}] body: empty block tags: empty ] */ /** [{@literal if (a < b) { }}] */ void in_brackets() { } /* DocComment[DOC_COMMENT, pos:0 firstSentence: 3 Text[TEXT, pos:0, [] Literal[LITERAL, pos:1, if_(a_<_b)_{_}] Text[TEXT, pos:26, ]] body: empty block tags: empty ] */ /** [ {@literal if (a < b) { }} ] */ void in_brackets_with_whitespace() { } /* DocComment[DOC_COMMENT, pos:0 firstSentence: 3 Text[TEXT, pos:0, [_] Literal[LITERAL, pos:2, if_(a_<_b)_{_}] Text[TEXT, pos:27, _]] body: empty block tags: empty ] */ /** * {@literal {@literal nested} } */ void nested() { } /* DocComment[DOC_COMMENT, pos:1 firstSentence: 1 Literal[LITERAL, pos:1, {@literal_nested}_] body: empty block tags: empty ] */ /** * {@literal if (a < b) { * } * } */ void embedded_newline() { } /* DocComment[DOC_COMMENT, pos:1 firstSentence: 1 Literal[LITERAL, pos:1, if_(a_<_b)_{|________}|_] body: empty block tags: empty ] */ /** {@literal if (a < b) { } */ void unterminated_1() { } /* DocComment[DOC_COMMENT, pos:0 firstSentence: 1 Erroneous[ERRONEOUS, pos:0 code: compiler.err.dc.unterminated.inline.tag body: {@literal_if_(a_<_b)_{_} ] body: empty block tags: empty ] */ /** * {@literal if (a < b) { } * @author jjg */ void unterminated_2() { } /* DocComment[DOC_COMMENT, pos:1 firstSentence: 1 Erroneous[ERRONEOUS, pos:1 code: compiler.err.dc.unterminated.inline.tag body: {@literal_if_(a_<_b)_{_} ] body: empty block tags: 1 Author[AUTHOR, pos:27 name: 1 Text[TEXT, pos:35, jjg] ] ] */ }
apache-2.0
allan-huang/remote-procedure-call
src/main/java/tw/me/ychuang/rpc/json/ResponseTypeAdapter.java
4051
package tw.me.ychuang.rpc.json;

import java.lang.reflect.Type;

import org.apache.commons.lang3.ClassUtils;
import org.apache.commons.lang3.exception.ExceptionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import tw.me.ychuang.rpc.Response;
import tw.me.ychuang.rpc.Result;

import com.google.gson.JsonDeserializationContext;
import com.google.gson.JsonDeserializer;
import com.google.gson.JsonElement;
import com.google.gson.JsonNull;
import com.google.gson.JsonObject;
import com.google.gson.JsonParseException;
import com.google.gson.JsonPrimitive;
import com.google.gson.JsonSerializationContext;
import com.google.gson.JsonSerializer;

/**
 * Serializes and deserializes a response to / from one JSON string.
 * <p>
 * Wire shape produced/consumed: {@code {"id": <long>, "result": null | {
 * "return": ..., "returnClass": <fqcn>, "exceptional": <boolean>}}}.
 * <p>
 * NOTE(review): the type parameter {@code <Respose>} is misspelled and never
 * used — consider removing it. {@code log} is also currently unused.
 *
 * @author Y.C. Huang
 */
public class ResponseTypeAdapter<Respose> implements JsonSerializer<Response>, JsonDeserializer<Response> {
	private static final Logger log = LoggerFactory.getLogger(ResponseTypeAdapter.class);

	/**
	 * Serializes a response to a json element.
	 * <p>
	 * A null {@link Result} becomes {@code "result": null}. An exceptional
	 * result stores the throwable's full stack trace as a plain string
	 * instead of serializing the throwable object.
	 */
	@Override
	public JsonElement serialize(Response response, Type responseClass, JsonSerializationContext context) {
		JsonObject jsonResponse = new JsonObject();
		jsonResponse.addProperty("id", response.getId());

		Result result = response.getResult();
		if (result == null) {
			// No result payload: emit an explicit JSON null and stop.
			jsonResponse.add("result", JsonNull.INSTANCE);
			return jsonResponse;
		}

		JsonObject jsonResult = new JsonObject();
		jsonResponse.add("result", jsonResult);

		Class resultClass = result.getReturnClass();
		JsonPrimitive jsonResultClass = new JsonPrimitive(resultClass.getName());
		JsonPrimitive jsonExceptional = new JsonPrimitive(result.isExceptional());

		JsonElement jsonReturn = null;
		if (result.getReturn() != null) {
			if (false == result.isExceptional()) {
				// Normal return value: delegate to Gson using the declared class.
				jsonReturn = context.serialize(result.getReturn(), resultClass);
			} else {
				// Exceptional return: flatten the throwable to its stack trace text.
				Throwable cause = (Throwable) result.getReturn();
				String stackTraceMessage = ExceptionUtils.getStackTrace(cause);
				jsonReturn = new JsonPrimitive(stackTraceMessage);
			}
		} else {
			jsonReturn = JsonNull.INSTANCE;
		}

		jsonResult.add("return", jsonReturn);
		jsonResult.add("returnClass", jsonResultClass);
		jsonResult.add("exceptional", jsonExceptional);

		return jsonResponse;
	}

	/**
	 * Deserialize a json element to a response.
	 * <p>
	 * The "returnClass" field is resolved via reflection; primitives and
	 * their wrappers are rebuilt from their string form, throwables are
	 * kept as the raw stack-trace string, and everything else is delegated
	 * back to Gson.
	 *
	 * @throws JsonParseException when "returnClass" names a class that
	 *         cannot be loaded
	 */
	@Override
	public Response deserialize(JsonElement jsonElement, Type responseClass, JsonDeserializationContext context) throws JsonParseException {
		JsonObject jsonResponse = (JsonObject) jsonElement;
		long id = jsonResponse.getAsJsonPrimitive("id").getAsLong();

		JsonElement jsonRsElement = jsonResponse.get("result");
		Response response = null;
		if (jsonRsElement.isJsonNull()) {
			// Null result on the wire maps back to a Response without a Result.
			response = new Response(id, null, null);
			return response;
		}

		JsonObject jsonResult = jsonRsElement.getAsJsonObject();
		JsonElement jsonReturn = jsonResult.get("return");
		JsonPrimitive jsonReturnClass = jsonResult.getAsJsonPrimitive("returnClass");
		// NOTE(review): "exceptional" is read but never used below — the
		// exceptional flag is apparently inferred from the class instead.
		JsonPrimitive jsonExceptional = jsonResult.getAsJsonPrimitive("exceptional");

		Class resultClass = null;
		try {
			resultClass = ClassUtils.getClass(jsonReturnClass.getAsString());
		} catch (ClassNotFoundException e) {
			throw new JsonParseException("Cannot find a matching class by name: " + jsonReturnClass.getAsString(), e);
		}

		Result result = null;
		if (jsonReturn.isJsonNull()) {
			result = new Result(null, resultClass);
		} else {
			Object resultObj = null;
			if (resultClass.isPrimitive()) {
				resultObj = PrimitiveWrapperUtils.toPrimitive(jsonReturn.getAsString(), resultClass);
			} else if (ClassUtils.isPrimitiveOrWrapper(resultClass)) {
				resultObj = PrimitiveWrapperUtils.toPrimitiveWrapper(jsonReturn.getAsString(), resultClass);
			} else if (Throwable.class.isAssignableFrom(resultClass)) {
				// Throwables were serialized as stack-trace text; keep as string.
				resultObj = jsonReturn.getAsString();
			} else {
				resultObj = context.deserialize(jsonReturn, resultClass);
			}
			result = new Result(resultObj, resultClass);
		}

		response = new Response(id, result, resultClass.getName());
		return response;
	}
}
apache-2.0
wzhnsc/TestViewPageAutoScroll
app/src/main/java/com/wzhnsc/testviewpageautoscroll/ViewPageAutoScroll.java
17179
package com.wzhnsc.testviewpageautoscroll;

import android.content.Context;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.graphics.Shader;
import android.graphics.drawable.BitmapDrawable;
import android.os.Build;
import android.os.Handler;
import android.os.Parcelable;
import android.support.v4.view.PagerAdapter;
import android.support.v4.view.ViewPager;
import android.util.AttributeSet;
import android.util.Log;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.FrameLayout;
import android.widget.ImageView;
import android.widget.TextView;
import android.widget.Toast;

import com.nostra13.universalimageloader.cache.disc.impl.UnlimitedDiskCache;
import com.nostra13.universalimageloader.cache.disc.naming.HashCodeFileNameGenerator;
import com.nostra13.universalimageloader.cache.memory.impl.WeakMemoryCache;
import com.nostra13.universalimageloader.core.DisplayImageOptions;
import com.nostra13.universalimageloader.core.ImageLoader;
import com.nostra13.universalimageloader.core.ImageLoaderConfiguration;
import com.nostra13.universalimageloader.core.assist.FailReason;
import com.nostra13.universalimageloader.core.assist.ImageScaleType;
import com.nostra13.universalimageloader.core.assist.QueueProcessingType;
import com.nostra13.universalimageloader.core.download.BaseImageDownloader;
import com.nostra13.universalimageloader.core.listener.ImageLoadingListener;

import java.io.File;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

import static com.nostra13.universalimageloader.utils.StorageUtils.getOwnCacheDirectory;

/**
 * Banner widget: a ViewPager that auto-scrolls on a timer and loops endlessly in
 * both directions.
 *
 * Looping is simulated by reporting Integer.MAX_VALUE pages in the adapter, starting
 * in the middle, and mapping every position back onto the data list with modulo.
 * Only three ImageViews ({@code mImagePage}) are recycled to back all pages.
 */
public class ViewPageAutoScroll extends FrameLayout {
    private Context mContext;

    // Flag: true while the current page change was triggered by the auto-scroll
    // timer (as opposed to the user dragging); used to decide whether the timer
    // must be restarted after a page settles.
    private boolean mbIsAutoScroll = false;

    public ViewPageAutoScroll(Context context) {
        super(context);
        mContext = context;
        LayoutInflater.from(mContext).inflate(R.layout.banner, this);
    }

    public ViewPageAutoScroll(Context context, AttributeSet attrs) {
        super(context, attrs);
        mContext = context;
        LayoutInflater.from(mContext).inflate(R.layout.banner, this);
    }

    public ViewPageAutoScroll(Context context, AttributeSet attrs, int defStyle) {
        super(context, attrs, defStyle);
        mContext = context;
        LayoutInflater.from(mContext).inflate(R.layout.banner, this);
    }

    /** Plain data holder for one banner entry: a title plus a background image URL. */
    public static class BannerInfo {
        // Title text shown below the image
        String strTitle;

        public String getStrTitle() {
            return strTitle;
        }

        public void setStrTitle(String strTitle) {
            this.strTitle = strTitle;
        }

        // Background image URL
        String strBgUrl;

        public String getStrBgUrl() {
            return strBgUrl;
        }

        public void setStrBgUrl(String strBgUrl) {
            this.strBgUrl = strBgUrl;
        }
    }

    // Disk cache directory (relative to the app's own cache dir) for downloaded images
    public static final String IMAGE_CACHE_PATH = "imageloader/Cache";

    private ViewPager mViewPager;

    // The three recycled page views; page N uses mImagePage[N % 3]
    private ImageView mImagePage[] = new ImageView[3];

    // One pre-loaded ImageView per banner entry; used only as a bitmap source
    private List<ImageView> mImageViewList;

    // The indicator dots shown under the title
    private List<View> mDotList;

    private TextView mtvTitle;
    private TextView mtvPagination;

    // Timer driving the automatic page switch
    private ScheduledExecutorService mScheduledExecutorService;

    // Asynchronous image loading (Universal Image Loader)
    private ImageLoader mImageLoader;
    private DisplayImageOptions milOptions;

    // The carousel banner data
    private List<BannerInfo> mBannerDataList;

    private Handler mHandler = new Handler();

    // Posted from the timer thread onto the UI thread: advance one page
    private Runnable doUpdateUI = new Runnable() {
        public void run() {
            mViewPager.setCurrentItem(mViewPager.getCurrentItem() + 1);
        }
    };

    /** Configures the global ImageLoader singleton and the per-display options. */
    private void initImageLoader() {
        // Get the ImageLoader singleton instance
        mImageLoader = ImageLoader.getInstance();
        File cacheDir = getOwnCacheDirectory(mContext, IMAGE_CACHE_PATH);
        ImageLoaderConfiguration config = new ImageLoaderConfiguration.Builder(mContext)
                .threadPoolSize(3) // default
                .threadPriority(Thread.NORM_PRIORITY - 1) // default
                .tasksProcessingOrder(QueueProcessingType.FIFO) // default
                .denyCacheImageMultipleSizesInMemory()
                .memoryCache(new WeakMemoryCache())
                .diskCache(new UnlimitedDiskCache(cacheDir)) // default
                .diskCacheFileNameGenerator(new HashCodeFileNameGenerator()) // default
                .defaultDisplayImageOptions(DisplayImageOptions.createSimple()) // default
                .imageDownloader(new BaseImageDownloader(mContext))
                .build();
        mImageLoader.init(config);

        milOptions = new DisplayImageOptions.Builder()
                .cacheInMemory(true)
                .cacheOnDisk(true)
                .showImageOnFail(R.drawable.top_banner_android)
                .showImageOnLoading(R.drawable.top_banner_android)
                .showImageForEmptyUri(R.drawable.top_banner_android)
                .imageScaleType(ImageScaleType.IN_SAMPLE_INT)
                .bitmapConfig(Bitmap.Config.ARGB_8888)
                .resetViewBeforeLoading(true)
                .build();
    }

    /** Mock carousel data (two hard-coded entries; three more are commented out). */
    private List<BannerInfo> getTestData() {
        List<BannerInfo> listBannerInfo = new ArrayList<BannerInfo>();

        BannerInfo biData = new BannerInfo();
        biData.setStrTitle("这是第一页");
        biData.setStrBgUrl("http://g.hiphotos.baidu.com/image/w%3D310/sign=bb99d6add2c8a786be2a4c0f5708c9c7/d50735fae6cd7b8900d74cd40c2442a7d9330e29.jpg");
        listBannerInfo.add(biData);

        BannerInfo biData2 = new BannerInfo();
        biData2.setStrTitle("这是第二页");
        biData2.setStrBgUrl("http://g.hiphotos.baidu.com/image/w%3D310/sign=7cbcd7da78f40ad115e4c1e2672e1151/eaf81a4c510fd9f9a1edb58b262dd42a2934a45e.jpg");
        listBannerInfo.add(biData2);

        // BannerInfo biData3 = new BannerInfo();
        // biData3.setStrTitle("这是第三页");
        // biData3.setStrBgUrl("http://e.hiphotos.baidu.com/image/w%3D310/sign=392ce7f779899e51788e3c1572a6d990/8718367adab44aed22a58aeeb11c8701a08bfbd4.jpg");
        // listBannerInfo.add(biData3);
        //
        // BannerInfo biData4 = new BannerInfo();
        // biData4.setStrTitle("这是第四页");
        // biData4.setStrBgUrl("http://d.hiphotos.baidu.com/image/w%3D310/sign=54884c82b78f8c54e3d3c32e0a282dee/a686c9177f3e670932e4cf9338c79f3df9dc55f2.jpg");
        // listBannerInfo.add(biData4);
        //
        // BannerInfo biData5 = new BannerInfo();
        // biData5.setStrTitle("这是第五页");
        // biData5.setStrBgUrl("http://e.hiphotos.baidu.com/image/w%3D310/sign=66270b4fe8c4b7453494b117fffd1e78/0bd162d9f2d3572c7dad11ba8913632762d0c30d.jpg");
        // listBannerInfo.add(biData5);

        return listBannerInfo;
    }

    /**
     * Builds the hidden source ImageView for banner entry {@code Position} and starts
     * loading its bitmap asynchronously. The view is never attached to the layout; it
     * only holds the decoded bitmap for the recycled page views to copy.
     */
    private void createImageView(int Position) {
        ImageView imageView = new ImageView(mContext);
        // Keep the source aspect: scale/crop around the geometric center so only an
        // ImageView-sized region of the original image is drawn
        imageView.setScaleType(ImageView.ScaleType.CENTER_CROP);

        // Load the image asynchronously
        mImageLoader.displayImage(mBannerDataList.get(Position).getStrBgUrl(), imageView, milOptions, new ImageLoadingListener() {
            @Override
            public void onLoadingStarted(String s, View view) {
                Log.i("ViewPageAutoScroll", "onLoadingStarted");
            }

            @Override
            public void onLoadingFailed(String s, View view, FailReason failReason) {
                Log.i("ViewPageAutoScroll", "onLoadingFailed");
            }

            @Override
            public void onLoadingComplete(String s, View view, Bitmap bitmap) {
                Log.i("ViewPageAutoScroll", "onLoadingComplete");
            }

            @Override
            public void onLoadingCancelled(String s, View view) {
                Log.i("ViewPageAutoScroll", "onLoadingCancelled");
            }
        });

        mImageViewList.add(imageView);
    }

    /**
     * One-time UI setup, called from onFinishInflate(): loads data, wires the
     * ViewPager + indicator dots + title/pagination labels, and prepares the three
     * recycled page views.
     */
    private void initInterface() {
        // ImageLoader must be initialized before use
        initImageLoader();

        mScrollTask = new ScrollTask();

        // Banner (ad) data
        mBannerDataList = getTestData();

        mImageViewList = new ArrayList<ImageView>();

        // The five indicator dots defined in the layout
        mDotList = new ArrayList<View>();
        View dot0 = findViewById(R.id.v_dot0);
        View dot1 = findViewById(R.id.v_dot1);
        View dot2 = findViewById(R.id.v_dot2);
        View dot3 = findViewById(R.id.v_dot3);
        View dot4 = findViewById(R.id.v_dot4);
        mDotList.add(dot0);
        mDotList.add(dot1);
        mDotList.add(dot2);
        mDotList.add(dot3);
        mDotList.add(dot4);

        mtvTitle = (TextView)findViewById(R.id.tv_title);
        // Set the title for the initially shown entry
        mtvTitle.setText(mBannerDataList.get(0).getStrTitle());

        mtvPagination = (TextView)findViewById(R.id.tv_pagination);
        // Set the page number, e.g. "1/2"
        mtvPagination.setText(1 + "/" + mBannerDataList.size());

        mViewPager = (ViewPager)findViewById(R.id.vp_banner);
        // Install the adapter that populates the ViewPager pages
        mViewPager.setAdapter(new ViewPageAdapter());
        // Listener invoked whenever the visible page changes
        mViewPager.addOnPageChangeListener(new ViewPageChangeListener());
        // Start in the middle of the huge virtual range so both directions can loop
        mViewPager.setCurrentItem(mViewPager.getAdapter().getCount() / 2);

        ImageView ivMasking = (ImageView)findViewById(R.id.iv_masking);
        Bitmap bitmap = BitmapFactory.decodeResource(getResources(), R.drawable.masking_banner);
        BitmapDrawable bd = new BitmapDrawable(getResources(), bitmap);
        bd.setTileModeXY(Shader.TileMode.REPEAT, Shader.TileMode.REPEAT);
        // setBackground() requires API 16+; fall back to the deprecated call below that
        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN) {
            ivMasking.setBackground(bd);
        } else {
            ivMasking.setBackgroundDrawable(bd);
        }

        // Dynamically create the source images and show one indicator dot per entry
        // (assumes mBannerDataList.size() <= 5 dots defined in the layout — TODO confirm)
        for (int i = 0; i < mBannerDataList.size(); i++) {
            createImageView(i);
            mDotList.get(i).setVisibility(View.VISIBLE);
        }

        // The three recycled page views shared by all adapter positions
        mImagePage[0] = new ImageView(mContext);
        mImagePage[0].setScaleType(ImageView.ScaleType.CENTER_CROP);
        mImagePage[1] = new ImageView(mContext);
        mImagePage[1].setScaleType(ImageView.ScaleType.CENTER_CROP);
        mImagePage[2] = new ImageView(mContext);
        mImagePage[2].setScaleType(ImageView.ScaleType.CENTER_CROP);
    }

    /** Timer tick: hop to the UI thread and advance the pager by one page. */
    private class ScrollTask implements Runnable {
        @Override
        public void run() {
            synchronized (mViewPager) {
                mHandler.post(doUpdateUI);
            }
        }
    }

    private ScrollTask mScrollTask;

    /** Updates title/pagination/dots on page change and pauses the timer while the user drags. */
    private class ViewPageChangeListener implements ViewPager.OnPageChangeListener {
        // Previously selected (mapped) position, to un-highlight its dot
        private int mOldPosition = 0;

        @Override
        public void onPageScrollStateChanged(int arg0) {
            Log.i("ViewPageAutoScroll",
                  "ViewPageChangeListener - onPageScrollStateChanged - arg0 = " + arg0);

            // State 1 (SCROLL_STATE_DRAGGING): the user is swiping
            if (1 == arg0) {
                // Stop the timer while the user pages manually
                if (!mbIsAutoScroll) {
                    mScheduledExecutorService.shutdown();
                }
            }
            // State 2 (SCROLL_STATE_SETTLING): the swipe is finishing
            else if (2 == arg0) {
                // Restart the timer after a manual page; a timer-driven page just clears the flag
                if (!mbIsAutoScroll) {
                    mScheduledExecutorService = Executors.newSingleThreadScheduledExecutor();
                    // Once visible, switch the image every five seconds
                    mScheduledExecutorService.scheduleAtFixedRate(mScrollTask, 5, 5, TimeUnit.SECONDS);
                }
                else {
                    mbIsAutoScroll = false;
                }
            }
        }

        @Override
        public void onPageScrolled(int arg0, float arg1, int arg2) {
            Log.i("ViewPageAutoScroll",
                  "ViewPageChangeListener - onPageScrolled - " +
                  "arg0 = " + arg0 + " arg1 = " + arg1 + " arg2 = " + arg2);
        }

        @Override
        public void onPageSelected(int position) {
            // Map the virtual position back onto the data list
            final int mapPos = position % mBannerDataList.size();

            Log.i("ViewPageAutoScroll",
                  "ViewPageChangeListener - onPageScrollStateChanged - " +
                  "position = " + position + " mapPos = " + mapPos);

            BannerInfo bannerInfo = mBannerDataList.get(mapPos);

            // Update the title
            mtvTitle.setText(bannerInfo.getStrTitle());
            // Update the page number
            mtvPagination.setText(mapPos + 1 + "/" + mBannerDataList.size());

            // Move the highlighted indicator dot
            mDotList.get(mOldPosition).setBackgroundResource(R.drawable.dot_normal);
            mDotList.get(mapPos).setBackgroundResource(R.drawable.dot_focused);

            mOldPosition = mapPos;
        }
    }

    /** Adapter that fakes an endless pager by recycling three ImageViews over Integer.MAX_VALUE positions. */
    private class ViewPageAdapter extends PagerAdapter {
        @Override
        public int getCount() {
            // A single entry needs no looping; otherwise pretend the pager is endless
            return (1 == mBannerDataList.size()) ? 1 : Integer.MAX_VALUE;
        }

        @Override
        public Object instantiateItem(ViewGroup container, final int position) {
            final int mapPos = position % mBannerDataList.size();

            Log.i("ViewPageAutoScroll",
                  "ViewPageAdapter - instantiateItem - " +
                  "position = " + position + " mapPos = " + mapPos);

            // Pick one of the three recycled views; detach it from its previous parent first
            ImageView iv = mImagePage[position % 3];
            if (null != iv.getParent()) {
                ((ViewGroup)iv.getParent()).removeView(iv);
            }
            // Copy the bitmap from the pre-loaded source view for this entry
            iv.setImageBitmap(((BitmapDrawable)mImageViewList.get(mapPos).getDrawable()).getBitmap());
            ((ViewPager)container).addView(iv);

            final BannerInfo bannerInfo = mBannerDataList.get(mapPos);

            // Install the click handler for this page here
            iv.setOnClickListener(new OnClickListener() {
                @Override
                public void onClick(View v) {
                    // Handle the navigation logic
                    Toast.makeText(mContext,
                                   "You click" + mapPos + "Image",
                                   Toast.LENGTH_LONG)
                         .show();
                }
            });

            return iv;
        }

        @Override
        public void destroyItem(ViewGroup arg0, int arg1, Object arg2) {
            Log.i("ViewPageAutoScroll", "ViewPageAdapter - destroyItem - arg1 = " + arg1);

            // When looping, the recycled views must NOT be released here:
            // paging left calls instantiateItem for the new position BEFORE destroyItem
            // releases the old rightmost view, so removing it here would blank that page;
            // paging right does release first, then create (see log excerpt below).
            /*
            11-21 18:43:41.910 19297-19297/com.wzhnsc.testviewpageautoscroll I/ViewPageAutoScroll: ViewPageAdapter - destroyItem - arg1 = 1073741826
            11-21 18:43:41.910 19297-19297/com.wzhnsc.testviewpageautoscroll I/ViewPageAutoScroll: ViewPageAdapter - instantiateItem - position = 1073741829 mapPos = 1
            11-21 18:45:45.180 19297-19297/com.wzhnsc.testviewpageautoscroll I/ViewPageAutoScroll: ViewPageAdapter - instantiateItem - position = 1073741826 mapPos = 0
            11-21 18:45:45.180 19297-19297/com.wzhnsc.testviewpageautoscroll I/ViewPageAutoScroll: ViewPageAdapter - destroyItem - arg1 = 1073741829
            */
            // ((ViewPager)arg0).removeView((View)arg2);
        }

        @Override
        public boolean isViewFromObject(View arg0, Object arg1) {
            return arg0 == arg1;
        }

        @Override
        public void restoreState(Parcelable arg0, ClassLoader arg1) {
        }

        @Override
        public Parcelable saveState() {
            return null;
        }

        @Override
        public void startUpdate(ViewGroup arg0) {
        }

        @Override
        public void finishUpdate(ViewGroup arg0) {
        }
    }

    @Override
    protected void onFinishInflate() {
        super.onFinishInflate();
        initInterface();
    }

    @Override
    protected void onAttachedToWindow() {
        super.onAttachedToWindow();
        mScheduledExecutorService = Executors.newSingleThreadScheduledExecutor();
        // Once visible, switch the image every five seconds
        // (NOTE: the original comment said "every two seconds", but the code schedules 5s)
        mScheduledExecutorService.scheduleAtFixedRate(mScrollTask, 5, 5, TimeUnit.SECONDS);
    }

    @Override
    protected void onDetachedFromWindow() {
        super.onDetachedFromWindow();
        // Stop switching when the view is no longer visible
        mScheduledExecutorService.shutdown();
    }
}
apache-2.0
adbrowning/HadoopUtils
src/test/java/hadoop/writable/OffHeapWritableTest.java
6891
/*
 * Copyright 2014 Adam Browning
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package hadoop.writable;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Arrays;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.*;

/**
 * Unit tests for OffHeapWritable.
 *
 * The tests pin the on-wire layout: an 8-byte header, a 4-byte big-endian
 * payload length, then the payload bytes (see generateExpectedBytes, which
 * mirrors that layout). The writable under test is built with a page size of
 * 10 and capacity 30 (see setUp), so several tests exercise the exact-page
 * and page-plus-one boundaries.
 *
 * @author adam
 */
public class OffHeapWritableTest {
    // Fresh instance per test; page size 10, max size 30 — presumably
    // (pageSize, maxSize); TODO confirm against OffHeapWritable's constructor.
    private OffHeapWritable writable;

    public OffHeapWritableTest() {
    }

    @BeforeClass
    public static void setUpClass() {
    }

    @AfterClass
    public static void tearDownClass() {
    }

    @Before
    public void setUp() {
        writable = new OffHeapWritable(10, 30);
    }

    @After
    public void tearDown() {
    }

    /**
     * Test of write method, of class OffHeapWritable.
     * A single byte (5) should serialize as header + length 1 + that byte:
     * byte 3 is the low byte of the leading int (1), bytes 8-11 are the
     * big-endian length (1), byte 12 is the payload.
     */
    @Test
    public void testSimpleWrite_DataOutput() throws Exception {
        System.out.println("write");
        ByteArrayOutputStream bout = new ByteArrayOutputStream();
        writable.write(5);
        DataOutputStream d = new DataOutputStream(bout);
        d.flush();
        // bout may already hold stream framing; record where our data begins
        int dataStartsAt = bout.size();
        System.out.println("Data starts at: " + dataStartsAt);
        writable.write(d);
        d.close();
        assertEquals("resulting bytes: " + Arrays.toString(bout.toByteArray()), 1, bout.toByteArray()[dataStartsAt+3]);
        assertEquals("resulting bytes: " + Arrays.toString(bout.toByteArray()), 1, bout.toByteArray()[dataStartsAt+11]);
        assertEquals("resulting bytes: " + Arrays.toString(bout.toByteArray()), 5, bout.toByteArray()[dataStartsAt+12]);
    }

    /** Exactly one full page (10 bytes) serializes to the expected layout. */
    @Test
    public void testFullPageWrite() throws Exception {
        ByteArrayOutputStream bout = new ByteArrayOutputStream();
        for(int i = 0; i < 10; ++i) {
            writable.write(5);
        }
        DataOutputStream d = new DataOutputStream(bout);
        d.flush();
        int dataStartsAt = bout.size();
        System.out.println("Data starts at: " + dataStartsAt);
        writable.write(d);
        d.close();
        byte[] writtenBytes = bout.toByteArray();
        byte[] dataBytes = Arrays.copyOfRange(writtenBytes, dataStartsAt, writtenBytes.length);
        byte[] expectedBytes = generateExpectedBytes(5, 10);
        assertArrayEquals(expectedBytes, dataBytes);
    }

    /** One byte past a page boundary (11 bytes with page size 10) still round-trips. */
    @Test
    public void testFullPageWritePlusOne() throws Exception {
        for(int i = 0; i < 11; ++i) {
            writable.write(7);
        }
        ByteArrayOutputStream sink = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(sink);
        writable.write(out);
        out.close();
        assertArrayEquals(generateExpectedBytes(7, 11), sink.toByteArray());
    }

    /** reset() discards previously written bytes; only post-reset writes appear. */
    @Test
    public void testReset() throws Exception {
        for(int i = 0; i < 11; ++i) {
            writable.write(7);
        }
        writable.reset();
        for(int i = 0; i < 3; ++i) {
            writable.write(3);
        }
        ByteArrayOutputStream sink = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(sink);
        writable.write(out);
        out.close();
        assertArrayEquals(generateExpectedBytes(3, 3), sink.toByteArray());
    }

    /** readFields() followed by write() reproduces the serialized form byte-for-byte. */
    @Test
    public void testRead() throws Exception {
        byte[] readFrom = generateExpectedBytes(3, 15);
        writable.readFields(new DataInputStream(new ByteArrayInputStream(readFrom)));
        ByteArrayOutputStream sink = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(sink);
        writable.write(out);
        out.close();
        assertArrayEquals(readFrom, sink.toByteArray());
    }

    /**
     * Self-test of the generateExpectedBytes helper, using a multi-byte count
     * (0x1001) to check the big-endian length encoding.
     */
    @Test
    public void testGenerateExpectedBytes() {
        byte[] generated = generateExpectedBytes((byte) 3, 0x1001);
        byte[] expected = new byte[0x1001+12];
        System.arraycopy(new byte[]{0,0,0,1,0,0,0,0,0,0,0x10,1}, 0, expected, 0, 12);
        for(int i = 0; i < 0x1001; ++i) {
            expected[i+12] = 3;
        }
        assertArrayEquals("Expected: " + Arrays.toString(Arrays.copyOfRange(expected, 0, 14)) + "\nActual: " + Arrays.toString(Arrays.copyOfRange(generated,0, 14)), expected, generated);
    }

    /** asInputStream() yields the written bytes in order, then -1 (half a page). */
    @Test
    public void testInputStreamHalfPage() throws Exception {
        for(int i = 0; i < 5; ++i) {
            writable.write(i);
        }
        InputStream in = writable.asInputStream();
        for(int i = 0; i < 5; ++i) {
            assertEquals(i, in.read());
        }
        assertEquals(-1, in.read());
    }

    /** Same as above at exactly one full page (10 bytes). */
    @Test
    public void testInputStreamExactPage() throws Exception {
        for(int i = 0; i < 10; ++i) {
            writable.write(i);
        }
        InputStream in = writable.asInputStream();
        for(int i = 0; i < 10; ++i) {
            assertEquals(i, in.read());
        }
        assertEquals(-1, in.read());
    }

    /** Same as above at one byte past a page boundary (11 bytes). */
    @Test
    public void testInputStreamPagePlusOne() throws Exception {
        for(int i = 0; i < 11; ++i) {
            writable.write(i);
        }
        InputStream in = writable.asInputStream();
        for(int i = 0; i < 11; ++i) {
            assertEquals(i, in.read());
        }
        assertEquals(-1, in.read());
    }

    /**
     * Builds the expected serialized form: fixed 8-byte header {0,0,0,1,0,0,0,0}
     * (the leading int 1 is presumably a version or page count — TODO confirm
     * against OffHeapWritable.write), a 4-byte big-endian count, then
     * {@code count} copies of {@code fillWith}.
     */
    private byte[] generateExpectedBytes(int fillWith, int count) {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        try {
            out.write(new byte[]{0,0,0,1,0,0,0,0});
            System.out.println(count + " => " + ((count & 0xFF000000) >> 24) + " " + ((count & 0x00FF0000) >> 16) + " " + ((count & 0x0000FF00) >> 8) + " " + (count & 0x000000FF));
            // big-endian 4-byte count
            out.write((count & 0xFF000000) >> 24);
            out.write((count & 0x00FF0000) >> 16);
            out.write((count & 0x0000FF00) >> 8);
            out.write(count & 0x000000FF);
            for(int i = 0; i < count; ++i) {
                out.write(fillWith);
            }
        } catch (IOException ex) {
            // ByteArrayOutputStream never actually throws; log just in case
            Logger.getLogger(OffHeapWritableTest.class.getName()).log(Level.SEVERE, null, ex);
        }
        return out.toByteArray();
    }
}
apache-2.0
jnthnclt/lab
lab-roaring/src/test/java/com/github/jnthnclt/os/lab/core/bitmaps/LABIndexKeyRangeTest.java
891
package com.github.jnthnclt.os.lab.core.bitmaps; import org.junit.Assert; import org.testng.annotations.Test; public class LABIndexKeyRangeTest { @Test public void testCompareTo() throws Exception { byte[] startInclusiveKey = { 1, 2, 3, 4 }; byte[] stopExclusiveKey = { 2, 2, 3, 4 }; LABIndexKeyRange a = new LABIndexKeyRange( startInclusiveKey, stopExclusiveKey); Assert.assertEquals(startInclusiveKey, a.getStartInclusiveKey()); Assert.assertEquals(stopExclusiveKey, a.getStopExclusiveKey()); Assert.assertFalse(a.contains(new byte[]{1, 2, 3, 3})); Assert.assertTrue(a.contains(startInclusiveKey)); Assert.assertTrue(a.contains(new byte[]{1,3,3,3})); Assert.assertFalse(a.contains(stopExclusiveKey)); Assert.assertFalse(a.contains(new byte[]{2, 2, 3, 5})); } }
apache-2.0
jdepend/cooper
cooper-standalone/src/main/java/jdepend/client/ui/result/panel/ArchitectPatternPanel.java
938
package jdepend.client.ui.result.panel; import jdepend.framework.exception.JDependException; import jdepend.framework.ui.util.ExceptionPrinter; import jdepend.knowledge.architectpattern.ArchitectPatternMgr; import jdepend.knowledge.architectpattern.ArchitectPatternResult; import jdepend.model.result.AnalysisResult; import jdepend.client.ui.result.framework.ResultPanel; import jdepend.client.ui.result.framework.SubResultTabPanel; public final class ArchitectPatternPanel extends SubResultTabPanel { @Override protected void init(AnalysisResult result) { ArchitectPatternResult apResult = null; try { apResult = ArchitectPatternMgr.getInstance().identify(result); } catch (JDependException e) { e.printStackTrace(); this.add(ExceptionPrinter.createComponent(e)); } if (apResult != null) { this.add(ResultPanel.createTextViewer(new StringBuilder(apResult.getResult()))); } } }
apache-2.0
nvoron23/cassandra
src/java/org/apache/cassandra/db/ColumnFamilyStore.java
114395
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.cassandra.db; import java.io.*; import java.lang.management.ManagementFactory; import java.nio.ByteBuffer; import java.util.*; import java.util.concurrent.*; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import java.util.regex.Pattern; import javax.management.*; import javax.management.openmbean.*; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.*; import com.google.common.collect.*; import com.google.common.util.concurrent.*; import org.apache.cassandra.io.FSWriteError; import org.json.simple.*; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.cassandra.cache.*; import org.apache.cassandra.concurrent.*; import org.apache.cassandra.config.*; import org.apache.cassandra.config.CFMetaData.SpeculativeRetry; import org.apache.cassandra.db.commitlog.CommitLog; import org.apache.cassandra.db.commitlog.ReplayPosition; import org.apache.cassandra.db.compaction.*; import org.apache.cassandra.db.composites.CellName; import org.apache.cassandra.db.composites.CellNameType; import org.apache.cassandra.db.composites.Composite; import 
org.apache.cassandra.db.filter.ColumnSlice; import org.apache.cassandra.db.filter.ExtendedFilter; import org.apache.cassandra.db.filter.IDiskAtomFilter; import org.apache.cassandra.db.filter.QueryFilter; import org.apache.cassandra.db.filter.SliceQueryFilter; import org.apache.cassandra.db.index.SecondaryIndex; import org.apache.cassandra.db.index.SecondaryIndexManager; import org.apache.cassandra.dht.*; import org.apache.cassandra.dht.Range; import org.apache.cassandra.exceptions.ConfigurationException; import org.apache.cassandra.io.FSReadError; import org.apache.cassandra.io.compress.CompressionParameters; import org.apache.cassandra.io.sstable.Descriptor; import org.apache.cassandra.io.sstable.*; import org.apache.cassandra.io.sstable.format.*; import org.apache.cassandra.io.sstable.metadata.CompactionMetadata; import org.apache.cassandra.io.sstable.metadata.MetadataType; import org.apache.cassandra.io.util.FileUtils; import org.apache.cassandra.metrics.ColumnFamilyMetrics; import org.apache.cassandra.metrics.ColumnFamilyMetrics.Sampler; import org.apache.cassandra.service.CacheService; import org.apache.cassandra.service.StorageService; import org.apache.cassandra.streaming.StreamLockfile; import org.apache.cassandra.tracing.Tracing; import org.apache.cassandra.utils.*; import org.apache.cassandra.utils.concurrent.*; import org.apache.cassandra.utils.TopKSampler.SamplerResult; import org.apache.cassandra.utils.memory.MemtableAllocator; import com.clearspring.analytics.stream.Counter; public class ColumnFamilyStore implements ColumnFamilyStoreMBean { private static final Logger logger = LoggerFactory.getLogger(ColumnFamilyStore.class); private static final ExecutorService flushExecutor = new JMXEnabledThreadPoolExecutor(DatabaseDescriptor.getFlushWriters(), StageManager.KEEPALIVE, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), new NamedThreadFactory("MemtableFlushWriter"), "internal"); // post-flush executor is single threaded to provide guarantee that 
any flush Future on a CF will never return until prior flushes have completed private static final ExecutorService postFlushExecutor = new JMXEnabledThreadPoolExecutor(1, StageManager.KEEPALIVE, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), new NamedThreadFactory("MemtablePostFlush"), "internal"); private static final ExecutorService reclaimExecutor = new JMXEnabledThreadPoolExecutor(1, StageManager.KEEPALIVE, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), new NamedThreadFactory("MemtableReclaimMemory"), "internal"); private static final String[] COUNTER_NAMES = new String[]{"raw", "count", "error", "string"}; private static final String[] COUNTER_DESCS = new String[] { "partition key in raw hex bytes", "value of this partition for given sampler", "value is within the error bounds plus or minus of this", "the partition key turned into a human readable format" }; private static final CompositeType COUNTER_COMPOSITE_TYPE; private static final TabularType COUNTER_TYPE; private static final String[] SAMPLER_NAMES = new String[]{"cardinality", "partitions"}; private static final String[] SAMPLER_DESCS = new String[] { "cardinality of partitions", "list of counter results" }; private static final String SAMPLING_RESULTS_NAME = "SAMPLING_RESULTS"; private static final CompositeType SAMPLING_RESULT; static { try { OpenType<?>[] counterTypes = new OpenType[] { SimpleType.STRING, SimpleType.LONG, SimpleType.LONG, SimpleType.STRING }; COUNTER_COMPOSITE_TYPE = new CompositeType(SAMPLING_RESULTS_NAME, SAMPLING_RESULTS_NAME, COUNTER_NAMES, COUNTER_DESCS, counterTypes); COUNTER_TYPE = new TabularType(SAMPLING_RESULTS_NAME, SAMPLING_RESULTS_NAME, COUNTER_COMPOSITE_TYPE, COUNTER_NAMES); OpenType<?>[] samplerTypes = new OpenType[] { SimpleType.LONG, COUNTER_TYPE }; SAMPLING_RESULT = new CompositeType(SAMPLING_RESULTS_NAME, SAMPLING_RESULTS_NAME, SAMPLER_NAMES, SAMPLER_DESCS, samplerTypes); } catch (OpenDataException e) { throw Throwables.propagate(e); } } public 
final Keyspace keyspace; public final String name; public final CFMetaData metadata; public final IPartitioner partitioner; private final String mbeanName; private volatile boolean valid = true; /** * Memtables and SSTables on disk for this column family. * * We synchronize on the DataTracker to ensure isolation when we want to make sure * that the memtable we're acting on doesn't change out from under us. I.e., flush * syncronizes on it to make sure it can submit on both executors atomically, * so anyone else who wants to make sure flush doesn't interfere should as well. */ private final DataTracker data; /* The read order, used to track accesses to off-heap memtable storage */ public final OpOrder readOrdering = new OpOrder(); /* This is used to generate the next index for a SSTable */ private final AtomicInteger fileIndexGenerator = new AtomicInteger(0); public final SecondaryIndexManager indexManager; /* These are locally held copies to be changed from the config during runtime */ private volatile DefaultInteger minCompactionThreshold; private volatile DefaultInteger maxCompactionThreshold; private final WrappingCompactionStrategy compactionStrategyWrapper; public final Directories directories; public final ColumnFamilyMetrics metric; public volatile long sampleLatencyNanos; private final ScheduledFuture<?> latencyCalculator; public static void shutdownPostFlushExecutor() throws InterruptedException { postFlushExecutor.shutdown(); postFlushExecutor.awaitTermination(60, TimeUnit.SECONDS); } public void reload() { // metadata object has been mutated directly. make all the members jibe with new settings. // only update these runtime-modifiable settings if they have not been modified. 
if (!minCompactionThreshold.isModified()) for (ColumnFamilyStore cfs : concatWithIndexes()) cfs.minCompactionThreshold = new DefaultInteger(metadata.getMinCompactionThreshold()); if (!maxCompactionThreshold.isModified()) for (ColumnFamilyStore cfs : concatWithIndexes()) cfs.maxCompactionThreshold = new DefaultInteger(metadata.getMaxCompactionThreshold()); compactionStrategyWrapper.maybeReloadCompactionStrategy(metadata); scheduleFlush(); indexManager.reload(); // If the CF comparator has changed, we need to change the memtable, // because the old one still aliases the previous comparator. if (data.getView().getCurrentMemtable().initialComparator != metadata.comparator) switchMemtable(); } void scheduleFlush() { int period = metadata.getMemtableFlushPeriod(); if (period > 0) { logger.debug("scheduling flush in {} ms", period); WrappedRunnable runnable = new WrappedRunnable() { protected void runMayThrow() throws Exception { synchronized (data) { Memtable current = data.getView().getCurrentMemtable(); // if we're not expired, we've been hit by a scheduled flush for an already flushed memtable, so ignore if (current.isExpired()) { if (current.isClean()) { // if we're still clean, instead of swapping just reschedule a flush for later scheduleFlush(); } else { // we'll be rescheduled by the constructor of the Memtable. 
                    forceFlush();
                }
            }
        }
    };
    // NOTE(review): tail of a periodic-flush scheduler whose head is above this chunk — verify against full file.
    ScheduledExecutors.scheduledTasks.schedule(runnable, period, TimeUnit.MILLISECONDS);
    }
}

/**
 * JMX-exposed setter: replaces the compaction strategy from a class name and asks the
 * wrapper to reload it. Invalid names surface as IllegalArgumentException to the caller.
 */
public void setCompactionStrategyClass(String compactionStrategyClass)
{
    try
    {
        metadata.compactionStrategyClass = CFMetaData.createCompactionStrategy(compactionStrategyClass);
        compactionStrategyWrapper.maybeReloadCompactionStrategy(metadata);
    }
    catch (ConfigurationException e)
    {
        // re-throw unchecked so JMX clients get a plain IllegalArgumentException
        throw new IllegalArgumentException(e.getMessage());
    }
}

/** @return fully-qualified class name of the active compaction strategy. */
public String getCompactionStrategyClass()
{
    return metadata.compactionStrategyClass.getName();
}

/** @return current compression options in their Thrift map representation. */
public Map<String,String> getCompressionParameters()
{
    return metadata.compressionParameters().asThriftOptions();
}

/** JMX-exposed setter: replaces compression options; invalid options become IllegalArgumentException. */
public void setCompressionParameters(Map<String,String> opts)
{
    try
    {
        metadata.compressionParameters = CompressionParameters.create(opts);
    }
    catch (ConfigurationException e)
    {
        throw new IllegalArgumentException(e.getMessage());
    }
}

/**
 * Updates the CRC check chance on the live compression metadata of every compressed sstable
 * in the keyspace. Takes effect immediately (mutates shared parameter objects), no restart needed.
 */
public void setCrcCheckChance(double crcCheckChance)
{
    try
    {
        for (SSTableReader sstable : keyspace.getAllSSTables())
            if (sstable.compression)
                sstable.getCompressionMetadata().parameters.setCrcCheckChance(crcCheckChance);
    }
    catch (ConfigurationException e)
    {
        throw new IllegalArgumentException(e.getMessage());
    }
}

/**
 * Builds the store: wires metrics/index manager, optionally loads existing sstables and the
 * saved key cache, creates the compaction strategy, registers the JMX mbean, and starts the
 * periodic speculative-retry latency sampler. Initialization order matters: the compaction
 * strategy is created only after sstables are registered with the DataTracker (see comment below).
 */
private ColumnFamilyStore(Keyspace keyspace, String columnFamilyName, IPartitioner partitioner, int generation, CFMetaData metadata, Directories directories, boolean loadSSTables)
{
    assert metadata != null : "null metadata for " + keyspace + ":" + columnFamilyName;
    this.keyspace = keyspace;
    name = columnFamilyName;
    this.metadata = metadata;
    this.minCompactionThreshold = new DefaultInteger(metadata.getMinCompactionThreshold());
    this.maxCompactionThreshold = new DefaultInteger(metadata.getMaxCompactionThreshold());
    this.partitioner = partitioner;
    this.directories = directories;
    this.indexManager = new SecondaryIndexManager(this);
    this.metric = new ColumnFamilyMetrics(this);
    // continue numbering sstable generations after the highest one found on disk
    fileIndexGenerator.set(generation);
    // initial guess until the latency sampler below produces a real percentile
    sampleLatencyNanos = DatabaseDescriptor.getReadRpcTimeout() / 2;
    CachingOptions caching = metadata.getCaching();
    logger.info("Initializing {}.{}", keyspace.getName(), name);
    // scan for sstables corresponding to this cf and load them
    data = new DataTracker(this);
    if (loadSSTables)
    {
        Directories.SSTableLister sstableFiles = directories.sstableLister().skipTemporary(true);
        Collection<SSTableReader> sstables = SSTableReader.openAll(sstableFiles.list().entrySet(), metadata, this.partitioner);
        data.addInitialSSTables(sstables);
    }
    if (caching.keyCache.isEnabled())
        CacheService.instance.keyCache.loadSaved(this);
    // compaction strategy should be created after the CFS has been prepared
    this.compactionStrategyWrapper = new WrappingCompactionStrategy(this);
    if (maxCompactionThreshold.value() <= 0 || minCompactionThreshold.value() <=0)
    {
        logger.warn("Disabling compaction strategy by setting compaction thresholds to 0 is deprecated, set the compaction option 'enabled' to 'false' instead.");
        this.compactionStrategyWrapper.disable();
    }
    // create the private ColumnFamilyStores for the secondary column indexes
    for (ColumnDefinition info : metadata.allColumns())
    {
        if (info.getIndexType() != null)
            indexManager.addIndexedColumn(info);
    }
    // register the mbean; index CFs use the partitioner type to pick a distinct JMX type name
    String type = this.partitioner instanceof LocalPartitioner ? "IndexColumnFamilies" : "ColumnFamilies";
    mbeanName = "org.apache.cassandra.db:type=" + type + ",keyspace=" + this.keyspace.getName() + ",columnfamily=" + name;
    try
    {
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName nameObj = new ObjectName(mbeanName);
        mbs.registerMBean(this, nameObj);
    }
    catch (Exception e)
    {
        throw new RuntimeException(e);
    }
    logger.debug("retryPolicy for {} is {}", name, this.metadata.getSpeculativeRetry());
    // periodically refresh sampleLatencyNanos from the configured speculative-retry policy
    latencyCalculator = ScheduledExecutors.optionalTasks.scheduleWithFixedDelay(new Runnable()
    {
        public void run()
        {
            SpeculativeRetry retryPolicy = ColumnFamilyStore.this.metadata.getSpeculativeRetry();
            switch (retryPolicy.type)
            {
                case PERCENTILE:
                    // get percentile in nanos
                    sampleLatencyNanos = (long) (metric.coordinatorReadLatency.getSnapshot().getValue(retryPolicy.value) * 1000d);
                    break;
                case CUSTOM:
                    // convert to nanos, since configuration is in millisecond
                    sampleLatencyNanos = (long) (retryPolicy.value * 1000d * 1000d);
                    break;
                default:
                    // NONE/ALWAYS etc.: effectively disable speculation via an unreachable threshold
                    sampleLatencyNanos = Long.MAX_VALUE;
                    break;
            }
        }
    }, DatabaseDescriptor.getReadRpcTimeout(), DatabaseDescriptor.getReadRpcTimeout(), TimeUnit.MILLISECONDS);
}

/** call when dropping or renaming a CF. Performs mbean housekeeping and invalidates CFS to other operations */
public void invalidate()
{
    valid = false;
    try
    {
        unregisterMBean();
    }
    catch (Exception e)
    {
        JVMStabilityInspector.inspectThrowable(e);
        // this shouldn't block anything.
        logger.warn("Failed unregistering mbean: {}", mbeanName, e);
    }
    latencyCalculator.cancel(false);
    compactionStrategyWrapper.shutdown();
    SystemKeyspace.removeTruncationRecord(metadata.cfId);
    data.unreferenceSSTables();
    indexManager.invalidate();
    invalidateCaches();
}

/**
 * Removes every SSTable in the directory from the DataTracker's view.
 * @param directory the unreadable directory, possibly with SSTables in it, but not necessarily.
 */
void maybeRemoveUnreadableSSTables(File directory)
{
    data.removeUnreadableSSTables(directory);
}

/** Unregisters the JMX mbean (if registered) and releases this store's metrics. */
void unregisterMBean() throws MalformedObjectNameException, InstanceNotFoundException, MBeanRegistrationException
{
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName nameObj = new ObjectName(mbeanName);
    if (mbs.isRegistered(nameObj))
        mbs.unregisterMBean(nameObj);
    // unregister metrics
    metric.release();
}

/** Convenience overload: resolves partitioner and metadata from global singletons. */
public static ColumnFamilyStore createColumnFamilyStore(Keyspace keyspace, String columnFamily, boolean loadSSTables)
{
    return createColumnFamilyStore(keyspace, columnFamily, StorageService.getPartitioner(), Schema.instance.getCFMetaData(keyspace.getName(), columnFamily), loadSSTables);
}

/** Convenience overload that always loads existing sstables from disk. */
public static ColumnFamilyStore createColumnFamilyStore(Keyspace keyspace, String columnFamily, IPartitioner partitioner, CFMetaData metadata)
{
    return createColumnFamilyStore(keyspace, columnFamily, partitioner, metadata, true);
}

/**
 * Factory: scans disk (including backups) for the highest existing sstable generation so new
 * sstables never collide, then constructs the store. synchronized so concurrent creations of
 * the same CF cannot race on the generation scan.
 * @throws RuntimeException if any on-disk sstable is from an incompatible format version
 */
private static synchronized ColumnFamilyStore createColumnFamilyStore(Keyspace keyspace, String columnFamily, IPartitioner partitioner, CFMetaData metadata, boolean loadSSTables)
{
    // get the max generation number, to prevent generation conflicts
    Directories directories = new Directories(metadata);
    Directories.SSTableLister lister = directories.sstableLister().includeBackups(true);
    List<Integer> generations = new ArrayList<Integer>();
    for (Map.Entry<Descriptor, Set<Component>> entry : lister.list().entrySet())
    {
        Descriptor desc = entry.getKey();
        generations.add(desc.generation);
        if (!desc.isCompatible())
            throw new RuntimeException(String.format("Incompatible SSTable found. Current version %s is unable to read file: %s. Please run upgradesstables.", desc.getFormat().getLatestVersion(), desc));
    }
    Collections.sort(generations);
    int value = (generations.size() > 0) ? (generations.get(generations.size() - 1)) : 0;
    return new ColumnFamilyStore(keyspace, columnFamily, partitioner, value, metadata, directories, loadSSTables);
}

/**
 * Removes unnecessary files from the cf directory at startup: these include temp files, orphans, zero-length files
 * and compacted sstables. Files that cannot be recognized will be ignored.
 */
public static void scrubDataDirectories(CFMetaData metadata)
{
    Directories directories = new Directories(metadata);
    // remove any left-behind SSTables from failed/stalled streaming
    FileFilter filter = new FileFilter()
    {
        public boolean accept(File pathname)
        {
            return pathname.getPath().endsWith(StreamLockfile.FILE_EXT);
        }
    };
    for (File dir : directories.getCFDirectories())
    {
        File[] lockfiles = dir.listFiles(filter);
        // lock files can be null if I/O error happens
        if (lockfiles == null || lockfiles.length == 0)
            continue;
        logger.info("Removing SSTables from failed streaming session. Found {} files to cleanup.", lockfiles.length);
        for (File lockfile : lockfiles)
        {
            StreamLockfile streamLockfile = new StreamLockfile(lockfile);
            streamLockfile.cleanup();
            streamLockfile.delete();
        }
    }
    logger.debug("Removing compacted SSTable files from {} (see http://wiki.apache.org/cassandra/MemtableSSTable)", metadata.cfName);
    for (Map.Entry<Descriptor,Set<Component>> sstableFiles : directories.sstableLister().list().entrySet())
    {
        Descriptor desc = sstableFiles.getKey();
        Set<Component> components = sstableFiles.getValue();
        if (desc.type.isTemporary)
        {
            // incomplete write from a previous run
            SSTable.delete(desc, components);
            continue;
        }
        File dataFile = new File(desc.filenameFor(Component.DATA));
        if (components.contains(Component.DATA) && dataFile.length() > 0)
            // everything appears to be in order... moving on.
            continue;
        // missing the DATA file! all components are orphaned
        logger.warn("Removing orphans for {}: {}", desc, components);
        for (Component component : components)
        {
            FileUtils.deleteWithConfirm(desc.filenameFor(component));
        }
    }
    // cleanup incomplete saved caches
    Pattern tmpCacheFilePattern = Pattern.compile(metadata.ksName + "-" + metadata.cfName + "-(Key|Row)Cache.*\\.tmp$");
    File dir = new File(DatabaseDescriptor.getSavedCachesLocation());
    if (dir.exists())
    {
        assert dir.isDirectory();
        for (File file : dir.listFiles())
            if (tmpCacheFilePattern.matcher(file.getName()).matches())
                if (!file.delete())
                    logger.warn("could not delete {}", file.getAbsolutePath());
    }
    // also clean out any index leftovers.
    for (ColumnDefinition def : metadata.allColumns())
    {
        if (def.isIndexed())
        {
            CellNameType indexComparator = SecondaryIndex.getIndexComparator(metadata, def);
            if (indexComparator != null)
            {
                // recurse into the hidden index CF's directories
                CFMetaData indexMetadata = CFMetaData.newIndexMetadata(metadata, def, indexComparator);
                scrubDataDirectories(indexMetadata);
            }
        }
    }
}

/**
 * Replacing compacted sstables is atomic as far as observers of DataTracker are concerned, but not on the
 * filesystem: first the new sstables are renamed to "live" status (i.e., the tmp marker is removed), then
 * their ancestors are removed.
 *
 * If an unclean shutdown happens at the right time, we can thus end up with both the new ones and their
 * ancestors "live" in the system. This is harmless for normal data, but for counters it can cause overcounts.
 *
 * To prevent this, we record sstables being compacted in the system keyspace. If we find unfinished
 * compactions, we remove the new ones (since those may be incomplete -- under LCS, we may create multiple
 * sstables from any given ancestor).
*/ public static void removeUnfinishedCompactionLeftovers(CFMetaData metadata, Map<Integer, UUID> unfinishedCompactions) { Directories directories = new Directories(metadata); Set<Integer> allGenerations = new HashSet<>(); for (Descriptor desc : directories.sstableLister().list().keySet()) allGenerations.add(desc.generation); // sanity-check unfinishedCompactions Set<Integer> unfinishedGenerations = unfinishedCompactions.keySet(); if (!allGenerations.containsAll(unfinishedGenerations)) { HashSet<Integer> missingGenerations = new HashSet<>(unfinishedGenerations); missingGenerations.removeAll(allGenerations); logger.debug("Unfinished compactions of {}.{} reference missing sstables of generations {}", metadata.ksName, metadata.cfName, missingGenerations); } // remove new sstables from compactions that didn't complete, and compute // set of ancestors that shouldn't exist anymore Set<Integer> completedAncestors = new HashSet<>(); for (Map.Entry<Descriptor, Set<Component>> sstableFiles : directories.sstableLister().skipTemporary(true).list().entrySet()) { Descriptor desc = sstableFiles.getKey(); Set<Integer> ancestors; try { CompactionMetadata compactionMetadata = (CompactionMetadata) desc.getMetadataSerializer().deserialize(desc, MetadataType.COMPACTION); ancestors = compactionMetadata.ancestors; } catch (IOException e) { throw new FSReadError(e, desc.filenameFor(Component.STATS)); } if (!ancestors.isEmpty() && unfinishedGenerations.containsAll(ancestors) && allGenerations.containsAll(ancestors)) { // any of the ancestors would work, so we'll just lookup the compaction task ID with the first one UUID compactionTaskID = unfinishedCompactions.get(ancestors.iterator().next()); assert compactionTaskID != null; logger.debug("Going to delete unfinished compaction product {}", desc); SSTable.delete(desc, sstableFiles.getValue()); SystemKeyspace.finishCompaction(compactionTaskID); } else { completedAncestors.addAll(ancestors); } } // remove old sstables from compactions that 
did complete for (Map.Entry<Descriptor, Set<Component>> sstableFiles : directories.sstableLister().list().entrySet()) { Descriptor desc = sstableFiles.getKey(); if (completedAncestors.contains(desc.generation)) { // if any of the ancestors were participating in a compaction, finish that compaction logger.debug("Going to delete leftover compaction ancestor {}", desc); SSTable.delete(desc, sstableFiles.getValue()); UUID compactionTaskID = unfinishedCompactions.get(desc.generation); if (compactionTaskID != null) SystemKeyspace.finishCompaction(unfinishedCompactions.get(desc.generation)); } } } // must be called after all sstables are loaded since row cache merges all row versions public void initRowCache() { if (!isRowCacheEnabled()) return; long start = System.nanoTime(); int cachedRowsRead = CacheService.instance.rowCache.loadSaved(this); if (cachedRowsRead > 0) logger.info("Completed loading ({} ms; {} keys) row cache for {}.{}", TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start), cachedRowsRead, keyspace.getName(), name); } public void initCounterCache() { if (!metadata.isCounter() || CacheService.instance.counterCache.getCapacity() == 0) return; long start = System.nanoTime(); int cachedShardsRead = CacheService.instance.counterCache.loadSaved(this); if (cachedShardsRead > 0) logger.info("Completed loading ({} ms; {} shards) counter cache for {}.{}", TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start), cachedShardsRead, keyspace.getName(), name); } /** * See #{@code StorageService.loadNewSSTables(String, String)} for more info * * @param ksName The keyspace name * @param cfName The columnFamily name */ public static synchronized void loadNewSSTables(String ksName, String cfName) { /** ks/cf existence checks will be done by open and getCFS methods for us */ Keyspace keyspace = Keyspace.open(ksName); keyspace.getColumnFamilyStore(cfName).loadNewSSTables(); } /** * #{@inheritDoc} */ public synchronized void loadNewSSTables() { logger.info("Loading new 
SSTables for {}/{}...", keyspace.getName(), name); Set<Descriptor> currentDescriptors = new HashSet<Descriptor>(); for (SSTableReader sstable : data.getView().sstables) currentDescriptors.add(sstable.descriptor); Set<SSTableReader> newSSTables = new HashSet<>(); Directories.SSTableLister lister = directories.sstableLister().skipTemporary(true); for (Map.Entry<Descriptor, Set<Component>> entry : lister.list().entrySet()) { Descriptor descriptor = entry.getKey(); if (currentDescriptors.contains(descriptor)) continue; // old (initialized) SSTable found, skipping if (descriptor.type.isTemporary) // in the process of being written continue; if (!descriptor.isCompatible()) throw new RuntimeException(String.format("Can't open incompatible SSTable! Current version %s, found file: %s", descriptor.getFormat().getLatestVersion(), descriptor)); // force foreign sstables to level 0 try { if (new File(descriptor.filenameFor(Component.STATS)).exists()) descriptor.getMetadataSerializer().mutateLevel(descriptor, 0); } catch (IOException e) { SSTableReader.logOpenException(entry.getKey(), e); continue; } // Increment the generation until we find a filename that doesn't exist. This is needed because the new // SSTables that are being loaded might already use these generation numbers. 
Descriptor newDescriptor; do { newDescriptor = new Descriptor(descriptor.version, descriptor.directory, descriptor.ksname, descriptor.cfname, fileIndexGenerator.incrementAndGet(), Descriptor.Type.FINAL, descriptor.formatType); } while (new File(newDescriptor.filenameFor(Component.DATA)).exists()); logger.info("Renaming new SSTable {} to {}", descriptor, newDescriptor); SSTableWriter.rename(descriptor, newDescriptor, entry.getValue()); SSTableReader reader; try { reader = SSTableReader.open(newDescriptor, entry.getValue(), metadata, partitioner); } catch (IOException e) { SSTableReader.logOpenException(entry.getKey(), e); continue; } newSSTables.add(reader); } if (newSSTables.isEmpty()) { logger.info("No new SSTables were found for {}/{}", keyspace.getName(), name); return; } logger.info("Loading new SSTables and building secondary indexes for {}/{}: {}", keyspace.getName(), name, newSSTables); try (Refs<SSTableReader> refs = Refs.ref(newSSTables)) { data.addSSTables(newSSTables); indexManager.maybeBuildSecondaryIndexes(newSSTables, indexManager.allIndexesNames()); } logger.info("Done loading load new SSTables for {}/{}", keyspace.getName(), name); } public static void rebuildSecondaryIndex(String ksName, String cfName, String... 
idxNames) { ColumnFamilyStore cfs = Keyspace.open(ksName).getColumnFamilyStore(cfName); Set<String> indexes = new HashSet<String>(Arrays.asList(idxNames)); Collection<SSTableReader> sstables = cfs.getSSTables(); try (Refs<SSTableReader> refs = Refs.ref(sstables)) { cfs.indexManager.setIndexRemoved(indexes); logger.info(String.format("User Requested secondary index re-build for %s/%s indexes", ksName, cfName)); cfs.indexManager.maybeBuildSecondaryIndexes(sstables, indexes); cfs.indexManager.setIndexBuilt(indexes); } } public String getColumnFamilyName() { return name; } public String getTempSSTablePath(File directory) { return getTempSSTablePath(directory, DatabaseDescriptor.getSSTableFormat().info.getLatestVersion(), DatabaseDescriptor.getSSTableFormat()); } public String getTempSSTablePath(File directory, SSTableFormat.Type format) { return getTempSSTablePath(directory, format.info.getLatestVersion(), format); } private String getTempSSTablePath(File directory, Version version, SSTableFormat.Type format) { Descriptor desc = new Descriptor(version, directory, keyspace.getName(), name, fileIndexGenerator.incrementAndGet(), Descriptor.Type.TEMP, format); return desc.filenameFor(Component.DATA); } /** * Switches the memtable iff the live memtable is the one provided * * @param memtable */ public Future<?> switchMemtableIfCurrent(Memtable memtable) { synchronized (data) { if (data.getView().getCurrentMemtable() == memtable) return switchMemtable(); } return Futures.immediateFuture(null); } /* * switchMemtable puts Memtable.getSortedContents on the writer executor. When the write is complete, * we turn the writer into an SSTableReader and add it to ssTables where it is available for reads. * This method does not block except for synchronizing on DataTracker, but the Future it returns will * not complete until the Memtable (and all prior Memtables) have been successfully flushed, and the CL * marked clean up to the position owned by the Memtable. 
 */
public ListenableFuture<?> switchMemtable()
{
    synchronized (data)
    {
        logFlush();
        Flush flush = new Flush(false);
        flushExecutor.execute(flush);
        // completion of this task is gated on flush.postFlush, i.e. on all prior flushes too
        ListenableFutureTask<?> task = ListenableFutureTask.create(flush.postFlush, null);
        postFlushExecutor.submit(task);
        return task;
    }
}

// print out size of all memtables we're enqueuing
private void logFlush()
{
    // reclaiming includes that which we are GC-ing;
    float onHeapRatio = 0, offHeapRatio = 0;
    long onHeapTotal = 0, offHeapTotal = 0;
    Memtable memtable = getDataTracker().getView().getCurrentMemtable();
    onHeapRatio += memtable.getAllocator().onHeap().ownershipRatio();
    offHeapRatio += memtable.getAllocator().offHeap().ownershipRatio();
    onHeapTotal += memtable.getAllocator().onHeap().owns();
    offHeapTotal += memtable.getAllocator().offHeap().owns();
    // include memtables of cfs-backed secondary indexes in the totals
    for (SecondaryIndex index : indexManager.getIndexes())
    {
        if (index.getIndexCfs() != null)
        {
            MemtableAllocator allocator = index.getIndexCfs().getDataTracker().getView().getCurrentMemtable().getAllocator();
            onHeapRatio += allocator.onHeap().ownershipRatio();
            offHeapRatio += allocator.offHeap().ownershipRatio();
            onHeapTotal += allocator.onHeap().owns();
            offHeapTotal += allocator.offHeap().owns();
        }
    }
    logger.info("Enqueuing flush of {}: {}", name, String.format("%d (%.0f%%) on-heap, %d (%.0f%%) off-heap", onHeapTotal, onHeapRatio * 100, offHeapTotal, offHeapRatio * 100));
}

/** Flush any unflushed data, regardless of commit log position. */
public ListenableFuture<?> forceFlush()
{
    return forceFlush(null);
}

/**
 * Flush if there is unflushed data that was written to the CommitLog before @param flushIfDirtyBefore
 * (inclusive). If @param flushIfDirtyBefore is null, flush if there is any unflushed data.
 *
 * @return a Future such that when the future completes, all data inserted before forceFlush was called,
 * will be flushed.
 */
public ListenableFuture<?> forceFlush(ReplayPosition flushIfDirtyBefore)
{
    // we synchronize on the data tracker to ensure we don't race against other calls to switchMemtable(),
    // unnecessarily queueing memtables that are about to be made clean
    synchronized (data)
    {
        // during index build, 2ary index memtables can be dirty even if parent is not. if so,
        // we want to flush the 2ary index ones too.
        boolean clean = true;
        for (ColumnFamilyStore cfs : concatWithIndexes())
            clean &= cfs.data.getView().getCurrentMemtable().isCleanAfter(flushIfDirtyBefore);
        if (clean)
        {
            // We could have a memtable for this column family that is being
            // flushed. Make sure the future returned wait for that so callers can
            // assume that any data inserted prior to the call are fully flushed
            // when the future returns (see #5241).
            ListenableFutureTask<?> task = ListenableFutureTask.create(new Runnable()
            {
                public void run()
                {
                    logger.debug("forceFlush requested but everything is clean in {}", name);
                }
            }, null);
            postFlushExecutor.execute(task);
            return task;
        }
        return switchMemtable();
    }
}

/** Blocking variant of forceFlush(): returns only once the flush has completed. */
public void forceBlockingFlush()
{
    FBUtilities.waitOnFuture(forceFlush());
}

/**
 * Both synchronises custom secondary indexes and provides ordering guarantees for futures on switchMemtable/flush
 * etc, which expect to be able to wait until the flush (and all prior flushes) requested have completed.
*/ private final class PostFlush implements Runnable { final boolean flushSecondaryIndexes; final OpOrder.Barrier writeBarrier; final CountDownLatch latch = new CountDownLatch(1); final ReplayPosition lastReplayPosition; private PostFlush(boolean flushSecondaryIndexes, OpOrder.Barrier writeBarrier, ReplayPosition lastReplayPosition) { this.writeBarrier = writeBarrier; this.flushSecondaryIndexes = flushSecondaryIndexes; this.lastReplayPosition = lastReplayPosition; } public void run() { writeBarrier.await(); /** * we can flush 2is as soon as the barrier completes, as they will be consistent with (or ahead of) the * flushed memtables and CL position, which is as good as we can guarantee. * TODO: SecondaryIndex should support setBarrier(), so custom implementations can co-ordinate exactly * with CL as we do with memtables/CFS-backed SecondaryIndexes. */ if (flushSecondaryIndexes) { for (SecondaryIndex index : indexManager.getIndexesNotBackedByCfs()) { // flush any non-cfs backed indexes logger.info("Flushing SecondaryIndex {}", index); index.forceBlockingFlush(); } } try { // we wait on the latch for the lastReplayPosition to be set, and so that waiters // on this task can rely on all prior flushes being complete latch.await(); } catch (InterruptedException e) { throw new IllegalStateException(); } // must check lastReplayPosition != null because Flush may find that all memtables are clean // and so not set a lastReplayPosition if (lastReplayPosition != null) { CommitLog.instance.discardCompletedSegments(metadata.cfId, lastReplayPosition); } metric.pendingFlushes.dec(); } } /** * Should only be constructed/used from switchMemtable() or truncate(), with ownership of the DataTracker monitor. 
 * In the constructor the current memtable(s) are swapped, and a barrier on outstanding writes is issued;
 * when run by the flushWriter the barrier is waited on to ensure all outstanding writes have completed
 * before all memtables are immediately written, and the CL is either immediately marked clean or, if
 * there are custom secondary indexes, the post flush clean up is left to update those indexes and mark
 * the CL clean
 */
private final class Flush implements Runnable
{
    final OpOrder.Barrier writeBarrier;
    // memtables switched out in the constructor, still needing to be flushed in run()
    final List<Memtable> memtables;
    final PostFlush postFlush;
    final boolean truncate;

    private Flush(boolean truncate)
    {
        // if true, we won't flush, we'll just wait for any outstanding writes, switch the memtable, and discard
        this.truncate = truncate;
        metric.pendingFlushes.inc();
        /**
         * To ensure correctness of switch without blocking writes, run() needs to wait for all write operations
         * started prior to the switch to complete. We do this by creating a Barrier on the writeOrdering
         * that all write operations register themselves with, and assigning this barrier to the memtables,
         * after which we *.issue()* the barrier. This barrier is used to direct write operations started prior
         * to the barrier.issue() into the memtable we have switched out, and any started after to its replacement.
         * In doing so it also tells the write operations to update the lastReplayPosition of the memtable, so
         * that we know the CL position we are dirty to, which can be marked clean when we complete.
         */
        writeBarrier = keyspace.writeOrder.newBarrier();
        memtables = new ArrayList<>();
        // submit flushes for the memtable for any indexed sub-cfses, and our own
        AtomicReference<ReplayPosition> lastReplayPositionHolder = new AtomicReference<>();
        for (ColumnFamilyStore cfs : concatWithIndexes())
        {
            // switch all memtables, regardless of their dirty status, setting the barrier
            // so that we can reach a coordinated decision about cleanliness once they
            // are no longer possible to be modified
            Memtable mt = cfs.data.switchMemtable(truncate);
            mt.setDiscarding(writeBarrier, lastReplayPositionHolder);
            memtables.add(mt);
        }
        // we now attempt to define the lastReplayPosition; we do this by grabbing the current limit from the CL
        // and attempting to set the holder to this value. at the same time all writes to the memtables are
        // also maintaining this value, so if somebody sneaks ahead of us somehow (should be rare) we simply retry,
        // so that we know all operations prior to the position have not reached it yet
        ReplayPosition lastReplayPosition;
        while (true)
        {
            lastReplayPosition = new Memtable.LastReplayPosition(CommitLog.instance.getContext());
            ReplayPosition currentLast = lastReplayPositionHolder.get();
            // CAS retry loop: only move the holder forward, never backwards
            if ((currentLast == null || currentLast.compareTo(lastReplayPosition) <= 0) && lastReplayPositionHolder.compareAndSet(currentLast, lastReplayPosition))
                break;
        }
        // we then issue the barrier; this lets us wait for all operations started prior to the barrier to complete;
        // since this happens after wiring up the lastReplayPosition, we also know all operations with earlier
        // replay positions have also completed, i.e. the memtables are done and ready to flush
        writeBarrier.issue();
        postFlush = new PostFlush(!truncate, writeBarrier, lastReplayPosition);
    }

    public void run()
    {
        // mark writes older than the barrier as blocking progress, permitting them to exceed our memory limit
        // if they are stuck waiting on it, then wait for them all to complete
        writeBarrier.markBlocking();
        writeBarrier.await();
        // mark all memtables as flushing, removing them from the live memtable list, and
        // remove any memtables that are already clean from the set we need to flush
        Iterator<Memtable> iter = memtables.iterator();
        while (iter.hasNext())
        {
            Memtable memtable = iter.next();
            memtable.cfs.data.markFlushing(memtable);
            if (memtable.isClean() || truncate)
            {
                // nothing to write: discard immediately
                memtable.cfs.replaceFlushed(memtable, null);
                memtable.setDiscarded();
                iter.remove();
            }
        }
        if (memtables.isEmpty())
        {
            // everything was clean; unblock PostFlush waiters
            postFlush.latch.countDown();
            return;
        }
        metric.memtableSwitchCount.inc();
        for (final Memtable memtable : memtables)
        {
            // flush the memtable
            MoreExecutors.sameThreadExecutor().execute(memtable.flushRunnable());
            // issue a read barrier for reclaiming the memory, and offload the wait to another thread
            final OpOrder.Barrier readBarrier = readOrdering.newBarrier();
            readBarrier.issue();
            reclaimExecutor.execute(new WrappedRunnable()
            {
                public void runMayThrow() throws InterruptedException, ExecutionException
                {
                    readBarrier.await();
                    memtable.setDiscarded();
                }
            });
        }
        // signal the post-flush we've done our work
        postFlush.latch.countDown();
    }
}

/**
 * Finds the largest memtable, as a percentage of *either* on- or off-heap memory limits, and immediately
 * queues it for flushing. If the memtable selected is flushed before this completes, no work is done.
*/ public static class FlushLargestColumnFamily implements Runnable { public void run() { float largestRatio = 0f; Memtable largest = null; for (ColumnFamilyStore cfs : ColumnFamilyStore.all()) { // we take a reference to the current main memtable for the CF prior to snapping its ownership ratios // to ensure we have some ordering guarantee for performing the switchMemtableIf(), i.e. we will only // swap if the memtables we are measuring here haven't already been swapped by the time we try to swap them Memtable current = cfs.getDataTracker().getView().getCurrentMemtable(); // find the total ownership ratio for the memtable and all SecondaryIndexes owned by this CF, // both on- and off-heap, and select the largest of the two ratios to weight this CF float onHeap = 0f, offHeap = 0f; onHeap += current.getAllocator().onHeap().ownershipRatio(); offHeap += current.getAllocator().offHeap().ownershipRatio(); for (SecondaryIndex index : cfs.indexManager.getIndexes()) { if (index.getIndexCfs() != null) { MemtableAllocator allocator = index.getIndexCfs().getDataTracker().getView().getCurrentMemtable().getAllocator(); onHeap += allocator.onHeap().ownershipRatio(); offHeap += allocator.offHeap().ownershipRatio(); } } float ratio = Math.max(onHeap, offHeap); if (ratio > largestRatio) { largest = current; largestRatio = ratio; } } if (largest != null) largest.cfs.switchMemtableIfCurrent(largest); } } public void maybeUpdateRowCache(DecoratedKey key) { if (!isRowCacheEnabled()) return; RowCacheKey cacheKey = new RowCacheKey(metadata.cfId, key); invalidateCachedRow(cacheKey); } /** * Insert/Update the column family for this key. * Caller is responsible for acquiring Keyspace.switchLock * param @ lock - lock that needs to be used. 
 * param @ key - key for update/insert
 * param @ columnFamily - columnFamily changes
 */
public void apply(DecoratedKey key, ColumnFamily columnFamily, SecondaryIndexManager.Updater indexer, OpOrder.Group opGroup, ReplayPosition replayPosition)
{
    long start = System.nanoTime();
    Memtable mt = data.getMemtableFor(opGroup, replayPosition);
    final long timeDelta = mt.put(key, columnFamily, indexer, opGroup);
    // write invalidates any cached version of this row
    maybeUpdateRowCache(key);
    metric.samplers.get(Sampler.WRITES).addSample(key.getKey());
    metric.writeLatency.addNano(System.nanoTime() - start);
    // Memtable.put signals "no meaningful delta" with Long.MAX_VALUE
    if(timeDelta < Long.MAX_VALUE)
        metric.colUpdateTimeDeltaHistogram.update(timeDelta);
}

/**
 * Purges gc-able top-level and range tombstones, returning `cf` if there are any columns or tombstones left,
 * null otherwise.
 * @param gcBefore a timestamp (in seconds); tombstones with a localDeletionTime before this will be purged
 */
public static ColumnFamily removeDeletedCF(ColumnFamily cf, int gcBefore)
{
    // purge old top-level and range tombstones
    cf.purgeTombstones(gcBefore);
    // if there are no columns or tombstones left, return null
    return !cf.hasColumns() && !cf.isMarkedForDelete() ? null : cf;
}

/**
 * Removes deleted columns and purges gc-able tombstones.
 * @return an updated `cf` if any columns or tombstones remain, null otherwise
 */
public static ColumnFamily removeDeleted(ColumnFamily cf, int gcBefore)
{
    return removeDeleted(cf, gcBefore, SecondaryIndexManager.nullUpdater);
}

/*
 This is complicated because we need to preserve deleted columns and columnfamilies
 until they have been deleted for at least GC_GRACE_IN_SECONDS.
 But, we do not need to preserve their contents; just the object itself as a "tombstone"
 that can be used to repair other replicas that do not know about the deletion.
 */
public static ColumnFamily removeDeleted(ColumnFamily cf, int gcBefore, SecondaryIndexManager.Updater indexer)
{
    if (cf == null)
    {
        return null;
    }
    // first strip purgeable cells, then decide whether the whole CF collapses to null
    return removeDeletedCF(removeDeletedColumnsOnly(cf, gcBefore, indexer), gcBefore);
}

/**
 * Removes only per-cell tombstones, cells that are shadowed by a row-level or range tombstone, or
 * columns that have been dropped from the schema (for CQL3 tables only).
 * @return the updated ColumnFamily
 */
public static ColumnFamily removeDeletedColumnsOnly(ColumnFamily cf, int gcBefore, SecondaryIndexManager.Updater indexer)
{
    BatchRemoveIterator<Cell> iter = cf.batchRemoveIterator();
    DeletionInfo.InOrderTester tester = cf.inOrderDeletionTester();
    boolean hasDroppedColumns = !cf.metadata.getDroppedColumns().isEmpty();
    while (iter.hasNext())
    {
        Cell c = iter.next();
        // remove columns if
        // (a) the column itself is gcable or
        // (b) the column is shadowed by a CF tombstone
        // (c) the column has been dropped from the CF schema (CQL3 tables only)
        if (c.getLocalDeletionTime() < gcBefore || tester.isDeleted(c) || (hasDroppedColumns && isDroppedColumn(c, cf.metadata())))
        {
            iter.remove();
            // keep secondary indexes in sync with the removal
            indexer.remove(c);
        }
    }
    iter.commit();
    return cf;
}

// returns true if
// 1. this column has been dropped from schema and
// 2. if it has been re-added since then, this particular column was inserted before the last drop
private static boolean isDroppedColumn(Cell c, CFMetaData meta)
{
    Long droppedAt = meta.getDroppedColumns().get(c.name().cql3ColumnName(meta));
    return droppedAt != null && c.timestamp() <= droppedAt;
}

/** Strips cells belonging to schema-dropped columns from {@code cf} in place; no-op when none are dropped. */
private void removeDroppedColumns(ColumnFamily cf)
{
    if (cf == null || cf.metadata.getDroppedColumns().isEmpty())
        return;
    BatchRemoveIterator<Cell> iter = cf.batchRemoveIterator();
    while (iter.hasNext())
        if (isDroppedColumn(iter.next(), metadata))
            iter.remove();
    iter.commit();
}

/**
 * @param sstables
 * @return sstables whose key range overlaps with that of the given sstables, not including itself.
 * (The given sstables may or may not overlap with each other.)
 */
public Collection<SSTableReader> getOverlappingSSTables(Iterable<SSTableReader> sstables)
{
    logger.debug("Checking for sstables overlapping {}", sstables);
    // a normal compaction won't ever have an empty sstables list, but we create a skeleton
    // compaction controller for streaming, and that passes an empty list.
    if (!sstables.iterator().hasNext())
        return ImmutableSet.of();
    DataTracker.SSTableIntervalTree tree = data.getView().intervalTree;
    Set<SSTableReader> results = null;
    for (SSTableReader sstable : sstables)
    {
        // query the interval tree for live sstables covering [first, last] of this sstable
        Set<SSTableReader> overlaps = ImmutableSet.copyOf(tree.search(Interval.<RowPosition, SSTableReader>create(sstable.first, sstable.last)));
        results = results == null ? overlaps : Sets.union(results, overlaps).immutableCopy();
    }
    // exclude the input sstables themselves from the result
    results = Sets.difference(results, ImmutableSet.copyOf(sstables));
    return results;
}

/**
 * like getOverlappingSSTables, but acquires references before returning
 */
public Refs<SSTableReader> getAndReferenceOverlappingSSTables(Iterable<SSTableReader> sstables)
{
    // retry loop: tryRef fails (returns null) if any overlapping sstable was released concurrently,
    // in which case we recompute the overlap set and try again
    while (true)
    {
        Iterable<SSTableReader> overlapped = getOverlappingSSTables(sstables);
        Refs<SSTableReader> refs = Refs.tryRef(overlapped);
        if (refs != null)
            return refs;
    }
}

/*
 * Called after a BinaryMemtable flushes its in-memory data, or we add a file
 * via bootstrap. This information is cached in the ColumnFamilyStore.
 * This is useful for reads because the ColumnFamilyStore first looks in
 * the in-memory store and the into the disk to find the key. If invoked
 * during recoveryMode the onMemtableFlush() need not be invoked.
 *
 * param @ filename - filename just flushed to disk
 */
public void addSSTable(SSTableReader sstable)
{
    assert sstable.getColumnFamilyName().equals(name);
    addSSTables(Arrays.asList(sstable));
}

/** Registers sstables with the DataTracker and nudges the compaction manager. */
public void addSSTables(Collection<SSTableReader> sstables)
{
    data.addSSTables(sstables);
    CompactionManager.instance.submitBackground(this);
}

/**
 * Calculate expected file size of SSTable after compaction.
 *
 * If operation type is {@code CLEANUP} and we're not dealing with an index sstable,
 * then we calculate expected file size with checking token range to be eliminated.
 *
 * Otherwise, we just add up all the files' size, which is the worst case file
 * size for compaction of all the list of files given.
 *
 * @param sstables SSTables to calculate expected compacted file size
 * @param operation Operation type
 * @return Expected file size of SSTable after compaction
 */
public long getExpectedCompactedFileSize(Iterable<SSTableReader> sstables, OperationType operation)
{
    if (operation != OperationType.CLEANUP || isIndex())
    {
        return SSTableReader.getTotalBytes(sstables);
    }
    // cleanup size estimation only counts bytes for keys local to this node
    long expectedFileSize = 0;
    Collection<Range<Token>> ranges = StorageService.instance.getLocalRanges(keyspace.getName());
    for (SSTableReader sstable : sstables)
    {
        List<Pair<Long, Long>> positions = sstable.getPositionsForRanges(ranges);
        for (Pair<Long, Long> position : positions)
            expectedFileSize += position.right - position.left;
    }
    return expectedFileSize;
}

/*
 *  Find the maximum size file in the list .
*/ public SSTableReader getMaxSizeFile(Iterable<SSTableReader> sstables) { long maxSize = 0L; SSTableReader maxFile = null; for (SSTableReader sstable : sstables) { if (sstable.onDiskLength() > maxSize) { maxSize = sstable.onDiskLength(); maxFile = sstable; } } return maxFile; } public CompactionManager.AllSSTableOpStatus forceCleanup() throws ExecutionException, InterruptedException { return CompactionManager.instance.performCleanup(ColumnFamilyStore.this); } public CompactionManager.AllSSTableOpStatus scrub(boolean disableSnapshot, boolean skipCorrupted) throws ExecutionException, InterruptedException { // skip snapshot creation during scrub, SEE JIRA 5891 if(!disableSnapshot) snapshotWithoutFlush("pre-scrub-" + System.currentTimeMillis()); return CompactionManager.instance.performScrub(ColumnFamilyStore.this, skipCorrupted); } public CompactionManager.AllSSTableOpStatus sstablesRewrite(boolean excludeCurrentVersion) throws ExecutionException, InterruptedException { return CompactionManager.instance.performSSTableRewrite(ColumnFamilyStore.this, excludeCurrentVersion); } public void markObsolete(Collection<SSTableReader> sstables, OperationType compactionType) { assert !sstables.isEmpty(); data.markObsolete(sstables, compactionType); } void replaceFlushed(Memtable memtable, SSTableReader sstable) { compactionStrategyWrapper.replaceFlushed(memtable, sstable); } public boolean isValid() { return valid; } /** * Package protected for access from the CompactionManager. 
*/ public DataTracker getDataTracker() { return data; } public Collection<SSTableReader> getSSTables() { return data.getSSTables(); } public Set<SSTableReader> getUncompactingSSTables() { return data.getUncompactingSSTables(); } public ColumnFamily getColumnFamily(DecoratedKey key, Composite start, Composite finish, boolean reversed, int limit, long timestamp) { return getColumnFamily(QueryFilter.getSliceFilter(key, name, start, finish, reversed, limit, timestamp)); } /** * Fetch the row and columns given by filter.key if it is in the cache; if not, read it from disk and cache it * * If row is cached, and the filter given is within its bounds, we return from cache, otherwise from disk * * If row is not cached, we figure out what filter is "biggest", read that from disk, then * filter the result and either cache that or return it. * * @param cfId the column family to read the row from * @param filter the columns being queried. * @return the requested data for the filter provided */ private ColumnFamily getThroughCache(UUID cfId, QueryFilter filter) { assert isRowCacheEnabled() : String.format("Row cache is not enabled on table [" + name + "]"); RowCacheKey key = new RowCacheKey(cfId, filter.key); // attempt a sentinel-read-cache sequence. if a write invalidates our sentinel, we'll return our // (now potentially obsolete) data, but won't cache it. 
see CASSANDRA-3862 // TODO: don't evict entire rows on writes (#2864) IRowCacheEntry cached = CacheService.instance.rowCache.get(key); if (cached != null) { if (cached instanceof RowCacheSentinel) { // Some other read is trying to cache the value, just do a normal non-caching read Tracing.trace("Row cache miss (race)"); metric.rowCacheMiss.inc(); return getTopLevelColumns(filter, Integer.MIN_VALUE); } ColumnFamily cachedCf = (ColumnFamily)cached; if (isFilterFullyCoveredBy(filter.filter, cachedCf, filter.timestamp)) { metric.rowCacheHit.inc(); Tracing.trace("Row cache hit"); return filterColumnFamily(cachedCf, filter); } metric.rowCacheHitOutOfRange.inc(); Tracing.trace("Ignoring row cache as cached value could not satisfy query"); return getTopLevelColumns(filter, Integer.MIN_VALUE); } metric.rowCacheMiss.inc(); Tracing.trace("Row cache miss"); RowCacheSentinel sentinel = new RowCacheSentinel(); boolean sentinelSuccess = CacheService.instance.rowCache.putIfAbsent(key, sentinel); ColumnFamily data = null; ColumnFamily toCache = null; try { // If we are explicitely asked to fill the cache with full partitions, we go ahead and query the whole thing if (metadata.getCaching().rowCache.cacheFullPartitions()) { data = getTopLevelColumns(QueryFilter.getIdentityFilter(filter.key, name, filter.timestamp), Integer.MIN_VALUE); toCache = data; Tracing.trace("Populating row cache with the whole partition"); if (sentinelSuccess && toCache != null) CacheService.instance.rowCache.replace(key, sentinel, toCache); return filterColumnFamily(data, filter); } // Otherwise, if we want to cache the result of the query we're about to do, we must make sure this query // covers what needs to be cached. And if the user filter does not satisfy that, we sometimes extend said // filter so we can populate the cache but only if: // 1) we can guarantee it is a strict extension, i.e. that we will still fetch the data asked by the user. 
// 2) the extension does not make us query more than getRowsPerPartitionToCache() (as a mean to limit the // amount of extra work we'll do on a user query for the purpose of populating the cache). // // In practice, we can only guarantee those 2 points if the filter is one that queries the head of the // partition (and if that filter actually counts CQL3 rows since that's what we cache and it would be // bogus to compare the filter count to the 'rows to cache' otherwise). if (filter.filter.isHeadFilter() && filter.filter.countCQL3Rows(metadata.comparator)) { SliceQueryFilter sliceFilter = (SliceQueryFilter)filter.filter; int rowsToCache = metadata.getCaching().rowCache.rowsToCache; SliceQueryFilter cacheSlice = readFilterForCache(); QueryFilter cacheFilter = new QueryFilter(filter.key, name, cacheSlice, filter.timestamp); // If the filter count is less than the number of rows cached, we simply extend it to make sure we do cover the // number of rows to cache, and if that count is greater than the number of rows to cache, we simply filter what // needs to be cached afterwards. if (sliceFilter.count < rowsToCache) { toCache = getTopLevelColumns(cacheFilter, Integer.MIN_VALUE); if (toCache != null) { Tracing.trace("Populating row cache ({} rows cached)", cacheSlice.lastCounted()); data = filterColumnFamily(toCache, filter); } } else { data = getTopLevelColumns(filter, Integer.MIN_VALUE); if (data != null) { // The filter limit was greater than the number of rows to cache. But, if the filter had a non-empty // finish bound, we may have gotten less than what needs to be cached, in which case we shouldn't cache it // (otherwise a cache hit would assume the whole partition is cached which is not the case). 
if (sliceFilter.finish().isEmpty() || sliceFilter.lastCounted() >= rowsToCache) { toCache = filterColumnFamily(data, cacheFilter); Tracing.trace("Caching {} rows (out of {} requested)", cacheSlice.lastCounted(), sliceFilter.count); } else { Tracing.trace("Not populating row cache, not enough rows fetched ({} fetched but {} required for the cache)", sliceFilter.lastCounted(), rowsToCache); } } } if (sentinelSuccess && toCache != null) CacheService.instance.rowCache.replace(key, sentinel, toCache); return data; } else { Tracing.trace("Fetching data but not populating cache as query does not query from the start of the partition"); return getTopLevelColumns(filter, Integer.MIN_VALUE); } } finally { if (sentinelSuccess && toCache == null) invalidateCachedRow(key); } } public SliceQueryFilter readFilterForCache() { // We create a new filter everytime before for now SliceQueryFilter is unfortunatly mutable. return new SliceQueryFilter(ColumnSlice.ALL_COLUMNS_ARRAY, false, metadata.getCaching().rowCache.rowsToCache, metadata.clusteringColumns().size()); } public boolean isFilterFullyCoveredBy(IDiskAtomFilter filter, ColumnFamily cachedCf, long now) { // We can use the cached value only if we know that no data it doesn't contain could be covered // by the query filter, that is if: // 1) either the whole partition is cached // 2) or we can ensure than any data the filter selects are in the cached partition // When counting rows to decide if the whole row is cached, we should be careful with expiring // columns: if we use a timestamp newer than the one that was used when populating the cache, we might // end up deciding the whole partition is cached when it's really not (just some rows expired since the // cf was cached). This is the reason for Integer.MIN_VALUE below. 
boolean wholePartitionCached = cachedCf.liveCQL3RowCount(Integer.MIN_VALUE) < metadata.getCaching().rowCache.rowsToCache; // Contrarily to the "wholePartitionCached" check above, we do want isFullyCoveredBy to take the // timestamp of the query into account when dealing with expired columns. Otherwise, we could think // the cached partition has enough live rows to satisfy the filter when it doesn't because some // are now expired. return wholePartitionCached || filter.isFullyCoveredBy(cachedCf, now); } public int gcBefore(long now) { return (int) (now / 1000) - metadata.getGcGraceSeconds(); } /** * get a list of columns starting from a given column, in a specified order. * only the latest version of a column is returned. * @return null if there is no data and no tombstones; otherwise a ColumnFamily */ public ColumnFamily getColumnFamily(QueryFilter filter) { assert name.equals(filter.getColumnFamilyName()) : filter.getColumnFamilyName(); ColumnFamily result = null; long start = System.nanoTime(); try { int gcBefore = gcBefore(filter.timestamp); if (isRowCacheEnabled()) { assert !isIndex(); // CASSANDRA-5732 UUID cfId = metadata.cfId; ColumnFamily cached = getThroughCache(cfId, filter); if (cached == null) { logger.trace("cached row is empty"); return null; } result = cached; } else { ColumnFamily cf = getTopLevelColumns(filter, gcBefore); if (cf == null) return null; result = removeDeletedCF(cf, gcBefore); } removeDroppedColumns(result); if (filter.filter instanceof SliceQueryFilter) { // Log the number of tombstones scanned on single key queries metric.tombstoneScannedHistogram.update(((SliceQueryFilter) filter.filter).lastIgnored()); metric.liveScannedHistogram.update(((SliceQueryFilter) filter.filter).lastLive()); } } finally { metric.readLatency.addNano(System.nanoTime() - start); } return result; } /** * Filter a cached row, which will not be modified by the filter, but may be modified by throwing out * tombstones that are no longer relevant. 
* The returned column family won't be thread safe. */ ColumnFamily filterColumnFamily(ColumnFamily cached, QueryFilter filter) { if (cached == null) return null; ColumnFamily cf = cached.cloneMeShallow(ArrayBackedSortedColumns.factory, filter.filter.isReversed()); int gcBefore = gcBefore(filter.timestamp); filter.collateOnDiskAtom(cf, filter.getIterator(cached), gcBefore); return removeDeletedCF(cf, gcBefore); } public Set<SSTableReader> getUnrepairedSSTables() { Set<SSTableReader> unRepairedSSTables = new HashSet<>(getSSTables()); Iterator<SSTableReader> sstableIterator = unRepairedSSTables.iterator(); while(sstableIterator.hasNext()) { SSTableReader sstable = sstableIterator.next(); if (sstable.isRepaired()) sstableIterator.remove(); } return unRepairedSSTables; } public Set<SSTableReader> getRepairedSSTables() { Set<SSTableReader> repairedSSTables = new HashSet<>(getSSTables()); Iterator<SSTableReader> sstableIterator = repairedSSTables.iterator(); while(sstableIterator.hasNext()) { SSTableReader sstable = sstableIterator.next(); if (!sstable.isRepaired()) sstableIterator.remove(); } return repairedSSTables; } public RefViewFragment selectAndReference(Function<DataTracker.View, List<SSTableReader>> filter) { while (true) { ViewFragment view = select(filter); Refs<SSTableReader> refs = Refs.tryRef(view.sstables); if (refs != null) return new RefViewFragment(view.sstables, view.memtables, refs); } } public ViewFragment select(Function<DataTracker.View, List<SSTableReader>> filter) { DataTracker.View view = data.getView(); List<SSTableReader> sstables = view.intervalTree.isEmpty() ? 
Collections.<SSTableReader>emptyList() : filter.apply(view); return new ViewFragment(sstables, view.getAllMemtables()); } /** * @return a ViewFragment containing the sstables and memtables that may need to be merged * for the given @param key, according to the interval tree */ public Function<DataTracker.View, List<SSTableReader>> viewFilter(final DecoratedKey key) { assert !key.isMinimum(); return new Function<DataTracker.View, List<SSTableReader>>() { public List<SSTableReader> apply(DataTracker.View view) { return compactionStrategyWrapper.filterSSTablesForReads(view.intervalTree.search(key)); } }; } /** * @return a ViewFragment containing the sstables and memtables that may need to be merged * for rows within @param rowBounds, inclusive, according to the interval tree. */ public Function<DataTracker.View, List<SSTableReader>> viewFilter(final AbstractBounds<RowPosition> rowBounds) { return new Function<DataTracker.View, List<SSTableReader>>() { public List<SSTableReader> apply(DataTracker.View view) { return compactionStrategyWrapper.filterSSTablesForReads(view.sstablesInBounds(rowBounds)); } }; } /** * @return a ViewFragment containing the sstables and memtables that may need to be merged * for rows for all of @param rowBoundsCollection, inclusive, according to the interval tree. 
*/ public Function<DataTracker.View, List<SSTableReader>> viewFilter(final Collection<AbstractBounds<RowPosition>> rowBoundsCollection) { return new Function<DataTracker.View, List<SSTableReader>>() { public List<SSTableReader> apply(DataTracker.View view) { Set<SSTableReader> sstables = Sets.newHashSet(); for (AbstractBounds<RowPosition> rowBounds : rowBoundsCollection) sstables.addAll(view.sstablesInBounds(rowBounds)); return ImmutableList.copyOf(sstables); } }; } public List<String> getSSTablesForKey(String key) { DecoratedKey dk = partitioner.decorateKey(metadata.getKeyValidator().fromString(key)); try (OpOrder.Group op = readOrdering.start()) { List<String> files = new ArrayList<>(); for (SSTableReader sstr : select(viewFilter(dk)).sstables) { // check if the key actually exists in this sstable, without updating cache and stats if (sstr.getPosition(dk, SSTableReader.Operator.EQ, false) != null) files.add(sstr.getFilename()); } return files; } } public ColumnFamily getTopLevelColumns(QueryFilter filter, int gcBefore) { Tracing.trace("Executing single-partition query on {}", name); CollationController controller = new CollationController(this, filter, gcBefore); ColumnFamily columns; try (OpOrder.Group op = readOrdering.start()) { columns = controller.getTopLevelColumns(Memtable.MEMORY_POOL.needToCopyOnHeap()); } if (columns != null) metric.samplers.get(Sampler.READS).addSample(filter.key.getKey()); metric.updateSSTableIterated(controller.getSstablesIterated()); return columns; } public void beginLocalSampling(String sampler, int capacity) { metric.samplers.get(Sampler.valueOf(sampler)).beginSampling(capacity); } public CompositeData finishLocalSampling(String sampler, int count) throws OpenDataException { SamplerResult<ByteBuffer> samplerResults = metric.samplers.get(Sampler.valueOf(sampler)) .finishSampling(count); TabularDataSupport result = new TabularDataSupport(COUNTER_TYPE); for (Counter<ByteBuffer> counter : samplerResults.topK) { byte[] key = 
counter.getItem().array(); result.put(new CompositeDataSupport(COUNTER_COMPOSITE_TYPE, COUNTER_NAMES, new Object[] { Hex.bytesToHex(key), // raw counter.getCount(), // count counter.getError(), // error metadata.getKeyValidator().getString(ByteBuffer.wrap(key)) })); // string } return new CompositeDataSupport(SAMPLING_RESULT, SAMPLER_NAMES, new Object[]{ samplerResults.cardinality, result}); } public void cleanupCache() { Collection<Range<Token>> ranges = StorageService.instance.getLocalRanges(keyspace.getName()); for (Iterator<RowCacheKey> keyIter = CacheService.instance.rowCache.keyIterator(); keyIter.hasNext(); ) { RowCacheKey key = keyIter.next(); DecoratedKey dk = partitioner.decorateKey(ByteBuffer.wrap(key.key)); if (key.cfId.equals(metadata.cfId) && !Range.isInRanges(dk.getToken(), ranges)) invalidateCachedRow(dk); } if (metadata.isCounter()) { for (Iterator<CounterCacheKey> keyIter = CacheService.instance.counterCache.keyIterator(); keyIter.hasNext(); ) { CounterCacheKey key = keyIter.next(); DecoratedKey dk = partitioner.decorateKey(ByteBuffer.wrap(key.partitionKey)); if (key.cfId.equals(metadata.cfId) && !Range.isInRanges(dk.getToken(), ranges)) CacheService.instance.counterCache.remove(key); } } } public static abstract class AbstractScanIterator extends AbstractIterator<Row> implements CloseableIterator<Row> { public boolean needsFiltering() { return true; } } /** * Iterate over a range of rows and columns from memtables/sstables. 
* * @param range The range of keys and columns within those keys to fetch */ private AbstractScanIterator getSequentialIterator(final DataRange range, long now) { assert !(range.keyRange() instanceof Range) || !((Range)range.keyRange()).isWrapAround() || range.keyRange().right.isMinimum() : range.keyRange(); final ViewFragment view = select(viewFilter(range.keyRange())); Tracing.trace("Executing seq scan across {} sstables for {}", view.sstables.size(), range.keyRange().getString(metadata.getKeyValidator())); final CloseableIterator<Row> iterator = RowIteratorFactory.getIterator(view.memtables, view.sstables, range, this, now); // todo this could be pushed into SSTableScanner return new AbstractScanIterator() { protected Row computeNext() { // pull a row out of the iterator if (!iterator.hasNext()) return endOfData(); Row current = iterator.next(); DecoratedKey key = current.key; if (!range.stopKey().isMinimum() && range.stopKey().compareTo(key) < 0) return endOfData(); // skipping outside of assigned range if (!range.contains(key)) return computeNext(); if (logger.isTraceEnabled()) logger.trace("scanned {}", metadata.getKeyValidator().getString(key.getKey())); return current; } public void close() throws IOException { iterator.close(); } }; } @VisibleForTesting public List<Row> getRangeSlice(final AbstractBounds<RowPosition> range, List<IndexExpression> rowFilter, IDiskAtomFilter columnFilter, int maxResults) { return getRangeSlice(range, rowFilter, columnFilter, maxResults, System.currentTimeMillis()); } public List<Row> getRangeSlice(final AbstractBounds<RowPosition> range, List<IndexExpression> rowFilter, IDiskAtomFilter columnFilter, int maxResults, long now) { return getRangeSlice(makeExtendedFilter(range, columnFilter, rowFilter, maxResults, false, false, now)); } /** * Allows generic range paging with the slice column filter. * Typically, suppose we have rows A, B, C ... Z having each some columns in [1, 100]. 
* And suppose we want to page through the query that for all rows returns the columns * within [25, 75]. For that, we need to be able to do a range slice starting at (row r, column c) * and ending at (row Z, column 75), *but* that only return columns in [25, 75]. * That is what this method allows. The columnRange is the "window" of columns we are interested * in each row, and columnStart (resp. columnEnd) is the start (resp. end) for the first * (resp. last) requested row. */ public ExtendedFilter makeExtendedFilter(AbstractBounds<RowPosition> keyRange, SliceQueryFilter columnRange, Composite columnStart, Composite columnStop, List<IndexExpression> rowFilter, int maxResults, boolean countCQL3Rows, long now) { DataRange dataRange = new DataRange.Paging(keyRange, columnRange, columnStart, columnStop, metadata.comparator); return ExtendedFilter.create(this, dataRange, rowFilter, maxResults, countCQL3Rows, now); } public List<Row> getRangeSlice(AbstractBounds<RowPosition> range, List<IndexExpression> rowFilter, IDiskAtomFilter columnFilter, int maxResults, long now, boolean countCQL3Rows, boolean isPaging) { return getRangeSlice(makeExtendedFilter(range, columnFilter, rowFilter, maxResults, countCQL3Rows, isPaging, now)); } public ExtendedFilter makeExtendedFilter(AbstractBounds<RowPosition> range, IDiskAtomFilter columnFilter, List<IndexExpression> rowFilter, int maxResults, boolean countCQL3Rows, boolean isPaging, long timestamp) { DataRange dataRange; if (isPaging) { assert columnFilter instanceof SliceQueryFilter; SliceQueryFilter sfilter = (SliceQueryFilter)columnFilter; assert sfilter.slices.length == 1; // create a new SliceQueryFilter that selects all cells, but pass the original slice start and finish // through to DataRange.Paging to be used on the first and last partitions SliceQueryFilter newFilter = new SliceQueryFilter(ColumnSlice.ALL_COLUMNS_ARRAY, sfilter.isReversed(), sfilter.count); dataRange = new DataRange.Paging(range, newFilter, sfilter.start(), 
sfilter.finish(), metadata.comparator); } else { dataRange = new DataRange(range, columnFilter); } return ExtendedFilter.create(this, dataRange, rowFilter, maxResults, countCQL3Rows, timestamp); } public List<Row> getRangeSlice(ExtendedFilter filter) { long start = System.nanoTime(); try (OpOrder.Group op = readOrdering.start()) { return filter(getSequentialIterator(filter.dataRange, filter.timestamp), filter); } finally { metric.rangeLatency.addNano(System.nanoTime() - start); } } @VisibleForTesting public List<Row> search(AbstractBounds<RowPosition> range, List<IndexExpression> clause, IDiskAtomFilter dataFilter, int maxResults) { return search(range, clause, dataFilter, maxResults, System.currentTimeMillis()); } public List<Row> search(AbstractBounds<RowPosition> range, List<IndexExpression> clause, IDiskAtomFilter dataFilter, int maxResults, long now) { return search(makeExtendedFilter(range, dataFilter, clause, maxResults, false, false, now)); } public List<Row> search(ExtendedFilter filter) { Tracing.trace("Executing indexed scan for {}", filter.dataRange.keyRange().getString(metadata.getKeyValidator())); return indexManager.search(filter); } public List<Row> filter(AbstractScanIterator rowIterator, ExtendedFilter filter) { logger.trace("Filtering {} for rows matching {}", rowIterator, filter); List<Row> rows = new ArrayList<Row>(); int columnsCount = 0; int total = 0, matched = 0; boolean ignoreTombstonedPartitions = filter.ignoreTombstonedPartitions(); try { while (rowIterator.hasNext() && matched < filter.maxRows() && columnsCount < filter.maxColumns()) { // get the raw columns requested, and additional columns for the expressions if necessary Row rawRow = rowIterator.next(); total++; ColumnFamily data = rawRow.cf; if (rowIterator.needsFiltering()) { IDiskAtomFilter extraFilter = filter.getExtraFilter(rawRow.key, data); if (extraFilter != null) { ColumnFamily cf = filter.cfs.getColumnFamily(new QueryFilter(rawRow.key, name, extraFilter, filter.timestamp)); 
if (cf != null) data.addAll(cf); } removeDroppedColumns(data); if (!filter.isSatisfiedBy(rawRow.key, data, null, null)) continue; logger.trace("{} satisfies all filter expressions", data); // cut the resultset back to what was requested, if necessary data = filter.prune(rawRow.key, data); } else { removeDroppedColumns(data); } rows.add(new Row(rawRow.key, data)); if (!ignoreTombstonedPartitions || !data.hasOnlyTombstones(filter.timestamp)) matched++; if (data != null) columnsCount += filter.lastCounted(data); // Update the underlying filter to avoid querying more columns per slice than necessary and to handle paging filter.updateFilter(columnsCount); } return rows; } finally { try { rowIterator.close(); Tracing.trace("Scanned {} rows and matched {}", total, matched); } catch (IOException e) { throw new RuntimeException(e); } } } public CellNameType getComparator() { return metadata.comparator; } public void snapshotWithoutFlush(String snapshotName) { snapshotWithoutFlush(snapshotName, null); } public Set<SSTableReader> snapshotWithoutFlush(String snapshotName, Predicate<SSTableReader> predicate) { Set<SSTableReader> snapshottedSSTables = new HashSet<>(); for (ColumnFamilyStore cfs : concatWithIndexes()) { final JSONArray filesJSONArr = new JSONArray(); try (RefViewFragment currentView = cfs.selectAndReference(ALL_SSTABLES)) { for (SSTableReader ssTable : currentView.sstables) { if (ssTable.openReason == SSTableReader.OpenReason.EARLY || (predicate != null && !predicate.apply(ssTable))) { continue; } File snapshotDirectory = Directories.getSnapshotDirectory(ssTable.descriptor, snapshotName); ssTable.createLinks(snapshotDirectory.getPath()); // hard links filesJSONArr.add(ssTable.descriptor.relativeFilenameFor(Component.DATA)); if (logger.isDebugEnabled()) logger.debug("Snapshot for {} keyspace data file {} created in {}", keyspace, ssTable.getFilename(), snapshotDirectory); snapshottedSSTables.add(ssTable); } writeSnapshotManifest(filesJSONArr, snapshotName); } } 
return snapshottedSSTables; } private void writeSnapshotManifest(final JSONArray filesJSONArr, final String snapshotName) { final File manifestFile = directories.getSnapshotManifestFile(snapshotName); try { if (!manifestFile.getParentFile().exists()) manifestFile.getParentFile().mkdirs(); try (PrintStream out = new PrintStream(manifestFile)) { final JSONObject manifestJSON = new JSONObject(); manifestJSON.put("files", filesJSONArr); out.println(manifestJSON.toJSONString()); } } catch (IOException e) { throw new FSWriteError(e, manifestFile); } } public Refs<SSTableReader> getSnapshotSSTableReader(String tag) throws IOException { Map<Integer, SSTableReader> active = new HashMap<>(); for (SSTableReader sstable : data.getView().sstables) active.put(sstable.descriptor.generation, sstable); Map<Descriptor, Set<Component>> snapshots = directories.sstableLister().snapshots(tag).list(); Refs<SSTableReader> refs = new Refs<>(); try { for (Map.Entry<Descriptor, Set<Component>> entries : snapshots.entrySet()) { // Try acquire reference to an active sstable instead of snapshot if it exists, // to avoid opening new sstables. If it fails, use the snapshot reference instead. SSTableReader sstable = active.get(entries.getKey().generation); if (sstable == null || !refs.tryRef(sstable)) { if (logger.isDebugEnabled()) logger.debug("using snapshot sstable {}", entries.getKey()); sstable = SSTableReader.open(entries.getKey(), entries.getValue(), metadata, partitioner); // This is technically not necessary since it's a snapshot but makes things easier refs.tryRef(sstable); } else if (logger.isDebugEnabled()) { logger.debug("using active sstable {}", entries.getKey()); } } } catch (IOException | RuntimeException e) { // In case one of the snapshot sstables fails to open, // we must release the references to the ones we opened so far refs.release(); throw e; } return refs; } /** * Take a snap shot of this columnfamily store. 
* * @param snapshotName the name of the associated with the snapshot */ public Set<SSTableReader> snapshot(String snapshotName) { return snapshot(snapshotName, null); } public Set<SSTableReader> snapshot(String snapshotName, Predicate<SSTableReader> predicate) { forceBlockingFlush(); return snapshotWithoutFlush(snapshotName, predicate); } public boolean snapshotExists(String snapshotName) { return directories.snapshotExists(snapshotName); } public long getSnapshotCreationTime(String snapshotName) { return directories.snapshotCreationTime(snapshotName); } /** * Clear all the snapshots for a given column family. * * @param snapshotName the user supplied snapshot name. If left empty, * all the snapshots will be cleaned. */ public void clearSnapshot(String snapshotName) { List<File> snapshotDirs = directories.getCFDirectories(); Directories.clearSnapshot(snapshotName, snapshotDirs); } /** * * @return Return a map of all snapshots to space being used * The pair for a snapshot has true size and size on disk. */ public Map<String, Pair<Long,Long>> getSnapshotDetails() { return directories.getSnapshotDetails(); } public boolean hasUnreclaimedSpace() { return metric.liveDiskSpaceUsed.getCount() < metric.totalDiskSpaceUsed.getCount(); } /** * @return the cached row for @param key if it is already present in the cache. * That is, unlike getThroughCache, it will not readAndCache the row if it is not present, nor * are these calls counted in cache statistics. * * Note that this WILL cause deserialization of a SerializingCache row, so if all you * need to know is whether a row is present or not, use containsCachedRow instead. */ public ColumnFamily getRawCachedRow(DecoratedKey key) { if (!isRowCacheEnabled()) return null; IRowCacheEntry cached = CacheService.instance.rowCache.getInternal(new RowCacheKey(metadata.cfId, key)); return cached == null || cached instanceof RowCacheSentinel ? 
null : (ColumnFamily)cached; } private void invalidateCaches() { CacheService.instance.invalidateKeyCacheForCf(metadata.cfId); CacheService.instance.invalidateRowCacheForCf(metadata.cfId); if (metadata.isCounter()) CacheService.instance.invalidateCounterCacheForCf(metadata.cfId); } /** * @return true if @param key is contained in the row cache */ public boolean containsCachedRow(DecoratedKey key) { return CacheService.instance.rowCache.getCapacity() != 0 && CacheService.instance.rowCache.containsKey(new RowCacheKey(metadata.cfId, key)); } public void invalidateCachedRow(RowCacheKey key) { CacheService.instance.rowCache.remove(key); } public void invalidateCachedRow(DecoratedKey key) { UUID cfId = Schema.instance.getId(keyspace.getName(), this.name); if (cfId == null) return; // secondary index invalidateCachedRow(new RowCacheKey(cfId, key)); } public ClockAndCount getCachedCounter(ByteBuffer partitionKey, CellName cellName) { if (CacheService.instance.counterCache.getCapacity() == 0L) // counter cache disabled. return null; return CacheService.instance.counterCache.get(CounterCacheKey.create(metadata.cfId, partitionKey, cellName)); } public void putCachedCounter(ByteBuffer partitionKey, CellName cellName, ClockAndCount clockAndCount) { if (CacheService.instance.counterCache.getCapacity() == 0L) // counter cache disabled. 
return; CacheService.instance.counterCache.put(CounterCacheKey.create(metadata.cfId, partitionKey, cellName), clockAndCount); } public void forceMajorCompaction() throws InterruptedException, ExecutionException { CompactionManager.instance.performMaximal(this); } public static Iterable<ColumnFamilyStore> all() { List<Iterable<ColumnFamilyStore>> stores = new ArrayList<Iterable<ColumnFamilyStore>>(Schema.instance.getKeyspaces().size()); for (Keyspace keyspace : Keyspace.all()) { stores.add(keyspace.getColumnFamilyStores()); } return Iterables.concat(stores); } public Iterable<DecoratedKey> keySamples(Range<Token> range) { try (RefViewFragment view = selectAndReference(ALL_SSTABLES)) { Iterable<DecoratedKey>[] samples = new Iterable[view.sstables.size()]; int i = 0; for (SSTableReader sstable: view.sstables) { samples[i++] = sstable.getKeySamples(range); } return Iterables.concat(samples); } } public long estimatedKeysForRange(Range<Token> range) { try (RefViewFragment view = selectAndReference(ALL_SSTABLES)) { long count = 0; for (SSTableReader sstable : view.sstables) count += sstable.estimatedKeysForRanges(Collections.singleton(range)); return count; } } /** * For testing. No effort is made to clear historical or even the current memtables, nor for * thread safety. All we do is wipe the sstable containers clean, while leaving the actual * data files present on disk. (This allows tests to easily call loadNewSSTables on them.) */ public void clearUnsafe() { for (final ColumnFamilyStore cfs : concatWithIndexes()) { cfs.runWithCompactionsDisabled(new Callable<Void>() { public Void call() { cfs.data.init(); return null; } }, true); } } /** * Truncate deletes the entire column family's data with no expensive tombstone creation */ public void truncateBlocking() { // We have two goals here: // - truncate should delete everything written before truncate was invoked // - but not delete anything that isn't part of the snapshot we create. 
// We accomplish this by first flushing manually, then snapshotting, and // recording the timestamp IN BETWEEN those actions. Any sstables created // with this timestamp or greater time, will not be marked for delete. // // Bonus complication: since we store replay position in sstable metadata, // truncating those sstables means we will replay any CL segments from the // beginning if we restart before they [the CL segments] are discarded for // normal reasons post-truncate. To prevent this, we store truncation // position in the System keyspace. logger.debug("truncating {}", name); if (keyspace.metadata.durableWrites || DatabaseDescriptor.isAutoSnapshot()) { // flush the CF being truncated before forcing the new segment forceBlockingFlush(); // sleep a little to make sure that our truncatedAt comes after any sstable // that was part of the flushed we forced; otherwise on a tie, it won't get deleted. Uninterruptibles.sleepUninterruptibly(1, TimeUnit.MILLISECONDS); } else { // just nuke the memtable data w/o writing to disk first synchronized (data) { final Flush flush = new Flush(true); flushExecutor.execute(flush); postFlushExecutor.submit(flush.postFlush); } } Runnable truncateRunnable = new Runnable() { public void run() { logger.debug("Discarding sstable data for truncated CF + indexes"); final long truncatedAt = System.currentTimeMillis(); data.notifyTruncated(truncatedAt); if (DatabaseDescriptor.isAutoSnapshot()) snapshot(Keyspace.getTimestampedSnapshotName(name)); ReplayPosition replayAfter = discardSSTables(truncatedAt); for (SecondaryIndex index : indexManager.getIndexes()) index.truncateBlocking(truncatedAt); SystemKeyspace.saveTruncationRecord(ColumnFamilyStore.this, truncatedAt, replayAfter); logger.debug("cleaning out row cache"); invalidateCaches(); } }; runWithCompactionsDisabled(Executors.callable(truncateRunnable), true); logger.debug("truncate complete"); } public <V> V runWithCompactionsDisabled(Callable<V> callable, boolean interruptValidation) { 
// synchronize so that concurrent invocations don't re-enable compactions partway through unexpectedly, // and so we only run one major compaction at a time synchronized (this) { logger.debug("Cancelling in-progress compactions for {}", metadata.cfName); Iterable<ColumnFamilyStore> selfWithIndexes = concatWithIndexes(); for (ColumnFamilyStore cfs : selfWithIndexes) cfs.getCompactionStrategy().pause(); try { // interrupt in-progress compactions Function<ColumnFamilyStore, CFMetaData> f = new Function<ColumnFamilyStore, CFMetaData>() { public CFMetaData apply(ColumnFamilyStore cfs) { return cfs.metadata; } }; Iterable<CFMetaData> allMetadata = Iterables.transform(selfWithIndexes, f); CompactionManager.instance.interruptCompactionFor(allMetadata, interruptValidation); // wait for the interruption to be recognized long start = System.nanoTime(); long delay = TimeUnit.MINUTES.toNanos(1); while (System.nanoTime() - start < delay) { if (CompactionManager.instance.isCompacting(selfWithIndexes)) Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS); else break; } // doublecheck that we finished, instead of timing out for (ColumnFamilyStore cfs : selfWithIndexes) { if (!cfs.getDataTracker().getCompacting().isEmpty()) { logger.warn("Unable to cancel in-progress compactions for {}. 
Perhaps there is an unusually large row in progress somewhere, or the system is simply overloaded.", metadata.cfName); return null; } } logger.debug("Compactions successfully cancelled"); // run our task try { return callable.call(); } catch (Exception e) { throw new RuntimeException(e); } } finally { for (ColumnFamilyStore cfs : selfWithIndexes) cfs.getCompactionStrategy().resume(); } } } public Iterable<SSTableReader> markAllCompacting() { Callable<Iterable<SSTableReader>> callable = new Callable<Iterable<SSTableReader>>() { public Iterable<SSTableReader> call() throws Exception { assert data.getCompacting().isEmpty() : data.getCompacting(); Iterable<SSTableReader> sstables = Lists.newArrayList(AbstractCompactionStrategy.filterSuspectSSTables(getSSTables())); if (Iterables.isEmpty(sstables)) return Collections.emptyList(); boolean success = data.markCompacting(sstables); assert success : "something marked things compacting while compactions are disabled"; return sstables; } }; return runWithCompactionsDisabled(callable, false); } @Override public String toString() { return "CFS(" + "Keyspace='" + keyspace.getName() + '\'' + ", ColumnFamily='" + name + '\'' + ')'; } public void disableAutoCompaction() { // we don't use CompactionStrategy.pause since we don't want users flipping that on and off // during runWithCompactionsDisabled this.compactionStrategyWrapper.disable(); } public void enableAutoCompaction() { enableAutoCompaction(false); } /** * used for tests - to be able to check things after a minor compaction * @param waitForFutures if we should block until autocompaction is done */ @VisibleForTesting public void enableAutoCompaction(boolean waitForFutures) { this.compactionStrategyWrapper.enable(); List<Future<?>> futures = CompactionManager.instance.submitBackground(this); if (waitForFutures) FBUtilities.waitOnFutures(futures); } public boolean isAutoCompactionDisabled() { return !this.compactionStrategyWrapper.isEnabled(); } /* JMX getters and setters for 
the Default<T>s. - get/set minCompactionThreshold - get/set maxCompactionThreshold - get memsize - get memops - get/set memtime */ public AbstractCompactionStrategy getCompactionStrategy() { return compactionStrategyWrapper; } public void setCompactionThresholds(int minThreshold, int maxThreshold) { validateCompactionThresholds(minThreshold, maxThreshold); minCompactionThreshold.set(minThreshold); maxCompactionThreshold.set(maxThreshold); CompactionManager.instance.submitBackground(this); } public int getMinimumCompactionThreshold() { return minCompactionThreshold.value(); } public void setMinimumCompactionThreshold(int minCompactionThreshold) { validateCompactionThresholds(minCompactionThreshold, maxCompactionThreshold.value()); this.minCompactionThreshold.set(minCompactionThreshold); } public int getMaximumCompactionThreshold() { return maxCompactionThreshold.value(); } public void setMaximumCompactionThreshold(int maxCompactionThreshold) { validateCompactionThresholds(minCompactionThreshold.value(), maxCompactionThreshold); this.maxCompactionThreshold.set(maxCompactionThreshold); } private void validateCompactionThresholds(int minThreshold, int maxThreshold) { if (minThreshold > maxThreshold) throw new RuntimeException(String.format("The min_compaction_threshold cannot be larger than the max_compaction_threshold. " + "Min is '%d', Max is '%d'.", minThreshold, maxThreshold)); if (maxThreshold == 0 || minThreshold == 0) throw new RuntimeException("Disabling compaction by setting min_compaction_threshold or max_compaction_threshold to 0 " + "is deprecated, set the compaction strategy option 'enabled' to 'false' instead or use the nodetool command 'disableautocompaction'."); } // End JMX get/set. 
public int getMeanColumns() { return data.getMeanColumns(); } public long estimateKeys() { return data.estimatedKeys(); } /** true if this CFS contains secondary index data */ public boolean isIndex() { return partitioner instanceof LocalPartitioner; } public Iterable<ColumnFamilyStore> concatWithIndexes() { // we return the main CFS first, which we rely on for simplicity in switchMemtable(), for getting the // latest replay position return Iterables.concat(Collections.singleton(this), indexManager.getIndexesBackedByCfs()); } public List<String> getBuiltIndexes() { return indexManager.getBuiltIndexes(); } public int getUnleveledSSTables() { return this.compactionStrategyWrapper.getUnleveledSSTables(); } public int[] getSSTableCountPerLevel() { return compactionStrategyWrapper.getSSTableCountPerLevel(); } public static class ViewFragment { public final List<SSTableReader> sstables; public final Iterable<Memtable> memtables; public ViewFragment(List<SSTableReader> sstables, Iterable<Memtable> memtables) { this.sstables = sstables; this.memtables = memtables; } } public static class RefViewFragment extends ViewFragment implements AutoCloseable { public final Refs<SSTableReader> refs; public RefViewFragment(List<SSTableReader> sstables, Iterable<Memtable> memtables, Refs<SSTableReader> refs) { super(sstables, memtables); this.refs = refs; } public void release() { refs.release(); } public void close() { refs.release(); } } /** * Returns the creation time of the oldest memtable not fully flushed yet. 
*/ public long oldestUnflushedMemtable() { return data.getView().getOldestMemtable().creationTime(); } public boolean isEmpty() { DataTracker.View view = data.getView(); return view.sstables.isEmpty() && view.getCurrentMemtable().getOperations() == 0 && view.getCurrentMemtable() == view.getOldestMemtable(); } private boolean isRowCacheEnabled() { return metadata.getCaching().rowCache.isEnabled() && CacheService.instance.rowCache.getCapacity() > 0; } /** * Discard all SSTables that were created before given timestamp. * * Caller should first ensure that comapctions have quiesced. * * @param truncatedAt The timestamp of the truncation * (all SSTables before that timestamp are going be marked as compacted) * * @return the most recent replay position of the truncated data */ public ReplayPosition discardSSTables(long truncatedAt) { assert data.getCompacting().isEmpty() : data.getCompacting(); List<SSTableReader> truncatedSSTables = new ArrayList<>(); for (SSTableReader sstable : getSSTables()) { if (!sstable.newSince(truncatedAt)) truncatedSSTables.add(sstable); } if (truncatedSSTables.isEmpty()) return ReplayPosition.NONE; markObsolete(truncatedSSTables, OperationType.UNKNOWN); return ReplayPosition.getReplayPosition(truncatedSSTables); } public double getDroppableTombstoneRatio() { return getDataTracker().getDroppableTombstoneRatio(); } public long trueSnapshotsSize() { return directories.trueSnapshotsSize(); } @VisibleForTesting void resetFileIndexGenerator() { fileIndexGenerator.set(0); } public static final Function<DataTracker.View, List<SSTableReader>> ALL_SSTABLES = new Function<DataTracker.View, List<SSTableReader>>() { public List<SSTableReader> apply(DataTracker.View view) { return new ArrayList<>(view.sstables); } }; }
apache-2.0
aws/aws-sdk-java
aws-java-sdk-robomaker/src/main/java/com/amazonaws/services/robomaker/model/transform/CreateFleetRequestMarshaller.java
2275
/* * Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with * the License. A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions * and limitations under the License. */ package com.amazonaws.services.robomaker.model.transform; import java.util.Map; import javax.annotation.Generated; import com.amazonaws.SdkClientException; import com.amazonaws.services.robomaker.model.*; import com.amazonaws.protocol.*; import com.amazonaws.annotation.SdkInternalApi; /** * CreateFleetRequestMarshaller */ @Generated("com.amazonaws:aws-java-sdk-code-generator") @SdkInternalApi public class CreateFleetRequestMarshaller { private static final MarshallingInfo<String> NAME_BINDING = MarshallingInfo.builder(MarshallingType.STRING).marshallLocation(MarshallLocation.PAYLOAD) .marshallLocationName("name").build(); private static final MarshallingInfo<Map> TAGS_BINDING = MarshallingInfo.builder(MarshallingType.MAP).marshallLocation(MarshallLocation.PAYLOAD) .marshallLocationName("tags").build(); private static final CreateFleetRequestMarshaller instance = new CreateFleetRequestMarshaller(); public static CreateFleetRequestMarshaller getInstance() { return instance; } /** * Marshall the given parameter object. 
*/ public void marshall(CreateFleetRequest createFleetRequest, ProtocolMarshaller protocolMarshaller) { if (createFleetRequest == null) { throw new SdkClientException("Invalid argument passed to marshall(...)"); } try { protocolMarshaller.marshall(createFleetRequest.getName(), NAME_BINDING); protocolMarshaller.marshall(createFleetRequest.getTags(), TAGS_BINDING); } catch (Exception e) { throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e); } } }
apache-2.0
aws/aws-sdk-java
aws-java-sdk-transcribe/src/main/java/com/amazonaws/services/transcribe/model/transform/UpdateMedicalVocabularyRequestMarshaller.java
2807
/* * Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with * the License. A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions * and limitations under the License. */ package com.amazonaws.services.transcribe.model.transform; import javax.annotation.Generated; import com.amazonaws.SdkClientException; import com.amazonaws.services.transcribe.model.*; import com.amazonaws.protocol.*; import com.amazonaws.annotation.SdkInternalApi; /** * UpdateMedicalVocabularyRequestMarshaller */ @Generated("com.amazonaws:aws-java-sdk-code-generator") @SdkInternalApi public class UpdateMedicalVocabularyRequestMarshaller { private static final MarshallingInfo<String> VOCABULARYNAME_BINDING = MarshallingInfo.builder(MarshallingType.STRING) .marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("VocabularyName").build(); private static final MarshallingInfo<String> LANGUAGECODE_BINDING = MarshallingInfo.builder(MarshallingType.STRING) .marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("LanguageCode").build(); private static final MarshallingInfo<String> VOCABULARYFILEURI_BINDING = MarshallingInfo.builder(MarshallingType.STRING) .marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("VocabularyFileUri").build(); private static final UpdateMedicalVocabularyRequestMarshaller instance = new UpdateMedicalVocabularyRequestMarshaller(); public static UpdateMedicalVocabularyRequestMarshaller getInstance() { return instance; } /** * Marshall the given parameter object. 
*/ public void marshall(UpdateMedicalVocabularyRequest updateMedicalVocabularyRequest, ProtocolMarshaller protocolMarshaller) { if (updateMedicalVocabularyRequest == null) { throw new SdkClientException("Invalid argument passed to marshall(...)"); } try { protocolMarshaller.marshall(updateMedicalVocabularyRequest.getVocabularyName(), VOCABULARYNAME_BINDING); protocolMarshaller.marshall(updateMedicalVocabularyRequest.getLanguageCode(), LANGUAGECODE_BINDING); protocolMarshaller.marshall(updateMedicalVocabularyRequest.getVocabularyFileUri(), VOCABULARYFILEURI_BINDING); } catch (Exception e) { throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e); } } }
apache-2.0
emistoolbox/emistoolbox
libs/joriki/src/info/joriki/truetype/GlyphDescription.java
311
/* * Copyright 2002 Felix Pahl. All rights reserved. * Use is subject to license terms. */ package info.joriki.truetype; import info.joriki.io.Outputable; abstract public class GlyphDescription implements Outputable { byte [] instructions; abstract void interpret (ByteCodeInterpreter interpreter); }
apache-2.0
muntasirraihan/apache-cassandra-1.2.4-src
test/unit/org/apache/cassandra/cli/CliTest.java
15616
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.cassandra.cli; import org.apache.cassandra.SchemaLoader; import org.apache.cassandra.exceptions.ConfigurationException; import org.apache.cassandra.config.Schema; import org.apache.cassandra.service.EmbeddedCassandraService; import org.apache.cassandra.thrift.*; import org.apache.thrift.TException; import org.junit.Test; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.PrintStream; import java.util.regex.Pattern; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; public class CliTest extends SchemaLoader { // please add new statements here so they could be auto-runned by this test. 
private String[] statements = { "use TestKeySpace;", "create column family SecondaryIndicesWithoutIdxName" + " with comparator = UTF8Type" + " and default_validation_class = UTF8Type" + " and column_metadata = [{column_name: profileId, validation_class: UTF8Type, index_type: KEYS}];", "update column family SecondaryIndicesWithoutIdxName" + " with column_metadata = " + "[{column_name: profileId, validation_class: UTF8Type, index_type: KEYS}," + "{column_name: postedDate, validation_class: LongType}];", "create column family 123 with comparator=UTF8Type and column_metadata=[{ column_name:world, validation_class:IntegerType, index_type:0, index_name:IdxName }, " + "{ column_name:world2, validation_class:LongType, index_type:KEYS, index_name:LongIdxName}, " + "{ column_name:617070, validation_class:UTF8Type, index_type:KEYS }, " + "{ column_name:28292, validation_class:UTF8Type, index_type:CUSTOM, index_options:{class_name:'org.apache.cassandra.db.index.keys.KeysIndex', foo:bar}}," + "{ column_name:'-617071', validation_class:UTF8Type, index_type:KEYS }," + "{ column_name:time_spent_uuid, validation_class:TimeUUIDType}] and default_validation_class=UTF8Type;", "assume 123 keys as utf8;", "set 123[hello][world] = 123848374878933948398384;", "set 123[hello][test_quote] = 'value\\'';", "set 123['k\\'ey'][VALUE] = 'VAL';", "set 123['k\\'ey'][VALUE] = 'VAL\\'';", "set 123[hello][-31337] = 'some string value';", "list 123;", "list 123[:];", "list 123[456:];", "list 123 limit 5;", "list 123[12:15] limit 20;", "list 123[12:15] columns 2;", "list 123 columns 2 reversed;", "list 123 limit 10 columns 2 reversed;", "get 123[hello][-31337];", "get 123[hello][world];", "get 123[hello][test_quote];", "get 123['k\\'ey'][VALUE]", "set 123[hello][-31337] = -23876;", "set 123[hello][world2] = 15;", "get 123 where world2 = long(15);", "get 123 where world2 = long(15);", "get 123 where world2 = long(15);", "del 123[utf8('hello')][utf8('world')];", "del 123[hello][world2];", "set 
123['hello'][time_spent_uuid] = timeuuid(a8098c1a-f86e-11da-bd1a-00112444be1e);", "create column family CF2 with comparator=IntegerType and default_validation_class=AsciiType;", "assume CF2 keys as utf8;", "set CF2['key'][98349387493847748398334] = 'some text';", "get CF2['key'][98349387493847748398334];", "set CF2['key'][98349387493] = 'some text other';", "get CF2['key'][98349387493];", "create column family CF3 with comparator=UTF8Type and column_metadata=[{column_name:'big world', validation_class:LongType, index_type:KEYS, index_name:WorldIdx}];", "assume CF3 keys as utf8;", "set CF3['hello']['big world'] = 3748;", "get CF3['hello']['big world'];", "list CF3;", "list CF3[:];", "list CF3[h:];", "list CF3 limit 10;", "list CF3[h:] limit 10;", "create column family CF4 with comparator=IntegerType and column_metadata=[{column_name:9999, validation_class:LongType}];", "assume CF4 keys as utf8;", "set CF4['hello'][9999] = 1234;", "get CF4['hello'][9999];", "get CF4['hello'][9999] as Long;", "get CF4['hello'][9999] as Bytes;", "set CF4['hello'][9999] = Long(1234);", "get CF4['hello'][9999];", "get CF4['hello'][9999] as Long;", "del CF4['hello'][9999];", "get CF4['hello'][9999];", "create column family sCf1 with column_type=Super and comparator=IntegerType and subcomparator=LongType and column_metadata=[{column_name:9999, validation_class:LongType}];", "assume sCf1 keys as utf8;", "set sCf1['hello'][1][9999] = 1234;", "get sCf1['hello'][1][9999];", "get sCf1['hello'][1][9999] as Long;", "get sCf1['hello'][1][9999] as Bytes;", "set sCf1['hello'][1][9999] = Long(1234);", "set sCf1['hello'][-1][-12] = Long(5678);", "get sCf1['hello'][-1][-12];", "set sCf1['hello'][-1][-12] = -340897;", "set sCf1['hello'][-1][-12] = integer(-340897);", "get sCf1['hello'][1][9999];", "get sCf1['hello'][1][9999] as Long;", "del sCf1['hello'][1][9999];", "get sCf1['hello'][1][9999];", "set sCf1['hello'][1][9999] = Long(1234);", "del sCf1['hello'][9999];", "get sCf1['hello'][1][9999];", 
"create column family 'Counter1' with comparator=UTF8Type and default_validation_class=CounterColumnType;", "assume Counter1 keys as utf8;", "incr Counter1['hello']['cassandra'];", "incr Counter1['hello']['cassandra'] by 3;", "incr Counter1['hello']['cassandra'] by -2;", "decr Counter1['hello']['cassandra'];", "decr Counter1['hello']['cassandra'] by 3;", "decr Counter1['hello']['cassandra'] by -2;", "get Counter1['hello']['cassandra'];", "get Counter1['hello'];", "truncate 123;", "drop index on '123'.world2;", "drop index on '123'.617070;", "drop index on '123'.'-617071';", "drop index on CF3.'big world';", "update keyspace TestKeySpace with durable_writes = false;", "assume 123 comparator as utf8;", "assume 123 sub_comparator as integer;", "assume 123 validator as lexicaluuid;", "assume 123 keys as timeuuid;", "create column family CF7;", "assume CF7 keys as utf8;", "set CF7[1][timeuuid()] = utf8(test1);", "set CF7[2][lexicaluuid()] = utf8('hello world!');", "set CF7[3][lexicaluuid(550e8400-e29b-41d4-a716-446655440000)] = utf8(test2);", "set CF7[key2][timeuuid()] = utf8(test3);", "assume CF7 comparator as lexicaluuid;", "assume CF7 keys as utf8;", "list CF7;", "get CF7[3];", "get CF7[3][lexicaluuid(550e8400-e29b-41d4-a716-446655440000)];", "get sCf1['hello'][1][9999];", "set sCf1['hello'][1][9999] = 938;", "set sCf1['hello'][1][9999] = 938 with ttl = 30;", "set sCf1['hello'][1][9999] = 938 with ttl = 560;", "count sCf1[hello];", "count sCf1[utf8('hello')];", "count sCf1[utf8('hello')][integer(1)];", "count sCf1[hello][1];", "list sCf1;", "del sCf1['hello'][1][9999];", "assume sCf1 comparator as utf8;", "create column family CF8;", "drop column family cF8;", "create keyspace TESTIN;", "drop keyspace tesTIN;", "update column family 123 with comparator=UTF8Type and column_metadata=[];", "drop column family 123;", "create column family myCF with column_type='Super' and comparator='UTF8Type' AND subcomparator='UTF8Type' AND default_validation_class=AsciiType;", "assume 
myCF keys as utf8;", "create column family Countries with comparator=UTF8Type and column_metadata=[ {column_name: name, validation_class: UTF8Type} ];", "set Countries[11][name] = USA;", "get Countries[11][name];", "update column family Countries with compaction_strategy = 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy';", "create column family Cities with compaction_strategy = 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy' and compaction_strategy_options = {min_sstable_size:1024};", "set myCF['key']['scName']['firstname'] = 'John';", "get myCF['key']['scName']", "assume CF3 keys as utf8;", "use TestKEYSpace;", "update keyspace TestKeySpace with placement_strategy='org.apache.cassandra.locator.NetworkTopologyStrategy';", "update keyspace TestKeySpace with strategy_options=[{DC1:3, DC2:4, DC5:1}];", "describe cluster;", "help describe cluster;", "show cluster name", "show api version", "help help", "help connect", "help use", "help describe", "HELP exit", "help QUIT", "help show cluster name", "help show keyspaces", "help show schema", "help show api version", "help create keyspace", "HELP update KEYSPACE", "HELP CREATE column FAMILY", "HELP UPDATE COLUMN family", "HELP drop keyspace", "help drop column family", "HELP GET", "HELP set", "HELP DEL", "HELP count", "HELP list", "HELP TRUNCATE", "help assume", "HELP", "?", "show schema", "show schema TestKeySpace" }; @Test public void testCli() throws IOException, TException, ConfigurationException, ClassNotFoundException, TimedOutException, NotFoundException, SchemaDisagreementException, NoSuchFieldException, InvalidRequestException, UnavailableException, InstantiationException, IllegalAccessException { Schema.instance.clear(); // Schema are now written on disk and will be reloaded new EmbeddedCassandraService().start(); // new error/output streams for CliSessionState ByteArrayOutputStream errStream = new ByteArrayOutputStream(); ByteArrayOutputStream outStream = new 
ByteArrayOutputStream(); // checking if we can connect to the running cassandra node on localhost CliMain.connect("127.0.0.1", 9170); // setting new output stream CliMain.sessionState.setOut(new PrintStream(outStream)); CliMain.sessionState.setErr(new PrintStream(errStream)); // re-creating keyspace for tests try { // dropping in case it exists e.g. could be left from previous run CliMain.processStatement("drop keyspace TestKeySpace;"); } catch (Exception e) { // TODO check before drop so we don't have this fragile ignored exception block } CliMain.processStatement("create keyspace TestKeySpace;"); for (String statement : statements) { errStream.reset(); // System.out.println("Executing statement: " + statement); CliMain.processStatement(statement); String result = outStream.toString(); // System.out.println("Result:\n" + result); if (statement.startsWith("show schema")) assertEquals(errStream.toString() + "processing" + statement, "\nWARNING: CQL3 tables are intentionally omitted from 'show schema' output.\n" + "See https://issues.apache.org/jira/browse/CASSANDRA-4377 for details.\n\n", errStream.toString()); else assertEquals(errStream.toString() + " processing " + statement, "", errStream.toString()); if (statement.startsWith("drop ") || statement.startsWith("create ") || statement.startsWith("update ")) { assert Pattern.compile("(.{8})-(.{4})-(.{4})-(.{4})-(.{12}).*", Pattern.DOTALL).matcher(result).matches() : String.format("\"%s\" failed: %s", statement, result); } else if (statement.startsWith("set ")) { assertTrue(result.contains("Value inserted.")); assertTrue(result.contains("Elapsed time:")); } else if (statement.startsWith("incr ")) { assertTrue(result.contains("Value incremented.")); } else if (statement.startsWith("decr ")) { assertTrue(result.contains("Value decremented.")); } else if (statement.startsWith("get ")) { if (statement.contains("where")) { assertTrue(result.startsWith("-------------------" + System.getProperty("line.separator") + 
"RowKey:")); } else if (statement.contains("Counter")) { assertTrue(result.startsWith("=> (counter=") || result.startsWith("Value was not found")); } else { assertTrue(result.startsWith("=> (column=") || result.startsWith("Value was not found")); } assertTrue(result.contains("Elapsed time:")); } else if (statement.startsWith("truncate ")) { assertTrue(result.contains(" truncated.")); } else if (statement.startsWith("assume ")) { assertTrue(result.contains("successfully.")); } outStream.reset(); // reset stream so we have only output from next statement all the time errStream.reset(); // no errors to the end user. } } @Test public void testEscape() { //escaped is the string read from the cli. String escaped = "backspace \\b tab \\t linefeed \\n form feed \\f carriage return \\r duble quote \\\" " + "single quote \\' backslash \\\\"; String unescaped = "backspace \b tab \t linefeed \n form feed \f carriage return \r duble quote \" " + "single quote ' backslash \\"; // when read from the cli may have single quotes around it assertEquals(unescaped, CliUtils.unescapeSQLString("'" + escaped + "'")); assertEquals(escaped, CliUtils.escapeSQLString(unescaped)); } }
apache-2.0
Vidada-Project/Vidada-Server
vidada-server/src/main/java/com/elderbyte/vidada/domain/auth/users/VidadaUserDtoBuilder.java
569
package com.elderbyte.vidada.domain.auth.users; import java.util.stream.Collectors; public class VidadaUserDtoBuilder { public static VidadaUserDto build(VidadaUser user){ VidadaUserDto dto = new VidadaUserDto(); dto.username = user.getUsername(); dto.firstName = user.getFirstName(); dto.lastName = user.getLastName(); dto.enabled = user.isEnabled(); dto.authorities = user.getAuthorities().stream() .map(a -> a.getAuthority()) .collect(Collectors.toSet()); return dto; } }
apache-2.0
VickerChen/mao-company-wifi-code
wiFiWeather/src/main/java/com/wifi/utils/BaiduLocation.java
25551
package com.wifi.utils;

import android.content.Context;
import android.os.Handler;
import android.os.Message;
import android.util.Log;
import com.baidu.location.BDLocation;
import com.baidu.location.BDLocationListener;
import com.baidu.location.LocationClient;
import com.baidu.location.LocationClientOption;
import com.baidu.location.LocationClientOption.LocationMode;
import com.mpw.constant.MyApplication;
import com.wwr.clock.A;
import org.json.JSONException;
import org.json.JSONObject;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;
import static com.mpw.constant.MyApplication.isEnterPosition;

/**
 * Wraps the Baidu location SDK: requests a location fix, matches the
 * reported city against the app's local city lists
 * ({@code MyApplication.CN_LIST} / {@code FORE_LIST}), falls back to the
 * Yahoo weather YQL endpoint for unknown foreign cities, and posts the
 * outcome to the supplied {@link Handler} as a {@code LocationResultBean}
 * message with {@code msg.what = A.LOCATION}.
 *
 * NOTE(review): the Yahoo HTTP request is issued directly inside the
 * location callback — presumably on the main thread on Android; confirm
 * this does not raise NetworkOnMainThreadException.
 * NOTE(review): MyLocationListener and mListener duplicate the matching
 * logic almost entirely — consider extracting the shared flow.
 */
public class BaiduLocation {
    public LocationClient mLocationClient = null;
    public BDLocationListener myListener = new MyLocationListener();
    private Context context;
    private Context mContext; // NOTE(review): stored but never read in this class
    private Handler handler;
    // Result codes carried in LocationResultBean.resltCode:
    public static final int SUCCESS_JUST_ONE = 1; // success: city found and matched the local list
    public static final int SUCCESS_NO = 0; // success: no local match, but Yahoo has data
    public static final int FAILD = -1; // locating failed
    public static final int YAHOO_FAILD = -2; // success: no local match and Yahoo has no data
    private LocationService locationService;
    Message msg = new Message();
    List<City> LoactionCitys = null;

    /**
     * Starts a location request immediately through LocationService; the
     * result is delivered asynchronously to {@code handler}.
     *
     * @param context  context used for the SDK client and backend calls
     * @param handler  receiver of the LocationResultBean message
     * @param mcontext secondary context (currently unused)
     */
    public BaiduLocation(Context context, Handler handler, Context mcontext) {
        this.context = context;
        this.handler = handler;
        this.mContext = mcontext;
        locationService = new LocationService(context);
        locationService.registerListener(mListener);
        locationService.setLocationOption(locationService.getDefaultLocationClientOption());
        locationService.start();
    }

    /**
     * Configures a raw LocationClient and starts a one-shot locate.
     * NOTE(review): appears unused — the constructor uses LocationService
     * with mListener instead; confirm before removing.
     */
    private void initLocation() {
        // Initialisation.
        mLocationClient = new LocationClient(context); // declare the LocationClient
        mLocationClient.registerLocationListener(myListener); // register the result listener
        LocationClientOption option = new LocationClientOption();
        option.setLocationMode(LocationMode.Battery_Saving);// battery-saving (network-only) mode
        // SDK option catalogue kept disabled for reference:
        // option.setCoorType("bd09ll");
        // option.setScanSpan(0);
        // option.setIsNeedAddress(true);
        // option.setOpenGps(false);
        // option.setLocationNotify(false);
        // option.setIsNeedLocationDescribe(false);
        // option.setIsNeedLocationPoiList(true);
        // option.setIgnoreKillProcess(true);
        // option.SetIgnoreCacheException(false);
        // option.setEnableSimulateGps(false);
        // Effective configuration (replaces the option object created above):
        option = new LocationClientOption();
        option.setLocationMode(LocationMode.Battery_Saving);// battery-saving (network-only) mode
        option.setCoorType("bd09ll");// Baidu bd09ll coordinate system
        option.setScanSpan(0);// 0 = locate exactly once
        option.setIsNeedAddress(true);// include reverse-geocoded address
        option.setIsNeedLocationDescribe(true);// include a human-readable place description
        option.setNeedDeviceDirect(false);// no device heading
        option.setLocationNotify(false);// no 1 Hz GPS notifications
        option.setIgnoreKillProcess(true); // keep the SDK service process alive after stop()
        option.setIsNeedLocationDescribe(true);// (duplicate of the call above)
        option.setIsNeedLocationPoiList(true);// include nearby POIs
        option.SetIgnoreCacheException(false);// collect crash info
        option.setIsNeedAltitude(false);// no altitude needed
        mLocationClient.setLocOption(option);
        mLocationClient.start();
    }

    /**
     * Location listener that analyses the fix.
     *
     * Flow: failure -> result code -1. On success the located city is
     * lower-cased with spaces removed, then matched:
     *   domestic: scan CN_LIST — no match -> -2, match -> 1;
     *   foreign:  scan FORE_LIST — no match -> query Yahoo
     *             (data -> 0, no data -> -2), match -> 1.
     *
     * @author lxj
     */
    public class MyLocationListener implements BDLocationListener {
        Message msg = new Message();
        List<City> LoactionCitys = null;

        @Override
        public void onReceiveLocation(BDLocation location) {
            LoactionCitys = new ArrayList<City>();
            msg = new Message();
            msg.what = A.LOCATION;
            LocationResultBean lrb = new LocationResultBean();
            lrb.setResltCode(-1); // default to failure, so failure paths need not set it
            // if (location.getLocType() == BDLocation.TypeGpsLocation) {// GPS fix
            // } else
            Log.d("koma===country",11111+"");
            if (location.getLocType() == BDLocation.TypeNetWorkLocation) {// network fix
                if (location.getCity() != null) {// a fix without a city is ignored
                    final String city = location.getCity();
                    String afterCity = "";
                    Log.d("koma===country",location.getCountry());
                    if ("中国".equals(location.getCountry())) { // domestic fix
                        lrb.setCoutry("CN");
                        // Strip a trailing "市" (city) suffix before matching.
                        // NOTE(review): charAt() yields a char, so
                        // "市".equals(Character) is always false and the suffix
                        // is never actually stripped — confirm intent.
                        if ("市".equals(city.charAt(city.length() - 1))) {
                            afterCity = city.substring(0, city.length() - 1);
                        } else {
                            afterCity = city;
                        }
                        // Substring match in either direction against the domestic list.
                        for (City c : MyApplication.CN_LIST) {
                            if ((c.getName().indexOf(afterCity) != -1 || afterCity.indexOf(c
                                    .getName()) != -1)) {
                                LoactionCitys.add(c);
                            }
                        }
                        if (LoactionCitys.size() == 0) { // no local match
                            lrb.setResltCode(-2);
                            LoactionCitys.add(new City(city, city, city));
                            lrb.setLocationCitys(LoactionCitys);
                            // report the unknown city to the backend off-thread
                            new Thread() {
                                public void run() {
                                    new SendCity2Service(context).send(city);
                                }
                                ;
                            }.start();
                        } else { // matched locally
                            lrb.setResltCode(1);
                            lrb.setLocationCitys(LoactionCitys);
                        }
                    } else { // foreign fix
                        // lower-case and remove all spaces before matching
                        afterCity = toLower(city.replace(" ", ""));
                        lrb.setCoutry("NO"); // foreign
                        for (City c : MyApplication.FORE_LIST) {
                            if ((afterCity.indexOf(toLower(c.getPinyi().replace(" ", ""))) != -1)
                                    || (toLower(c.getPinyi().replace(" ", "")).indexOf(afterCity) != -1)) {
                                LoactionCitys.add(c);
                            }
                        }
                        if (LoactionCitys.isEmpty()) { // city located but absent from the local DB:
                            // ask the Yahoo weather YQL endpoint whether it knows the city
                            String yahoo = "https://query.yahooapis"
                                    + ".com/v1/public/yql?q=select%20*%20from%20weather"
                                    + ".forecast%20where%20woeid%20in%20"
                                    + "(select%20woeid%20from%20geo.places(1)%20where%20text%3D%22" + city
                                    + "%22)&format=json&env=store%3A%2F%2Fdatatables"
                                    + ".org%2Falltableswithkeys";
                            try {
                                URL url1 = new URL(yahoo);
                                HttpURLConnection conn1 = (HttpURLConnection) url1.openConnection();
                                InputStream is1 = conn1.getInputStream();
                                BufferedReader br1 = new BufferedReader(new InputStreamReader(is1));
                                StringBuffer sb1 = new StringBuffer();
                                String str1;
                                while ((str1 = br1.readLine()) != null) {
                                    sb1.append(str1);
                                }
                                System.out.println(sb1.toString());
                                // a JSON body (first char '{') means the query succeeded
                                if ("{".equals(String.valueOf(sb1.toString().charAt(0)))) {
                                    JSONObject object1 = new org.json.JSONObject(sb1.toString()
                                            .trim());
                                    JSONObject data = object1.getJSONObject("query");
                                    int count = data.getInt("count");
                                    if (count == 0) { // Yahoo does not know the city
                                        LoactionCitys.add(new City(city, city, city));
                                        lrb.setLocationCitys(LoactionCitys);
                                        lrb.setResltCode(-2);
                                    } else {// Yahoo knows the city -> result code 0
                                        /**
                                         * Extract the country Yahoo reports.
                                         */
                                        JSONObject results = data.getJSONObject("results"); // result payload
                                        JSONObject channel = results.getJSONObject("channel"); // concrete data
                                        String title = channel.getString("title"); // concrete data
                                        String yahooCountry = title.substring(title.length() - 2);
                                        // System.out.println("定位到的国家 " + country);
                                        System.out.println("雅虎返回的国家" + yahooCountry);
                                        // if (country.equals(yahooCountry)) { // compare located vs Yahoo country
                                        //
                                        // }
                                        // lrb.setCoutry(country);
                                        LoactionCitys.add(new City(city, city, city));
                                        lrb.setLocationCitys(LoactionCitys);
                                        lrb.setResltCode(0);
                                    }
                                } else { // non-JSON response: treat as "no data"
                                    LoactionCitys.add(new City(city, city, city));
                                    lrb.setLocationCitys(LoactionCitys);
                                    lrb.setResltCode(-2);
                                }
                                // report the unknown city to the backend off-thread
                                new Thread() {
                                    public void run() {
                                        new SendCity2Service(context).send(city);
                                    }
                                }.start();
                            } catch (MalformedURLException e) {
                                e.printStackTrace();
                            } catch (IOException e) {
                                e.printStackTrace();
                            } catch (JSONException e) {
                                e.printStackTrace();
                            }
                        } else { // city located and present in the local DB
                            // lrb.setCoutry(country);
                            lrb.setLocationCitys(LoactionCitys);
                            lrb.setResltCode(1);
                        }
                    }
                }
                //
                // carrier info
            } else if (location.getLocType() == BDLocation.TypeOffLineLocation) {// offline fix
                // sb.append("离线定位成功,离线定位结果也是有效的");
            } else if (location.getLocType() == BDLocation.TypeServerError) {
                //sb.append("服务端网络定位失败,可以反馈IMEI号和大体定位时间到loc-bugs@baidu.com,会有人追查原因");
            } else if (location.getLocType() == BDLocation.TypeNetWorkException) {
                //sb.append("网络不同导致定位失败,请检查网络是否通畅");
            } else if (location.getLocType() == BDLocation.TypeCriteriaException) {
                //sb.append("无法获取有效定位依据导致定位失败,一般是由于手机的原因,处于飞行模式下一般会造成这种结果,可以试着重启手机");
            }
            // Message msg = new Message();
            // msg.obj = new Location().getCity();
            // msg.what = LOCATION;
            // Deliver the result and stop the client (single-shot locate).
            // NOTE(review): mLocationClient is null unless initLocation() ran —
            // confirm this listener is only ever used with it.
            msg.obj = lrb;
            handler.sendMessage(msg);
            mLocationClient.stop();
        }

        @Override
        public void onConnectHotSpotMessage(String arg0, int arg1) {
            // TODO Auto-generated method stub
        }
    }

    /*****
     * Result callback used by LocationService (registered in the
     * constructor); per Baidu's demo, this can be copied into other
     * projects and adapted.
     */
    private BDLocationListener mListener = new BDLocationListener() {

        /**
         * Same matching flow as MyLocationListener.onReceiveLocation, but
         * guarded by the global isEnterPosition flag (handle only the first
         * fix), also storing the result in MyApplication.sLrb and stopping
         * locationService instead of mLocationClient.
         */
        @Override
        public void onReceiveLocation(BDLocation location) {
            if (!isEnterPosition) {
                isEnterPosition = true;
                LoactionCitys = new ArrayList<City>();
                msg.what = A.LOCATION;
                LocationResultBean lrb = new LocationResultBean();
                // lrb.setResltCode(-1); // default failure, so failure paths need not set it
                // TODO Auto-generated method stub
                if (null != location && location.getLocType() != BDLocation.TypeServerError) {
                    LogUtil.LOG("进来了 有定位结果");
                    Log.d("koma===country",location.getCity());
                    // network fix
                    if (location.getCity() != null) {// a fix without a city is ignored
                        LogUtil.LOG("进来了 有城市");
                        final String city = location.getCity();
                        String afterCity = "";
                        if ("中国".equals(location.getCountry())) { // domestic fix
                            lrb.setCoutry("CN");
                            // NOTE(review): same always-false "市".equals(char)
                            // check as in MyLocationListener — confirm intent.
                            if ("市".equals(city.charAt(city.length() - 1))) {
                                afterCity = city.substring(0, city.length() - 1);
                            } else {
                                afterCity = city;
                            }
                            for (City c : MyApplication.CN_LIST) {
                                if ((c.getName().indexOf(afterCity) != -1 || afterCity.indexOf(c
                                        .getName()) != -1)) {
                                    LoactionCitys.add(c);
                                }
                            }
                            if (LoactionCitys.size() == 0) { // no local match
                                lrb.setResltCode(-2);
                                LoactionCitys.add(new City(city, city, city));
                                lrb.setLocationCitys(LoactionCitys);
                                new Thread() {
                                    public void run() {
                                        new SendCity2Service(context).send(city);
                                    }
                                    ;
                                }.start();
                            } else { // matched locally
                                lrb.setResltCode(1);
                                lrb.setLocationCitys(LoactionCitys);
                            }
                        } else { // foreign fix
                            // lower-case and remove all spaces before matching
                            afterCity = toLower(city.replace(" ", ""));
                            lrb.setCoutry("NO"); // foreign
                            for (City c : MyApplication.FORE_LIST) {
                                if ((afterCity.indexOf(toLower(c.getPinyi().replace(" ", ""))) != -1)
                                        || (toLower(c.getPinyi().replace(" ", "")).indexOf
                                        (afterCity) != -1)) {
                                    LoactionCitys.add(c);
                                }
                            }
                            if (LoactionCitys.isEmpty()) { // absent from the local DB: ask Yahoo
                                String yahoo = "https://query.yahooapis"
                                        + ".com/v1/public/yql?q=select%20*%20from%20weather"
                                        + ".forecast%20where%20woeid%20in%20"
                                        + "(select%20woeid%20from%20geo.places(1)"
                                        + "%20where%20text%3D%22" + city
                                        + "%22)&format=json&env=store%3A%2F%2Fdatatables"
                                        + ".org%2Falltableswithkeys";
                                try {
                                    URL url1 = new URL(yahoo);
                                    HttpURLConnection conn1 = (HttpURLConnection) url1.openConnection();
                                    InputStream is1 = conn1.getInputStream();
                                    BufferedReader br1 = new BufferedReader(new InputStreamReader
                                            (is1));
                                    StringBuffer sb1 = new StringBuffer();
                                    String str1;
                                    while ((str1 = br1.readLine()) != null) {
                                        sb1.append(str1);
                                    }
                                    System.out.println(sb1.toString());
                                    // a JSON body (first char '{') means the query succeeded
                                    if ("{".equals(String.valueOf(sb1.toString().charAt(0)))) {
                                        JSONObject object1 = new org.json.JSONObject(sb1.toString
                                                ().trim());
                                        JSONObject data = object1.getJSONObject("query");
                                        int count = data.getInt("count");
                                        if (count == 0) { // Yahoo does not know the city
                                            LoactionCitys.add(new City(city, city, city));
                                            lrb.setLocationCitys(LoactionCitys);
                                            lrb.setResltCode(-2);
                                        } else {// Yahoo knows the city -> result code 0
                                            /**
                                             * Extract the country Yahoo reports.
                                             */
                                            JSONObject results = data.getJSONObject("results"); // result payload
                                            JSONObject channel = results.getJSONObject("channel")
                                                    ; // concrete data
                                            String title = channel.getString("title"); // concrete data
                                            String yahooCountry = title.substring(title.length() - 2);
                                            // System.out.println("定位到的国家 " + country);
                                            System.out.println("雅虎返回的国家" + yahooCountry);
                                            // if (country.equals(yahooCountry)) { // compare located vs Yahoo country
                                            //
                                            // }
                                            // lrb.setCoutry(country);
                                            LoactionCitys.add(new City(city, city, city));
                                            lrb.setLocationCitys(LoactionCitys);
                                            lrb.setResltCode(0);
                                        }
                                    } else { // non-JSON response: treat as "no data"
                                        LoactionCitys.add(new City(city, city, city));
                                        lrb.setLocationCitys(LoactionCitys);
                                        lrb.setResltCode(-2);
                                    }
                                    new Thread() {
                                        public void run() {
                                            new SendCity2Service(context).send(city);
                                        }
                                        ;
                                    }.start();
                                } catch (MalformedURLException e) {
                                    e.printStackTrace();
                                } catch (IOException e) {
                                    e.printStackTrace();
                                } catch (JSONException e) {
                                    e.printStackTrace();
                                }
                            } else { // present in the local DB
                                // lrb.setCoutry(country);
                                lrb.setLocationCitys(LoactionCitys);
                                lrb.setResltCode(1);
                            }
                        }
                    } else if (location.getLocType() == BDLocation.TypeGpsLocation) {// GPS fix
                    } else if (location.getLocType() == BDLocation.TypeNetWorkLocation) {// network fix
                    } else if (location.getLocType() == BDLocation.TypeOffLineLocation) {// offline fix
                        // sb.append("离线定位成功,离线定位结果也是有效的");
                    } else if (location.getLocType() == BDLocation.TypeServerError) {
                        // sb.append("服务端网络定位失败,可以反馈IMEI号和大体定位时间到loc-bugs@baidu.com,会有人追查原因");
                    } else if (location.getLocType() == BDLocation.TypeNetWorkException) {
                        // sb.append("网络不同导致定位失败,请检查网络是否通畅");
                    } else if (location.getLocType() == BDLocation.TypeCriteriaException) {
                        // sb.append("无法获取有效定位依据导致定位失败,一般是由于手机的原因,处于飞行模式下一般会造成这种结果,可以试着重启手机");
                    }
                    // deliver the result, cache it globally and stop locating
                    msg.obj = lrb;
                    MyApplication.sLrb = lrb;
                    handler.sendMessage(msg);
                    locationService.stop();
                }
            }
        }

        public void onConnectHotSpotMessage(String s, int i) {
        }
    };

    /**
     * Lower-cases every character of the given string.
     *
     * @param str string before conversion
     * @return string after conversion
     */
    public String toLower(String str) {
        StringBuffer sb = new StringBuffer();
        for (int j = 0; j < str.length(); j++) {
            char c = str.charAt(j);
            sb.append(Character.toLowerCase(c));
        }
        return sb.toString();
    }
}
apache-2.0
michel-kraemer/citeproc-java
citeproc-java-tool/src/main/java/de/undercouch/citeproc/tool/shell/ShellLoadCommand.java
2799
package de.undercouch.citeproc.tool.shell;

import de.undercouch.citeproc.BibliographyFileReader;
import de.undercouch.citeproc.BibliographyFileReader.FileFormat;
import de.undercouch.citeproc.tool.AbstractCSLToolCommand;
import de.undercouch.citeproc.tool.CSLToolContext;
import de.undercouch.underline.InputReader;
import de.undercouch.underline.OptionParserException;
import de.undercouch.underline.UnknownAttributes;

import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.List;

/**
 * Load an input bibliography
 * @author Michel Kraemer
 */
public class ShellLoadCommand extends AbstractCSLToolCommand {
    /**
     * The current files
     */
    private List<String> files;

    @Override
    public String getUsageName() {
        return "load";
    }

    @Override
    public String getUsageDescription() {
        return "Load an input bibliography from a file";
    }

    /**
     * Sets the current files
     * @param files the files
     */
    @UnknownAttributes("FILE")
    public void setFiles(List<String> files) {
        this.files = files;
    }

    @Override
    public boolean checkArguments() {
        // exactly one input file must be given
        if (files == null || files.isEmpty()) {
            error("no file specified");
            return false;
        }
        if (files.size() > 1) {
            error("you can only specify one file");
            return false;
        }

        // probe the file so unreadable or unsupported formats are
        // rejected before doRun() does any work
        BibliographyFileReader bibReader =
                CSLToolContext.current().getBibliographyFileReader();
        FileFormat format;
        try {
            format = bibReader.determineFileFormat(new File(files.get(0)));
        } catch (FileNotFoundException e) {
            error("file not found");
            return false;
        } catch (IOException e) {
            error("could not determine file format");
            return false;
        }
        if (format == FileFormat.UNKNOWN) {
            error("Unsupported file format");
            return false;
        }
        return true;
    }

    @Override
    public int doRun(String[] remainingArgs, InputReader in, PrintWriter out)
            throws OptionParserException, IOException {
        // Read through the shared BibliographyFileReader instance so the
        // parsed bibliography is cached for subsequent commands.
        String fileName = files.get(0);
        BibliographyFileReader bibReader =
                CSLToolContext.current().getBibliographyFileReader();
        try {
            bibReader.readBibliographyFile(new File(fileName));
        } catch (IOException e) {
            error("could not read input file");
            return 1;
        }
        ShellContext.current().setInputFile(fileName);
        return 0;
    }
}
apache-2.0
juebanlin/util4j
util4j/src/main/java/net/jueb/util4j/buffer/tool/demo/BufferBuilderDemo.java
3983
package net.jueb.util4j.buffer.tool.demo;

import java.io.File;
import java.io.IOException;
import java.util.Date;
import java.util.List;
import net.jueb.util4j.buffer.tool.ClassFileUitl;
import org.apache.commons.io.FileUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.netty.util.CharsetUtil;
import net.jueb.util4j.buffer.tool.BufferBuilder;

/**
 * Demo code generator: scans a source tree for {@code Dto} classes and
 * injects generated buffer read/write method bodies into each class's
 * .java file, between the {@link #BEGIN_FLAG} and {@link #END_FLAG}
 * marker comments.
 */
public class BufferBuilderDemo {

	protected Logger log=LoggerFactory.getLogger(getClass());

	/** Name of the generated serialisation method. */
	public static final String writeMethodName="writeTo";
	/** Name of the generated deserialisation method. */
	public static final String readMethodName="readFrom";

	/**
	 * Regex matching an existing generated write method, e.g.
	 * (@Override)?([\s]+|[\r\n\t])(public)([\s]+|[\r\n|\r|\t])(void)([\s]+|[\r\n|\r|\t])(writeToSql)([\s]*|[\r\n|\r|\t])(\()([\s]*|[\r\n|\r|\t])(ByteBuffer)([\s]+|[\r\n|\r|\t])(buffer)([\s]*|[\r\n|\r|\t])(\))([\s]*|[\r\n|\r|\t])(\{)([\s\S]*)(\})
	 */
	public static final String MATCH_WRITE="(@Override)?([\\s]+|[\\r\\n\\t])(public)([\\s]+|[\\r\\n|\\r|\\t])(void)([\\s]+|[\\r\\n|\\r|\\t])("+writeMethodName+")([\\s]*|[\\r\\n|\\r|\\t])(\\()([\\s]*|[\\r\\n|\\r|\\t])(ByteBuffer)([\\s]+|[\\r\\n|\\r|\\t])(buffer)([\\s]*|[\\r\\n|\\r|\\t])(\\))([\\s]*|[\\r\n|\\r|\\t])(\\{)([\\s\\S]*)(\\})";
	/** Regex matching an existing generated read method (same shape as MATCH_WRITE). */
	public static final String MATCH_READ="(@Override)?([\\s]+|[\\r\\n\\t])(public)([\\s]+|[\\r\\n|\\r|\\t])(void)([\\s]+|[\\r\\n|\\r|\\t])("+readMethodName+")([\\s]*|[\\r\\n|\\r|\\t])(\\()([\\s]*|[\\r\\n|\\r|\\t])(ByteBuffer)([\\s]+|[\\r\\n|\\r|\\t])(buffer)([\\s]*|[\\r\\n|\\r|\\t])(\\))([\\s]*|[\\r\n|\\r|\\t])(\\{)([\\s\\S]*)(\\})";

	/** Markers delimiting the auto-generated region in a source file. */
	public static String BEGIN_FLAG = "//auto sql write begin";
	public static String END_FLAG = "//auto sql write end";

	/**
	 * Generates read/write code for every {@code Dto} subclass found under
	 * {@code soruceRootDir} in package {@code pkg} and writes it back into
	 * the class's source file between the markers.
	 *
	 * @param soruceRootDir root directory of the java sources
	 * @param pkg package to scan for Dto classes
	 * @throws Exception if scanning, generation or file I/O fails
	 */
	public void build(String soruceRootDir,String pkg)throws Exception
	{
		BufferBuilder bb=new BufferBuilder("net.jueb.util4j.buffer.ArrayBytesBuff", "writeTo", "readFrom");
		// Field filter: skip compiler-generated switch-table fields.
		bb.addFieldSkipFilter((field)->{
			String name = field.getName();
			return name.contains("$SWITCH_TABLE$");
		});
		// Extra type handler: serialise java.util.Date as a long timestamp.
		bb.addTypeHandler((ctx)->{
			Class<?> type=ctx.varType();
			if(Date.class.isAssignableFrom(type))
			{
				String ClassName=type.getSimpleName();
				ctx.write().append("\t").append(ctx.varBuffer()+".writeLong("+ctx.varName()+".getTime());").append("\n");
				ctx.read().append("\t").append(ctx.varName() +"=new "+ClassName+"();").append("\n");
				ctx.read().append("\t").append(ctx.varName() + ".setTime("+ctx.varBuffer()+".readLong());").append("\n");
				return true;
			}
			return false;
		});
		List<Class<?>> fileList =ClassFileUitl.getClassInfo(soruceRootDir, pkg);
		for(Class<?> clazz:fileList)
		{
			// only Dto implementations get generated code
			if(!Dto.class.isAssignableFrom(clazz))
			{
				continue;
			}
			StringBuilder writeSb=new StringBuilder();
			StringBuilder readSb=new StringBuilder();
			bb.build(clazz,writeSb,readSb);
			// concatenate write method followed by read method
			writeSb.append("\n");
			writeSb.append(readSb.toString());
			File javaSourceFile=ClassFileUitl.findJavaSourceFile(soruceRootDir, clazz);
			String javaSource=fillCode(javaSourceFile, writeSb);
			FileUtils.writeByteArrayToFile(javaSourceFile,javaSource.getBytes(CharsetUtil.UTF_8));
		}
	}

	/**
	 * Splices the generated code between BEGIN_FLAG and END_FLAG in the
	 * given source file, returning the patched source text (unchanged if
	 * the markers are absent).
	 *
	 * Fix: the original guard was {@code start>0 && end>0}, which (a)
	 * missed a marker at offset 0 ({@code indexOf} returns 0 there) and
	 * (b) accepted an END_FLAG occurring before BEGIN_FLAG, producing
	 * garbled output. The end marker is now only searched for after the
	 * begin marker.
	 *
	 * @param javaFile source file to patch
	 * @param bufferCode generated code to insert between the markers
	 * @return the patched source text
	 * @throws IOException if the file cannot be read
	 */
	public String fillCode(File javaFile,StringBuilder bufferCode) throws IOException
	{
		String javaSource=FileUtils.readFileToString(javaFile, CharsetUtil.UTF_8);
		int start=javaSource.indexOf(BEGIN_FLAG);
		// only accept an end marker that follows the begin marker
		int end = start < 0 ? -1 : javaSource.indexOf(END_FLAG, start + BEGIN_FLAG.length());
		if(start >= 0 && end >= 0)
		{
			String head=javaSource.substring(0, start+BEGIN_FLAG.length());
			String tail=javaSource.substring(end);
			javaSource=head+"\n"+bufferCode.toString()+"\n"+tail;
		}
		return javaSource;
	}

	/**
	 * Runs the generator over this demo package in the current project.
	 */
	public static void main(String[] args) throws Exception
	{
		String path=System.getProperty("user.dir")+File.separator+"src"+File.separator+"main"+File.separator+"java"+File.separator;
		String pkg="net.jueb.util4j.buffer.tool.demo";
		BufferBuilderDemo buildUtils = new BufferBuilderDemo();
		buildUtils.build(path,pkg);
	}
}
apache-2.0
titze/axmlparser
src/axmlprinter/SdkConstants.java
19114
package axmlprinter;

import java.io.File;

/**
 * Constant definition class.<br>
 * <br>
 * Most constants have a prefix defining the content.
 * <ul>
 * <li><code>OS_</code> OS path constant. These paths are different depending on
 * the platform.</li>
 * <li><code>FN_</code> File name constant.</li>
 * <li><code>FD_</code> Folder name constant.</li>
 * </ul>
 *
 */
public final class SdkConstants {
	public final static int PLATFORM_UNKNOWN = 0;
	public final static int PLATFORM_LINUX = 1;
	public final static int PLATFORM_WINDOWS = 2;
	public final static int PLATFORM_DARWIN = 3;

	/**
	 * Returns current platform, one of {@link #PLATFORM_WINDOWS},
	 * {@link #PLATFORM_DARWIN}, {@link #PLATFORM_LINUX} or
	 * {@link #PLATFORM_UNKNOWN}.
	 */
	public final static int CURRENT_PLATFORM = currentPlatform();

	/**
	 * Charset for the ini file handled by the SDK.
	 */
	public final static String INI_CHARSET = "UTF-8";

	/** An SDK Project's AndroidManifest.xml file */
	public static final String FN_ANDROID_MANIFEST_XML = "AndroidManifest.xml";
	/** Dex filename inside the APK. i.e. "classes.dex" */
	public final static String FN_APK_CLASSES_DEX = "classes.dex"; //$NON-NLS-1$
	/** An SDK Project's build.xml file */
	public final static String FN_BUILD_XML = "build.xml";

	/** Name of the framework library, i.e. "android.jar" */
	public static final String FN_FRAMEWORK_LIBRARY = "android.jar";
	/** Name of the layout attributes, i.e. "attrs.xml" */
	public static final String FN_ATTRS_XML = "attrs.xml";
	/** Name of the layout attributes, i.e. "attrs_manifest.xml" */
	public static final String FN_ATTRS_MANIFEST_XML = "attrs_manifest.xml";
	/** framework aidl import file */
	public static final String FN_FRAMEWORK_AIDL = "framework.aidl";
	/** framework renderscript folder */
	public static final String FN_FRAMEWORK_RENDERSCRIPT = "renderscript";
	/** framework include folder */
	public static final String FN_FRAMEWORK_INCLUDE = "include";
	/** framework include (clang) folder */
	public static final String FN_FRAMEWORK_INCLUDE_CLANG = "clang-include";
	/** layoutlib.jar file */
	public static final String FN_LAYOUTLIB_JAR = "layoutlib.jar";
	/** widget list file */
	public static final String FN_WIDGETS = "widgets.txt";
	/** Intent activity actions list file */
	public static final String FN_INTENT_ACTIONS_ACTIVITY = "activity_actions.txt";
	/** Intent broadcast actions list file */
	public static final String FN_INTENT_ACTIONS_BROADCAST = "broadcast_actions.txt";
	/** Intent service actions list file */
	public static final String FN_INTENT_ACTIONS_SERVICE = "service_actions.txt";
	/** Intent category list file */
	public static final String FN_INTENT_CATEGORIES = "categories.txt";

	/** platform build property file */
	public final static String FN_BUILD_PROP = "build.prop";
	/** plugin properties file */
	public final static String FN_PLUGIN_PROP = "plugin.prop";
	/** add-on manifest file */
	public final static String FN_MANIFEST_INI = "manifest.ini";
	/** add-on layout device XML file. */
	public final static String FN_DEVICES_XML = "devices.xml";
	/** hardware properties definition file */
	public final static String FN_HARDWARE_INI = "hardware-properties.ini";

	/** project default property file */
	public final static String FN_DEFAULT_PROPERTIES = "default.properties";
	/** project export property file */
	public final static String FN_EXPORT_PROPERTIES = "export.properties";
	/** project local property file */
	public final static String FN_LOCAL_PROPERTIES = "local.properties";
	/** project build property file */
	public final static String FN_BUILD_PROPERTIES = "build.properties";

	/** Skin layout file */
	public final static String FN_SKIN_LAYOUT = "layout";//$NON-NLS-1$

	/** dx.jar file */
	public static final String FN_DX_JAR = "dx.jar"; //$NON-NLS-1$

	/** dx executable (with extension for the current OS) */
	public final static String FN_DX = (CURRENT_PLATFORM == PLATFORM_WINDOWS) ? "dx.bat" : "dx"; //$NON-NLS-1$ //$NON-NLS-2$

	/** aapt executable (with extension for the current OS) */
	public final static String FN_AAPT = (CURRENT_PLATFORM == PLATFORM_WINDOWS) ? "aapt.exe" : "aapt"; //$NON-NLS-1$ //$NON-NLS-2$

	/** aidl executable (with extension for the current OS) */
	public final static String FN_AIDL = (CURRENT_PLATFORM == PLATFORM_WINDOWS) ? "aidl.exe" : "aidl"; //$NON-NLS-1$ //$NON-NLS-2$

	/** renderscript executable (with extension for the current OS) */
	public final static String FN_RENDERSCRIPT = (CURRENT_PLATFORM == PLATFORM_WINDOWS) ? "llvm-rs-cc.exe" : "llvm-rs-cc"; //$NON-NLS-1$ //$NON-NLS-2$

	/** adb executable (with extension for the current OS) */
	public final static String FN_ADB = (CURRENT_PLATFORM == PLATFORM_WINDOWS) ? "adb.exe" : "adb"; //$NON-NLS-1$ //$NON-NLS-2$

	/** emulator executable (_WITHOUT_ extension for the current OS) */
	public final static String FN_EMULATOR = "emulator"; //$NON-NLS-1$ //$NON-NLS-2$

	/** emulator executable extension for the current OS */
	public final static String FN_EMULATOR_EXTENSION = (CURRENT_PLATFORM == PLATFORM_WINDOWS) ? ".exe" : ""; //$NON-NLS-1$ //$NON-NLS-2$

	/** zipalign executable (with extension for the current OS) */
	public final static String FN_ZIPALIGN = (CURRENT_PLATFORM == PLATFORM_WINDOWS) ? "zipalign.exe" : "zipalign"; //$NON-NLS-1$ //$NON-NLS-2$

	/** dexdump executable (with extension for the current OS) */
	public final static String FN_DEXDUMP = (CURRENT_PLATFORM == PLATFORM_WINDOWS) ? "dexdump.exe" : "dexdump"; //$NON-NLS-1$ //$NON-NLS-2$

	/** proguard launcher script (with extension for the current OS) */
	public final static String FN_PROGUARD = (CURRENT_PLATFORM == PLATFORM_WINDOWS) ? "proguard.bat" : "proguard.sh"; //$NON-NLS-1$ //$NON-NLS-2$

	/** properties file for SDK Updater packages */
	public final static String FN_SOURCE_PROP = "source.properties"; //$NON-NLS-1$
	/** properties file for content hash of installed packages */
	public final static String FN_CONTENT_HASH_PROP = "content_hash.properties"; //$NON-NLS-1$
	/** properties file for the SDK */
	public final static String FN_SDK_PROP = "sdk.properties"; //$NON-NLS-1$

	/**
	 * filename for gdbserver.
	 */
	public final static String FN_GDBSERVER = "gdbserver";

	/** default proguard config file */
	public final static String FN_PROGUARD_CFG = "proguard.cfg";

	/* Folder Names for Android Projects . */

	/** Resources folder name, i.e. "res". */
	public final static String FD_RESOURCES = "res"; //$NON-NLS-1$
	/** Assets folder name, i.e. "assets" */
	public final static String FD_ASSETS = "assets"; //$NON-NLS-1$
	/** Default source folder name, i.e. "src" */
	public final static String FD_SOURCES = "src"; //$NON-NLS-1$
	/** Default generated source folder name, i.e. "gen" */
	public final static String FD_GEN_SOURCES = "gen"; //$NON-NLS-1$
	/**
	 * Default native library folder name inside the project, i.e. "libs" While
	 * the folder inside the .apk is "lib", we call that one libs because that's
	 * what we use in ant for both .jar and .so and we need to make the 2
	 * development ways compatible.
	 */
	public final static String FD_NATIVE_LIBS = "libs"; //$NON-NLS-1$
	/** Native lib folder inside the APK: "lib" */
	public final static String FD_APK_NATIVE_LIBS = "lib"; //$NON-NLS-1$
	/** Default output folder name, i.e. "bin" */
	public final static String FD_OUTPUT = "bin"; //$NON-NLS-1$
	/** proguard output folder for mapping, etc.. files */
	public final static String FD_PROGUARD = "proguard"; //$NON-NLS-1$

	/* Folder Names for the Android SDK */

	/** Name of the SDK platforms folder. */
	public final static String FD_PLATFORMS = "platforms";
	/** Name of the SDK addons folder. */
	public final static String FD_ADDONS = "add-ons";
	/** Name of the SDK tools folder. */
	public final static String FD_TOOLS = "tools";
	/** Name of the SDK platform tools folder. */
	public final static String FD_PLATFORM_TOOLS = "platform-tools";
	/** Name of the SDK tools/lib folder. */
	public final static String FD_LIB = "lib";
	/** Name of the SDK docs folder. */
	public final static String FD_DOCS = "docs";
	/** Name of the doc folder containing API reference doc (javadoc) */
	public static final String FD_DOCS_REFERENCE = "reference";
	/** Name of the SDK images folder. */
	public final static String FD_IMAGES = "images";
	/** Name of the processors to support. */
	public final static String ABI_ARMEABI = "armeabi";
	public final static String ABI_INTEL_ATOM = "x86";
	/** Name of the SDK skins folder. */
	public final static String FD_SKINS = "skins";
	/** Name of the SDK samples folder. */
	public final static String FD_SAMPLES = "samples";
	/** Name of the SDK extras folder. */
	public final static String FD_EXTRAS = "extras";
	/** Name of the SDK templates folder, i.e. "templates" */
	public final static String FD_TEMPLATES = "templates";
	/** Name of the SDK Ant folder, i.e. "ant" */
	public final static String FD_ANT = "ant";
	/** Name of the SDK data folder, i.e. "data" */
	public final static String FD_DATA = "data";
	/** Name of the SDK renderscript folder, i.e. "rs" */
	public final static String FD_RENDERSCRIPT = "rs";
	/** Name of the SDK resources folder, i.e. "res" */
	public final static String FD_RES = "res";
	/** Name of the SDK font folder, i.e. "fonts" */
	public final static String FD_FONTS = "fonts";
	/** Name of the android sources directory */
	public static final String FD_ANDROID_SOURCES = "sources";
	/** Name of the addon libs folder. */
	public final static String FD_ADDON_LIBS = "libs";

	/**
	 * Namespace for the resource XML, i.e.
	 * "http://schemas.android.com/apk/res/android"
	 */
	public final static String NS_RESOURCES = "http://schemas.android.com/apk/res/android";

	/** The name of the uses-library that provides "android.test.runner" */
	public final static String ANDROID_TEST_RUNNER_LIB = "android.test.runner";

	/* Folder path relative to the SDK root */

	/**
	 * Path of the documentation directory relative to the sdk folder. This is
	 * an OS path, ending with a separator.
	 */
	public final static String OS_SDK_DOCS_FOLDER = FD_DOCS + File.separator;

	/**
	 * Path of the tools directory relative to the sdk folder, or to a platform
	 * folder. This is an OS path, ending with a separator.
	 */
	public final static String OS_SDK_TOOLS_FOLDER = FD_TOOLS + File.separator;

	/**
	 * Path of the lib directory relative to the sdk folder, or to a platform
	 * folder. This is an OS path, ending with a separator.
	 */
	public final static String OS_SDK_TOOLS_LIB_FOLDER = OS_SDK_TOOLS_FOLDER + FD_LIB + File.separator;

	/**
	 * Path of the lib directory relative to the sdk folder, or to a platform
	 * folder. This is an OS path, ending with a separator.
	 */
	public final static String OS_SDK_TOOLS_LIB_EMULATOR_FOLDER = OS_SDK_TOOLS_LIB_FOLDER + "emulator" + File.separator;

	/**
	 * Path of the platform tools directory relative to the sdk folder. This is
	 * an OS path, ending with a separator.
	 */
	public final static String OS_SDK_PLATFORM_TOOLS_FOLDER = FD_PLATFORM_TOOLS + File.separator;

	/**
	 * Path of the Platform tools Lib directory relative to the sdk folder. This
	 * is an OS path, ending with a separator.
	 */
	public final static String OS_SDK_PLATFORM_TOOLS_LIB_FOLDER = OS_SDK_PLATFORM_TOOLS_FOLDER + FD_LIB + File.separator;

	/**
	 * Path of the bin folder of proguard folder relative to the sdk folder.
	 * This is an OS path, ending with a separator.
	 */
	public final static String OS_SDK_TOOLS_PROGUARD_BIN_FOLDER = SdkConstants.OS_SDK_TOOLS_FOLDER + "proguard" + File.separator + "bin" + File.separator; //$NON-NLS-1$ //$NON-NLS-2$

	/* Folder paths relative to a platform or add-on folder */

	/**
	 * Path of the images directory relative to a platform or addon folder. This
	 * is an OS path, ending with a separator.
	 */
	public final static String OS_IMAGES_FOLDER = FD_IMAGES + File.separator;

	/**
	 * Path of the skin directory relative to a platform or addon folder. This
	 * is an OS path, ending with a separator.
	 */
	public final static String OS_SKINS_FOLDER = FD_SKINS + File.separator;

	/* Folder paths relative to a Platform folder */

	/**
	 * Path of the data directory relative to a platform folder. This is an OS
	 * path, ending with a separator.
	 */
	public final static String OS_PLATFORM_DATA_FOLDER = FD_DATA + File.separator;

	/**
	 * Path of the renderscript directory relative to a platform folder. This is
	 * an OS path, ending with a separator.
	 */
	public final static String OS_PLATFORM_RENDERSCRIPT_FOLDER = FD_RENDERSCRIPT + File.separator;

	/**
	 * Path of the samples directory relative to a platform folder. This is an
	 * OS path, ending with a separator.
	 */
	public final static String OS_PLATFORM_SAMPLES_FOLDER = FD_SAMPLES + File.separator;

	/**
	 * Path of the resources directory relative to a platform folder. This is an
	 * OS path, ending with a separator.
	 */
	public final static String OS_PLATFORM_RESOURCES_FOLDER = OS_PLATFORM_DATA_FOLDER + FD_RES + File.separator;

	/**
	 * Path of the fonts directory relative to a platform folder. This is an OS
	 * path, ending with a separator.
	 */
	public final static String OS_PLATFORM_FONTS_FOLDER = OS_PLATFORM_DATA_FOLDER + FD_FONTS + File.separator;

	/**
	 * Path of the android source directory relative to a platform folder. This
	 * is an OS path, ending with a separator.
	 */
	public final static String OS_PLATFORM_SOURCES_FOLDER = FD_ANDROID_SOURCES + File.separator;

	/**
	 * Path of the android templates directory relative to a platform folder.
	 * This is an OS path, ending with a separator.
	 */
	public final static String OS_PLATFORM_TEMPLATES_FOLDER = FD_TEMPLATES + File.separator;

	/**
	 * Path of the Ant build rules directory relative to a platform folder. This
	 * is an OS path, ending with a separator.
	 */
	public final static String OS_PLATFORM_ANT_FOLDER = FD_ANT + File.separator;

	/** Path of the attrs.xml file relative to a platform folder. */
	public final static String OS_PLATFORM_ATTRS_XML = OS_PLATFORM_RESOURCES_FOLDER + AndroidConstants.FD_RES_VALUES + File.separator + FN_ATTRS_XML;

	/** Path of the attrs_manifest.xml file relative to a platform folder. */
	public final static String OS_PLATFORM_ATTRS_MANIFEST_XML = OS_PLATFORM_RESOURCES_FOLDER + AndroidConstants.FD_RES_VALUES + File.separator + FN_ATTRS_MANIFEST_XML;

	/** Path of the layoutlib.jar file relative to a platform folder. */
	public final static String OS_PLATFORM_LAYOUTLIB_JAR = OS_PLATFORM_DATA_FOLDER + FN_LAYOUTLIB_JAR;

	/** Path of the renderscript include folder relative to a platform folder. */
	public final static String OS_FRAMEWORK_RS = FN_FRAMEWORK_RENDERSCRIPT + File.separator + FN_FRAMEWORK_INCLUDE;

	/**
	 * Path of the renderscript (clang) include folder relative to a platform
	 * folder.
	 */
	public final static String OS_FRAMEWORK_RS_CLANG = FN_FRAMEWORK_RENDERSCRIPT + File.separator + FN_FRAMEWORK_INCLUDE_CLANG;

	/* Folder paths relative to a addon folder */

	/**
	 * Path of the images directory relative to an addon folder. This is an OS
	 * path, ending with a separator.
	 */
	public final static String OS_ADDON_LIBS_FOLDER = FD_ADDON_LIBS + File.separator;

	/** Skin default **/
	public final static String SKIN_DEFAULT = "default";

	/** SDK property: ant templates revision */
	public final static String PROP_SDK_ANT_TEMPLATES_REVISION = "sdk.ant.templates.revision"; //$NON-NLS-1$

	/* Android Class Constants */
	public final static String CLASS_ACTIVITY = "android.app.Activity"; //$NON-NLS-1$
	public final static String CLASS_APPLICATION = "android.app.Application"; //$NON-NLS-1$
	public final static String CLASS_SERVICE = "android.app.Service"; //$NON-NLS-1$
	public final static String CLASS_BROADCASTRECEIVER = "android.content.BroadcastReceiver"; //$NON-NLS-1$
	public final static String CLASS_CONTENTPROVIDER = "android.content.ContentProvider"; //$NON-NLS-1$
	public final static String CLASS_INSTRUMENTATION = "android.app.Instrumentation"; //$NON-NLS-1$
	public final static String CLASS_INSTRUMENTATION_RUNNER = "android.test.InstrumentationTestRunner"; //$NON-NLS-1$
	public final static String CLASS_BUNDLE = "android.os.Bundle"; //$NON-NLS-1$
	public final static String CLASS_R = "android.R"; //$NON-NLS-1$
	public final static String CLASS_MANIFEST_PERMISSION = "android.Manifest$permission"; //$NON-NLS-1$
	public final static String CLASS_INTENT = "android.content.Intent"; //$NON-NLS-1$
	public final static String CLASS_CONTEXT = "android.content.Context"; //$NON-NLS-1$
	public final static String CLASS_VIEW = "android.view.View"; //$NON-NLS-1$
	public final static String CLASS_VIEWGROUP = "android.view.ViewGroup"; //$NON-NLS-1$
	public final static String CLASS_NAME_LAYOUTPARAMS = "LayoutParams"; //$NON-NLS-1$
	public final static String CLASS_VIEWGROUP_LAYOUTPARAMS = CLASS_VIEWGROUP + "$" + CLASS_NAME_LAYOUTPARAMS; //$NON-NLS-1$
	public final static String CLASS_NAME_FRAMELAYOUT = "FrameLayout"; //$NON-NLS-1$
	public final static String CLASS_FRAMELAYOUT = "android.widget." + CLASS_NAME_FRAMELAYOUT; //$NON-NLS-1$
	public final static String CLASS_PREFERENCE = "android.preference.Preference"; //$NON-NLS-1$
	public final static String CLASS_NAME_PREFERENCE_SCREEN = "PreferenceScreen"; //$NON-NLS-1$
	public final static String CLASS_PREFERENCES = "android.preference." + CLASS_NAME_PREFERENCE_SCREEN; //$NON-NLS-1$
	public final static String CLASS_PREFERENCEGROUP = "android.preference.PreferenceGroup"; //$NON-NLS-1$
	public final static String CLASS_PARCELABLE = "android.os.Parcelable"; //$NON-NLS-1$
	/**
	 * MockView is part of the layoutlib bridge and used to display classes that
	 * have no rendering in the graphical layout editor.
	 */
	public final static String CLASS_MOCK_VIEW = "com.android.layoutlib.bridge.MockView"; //$NON-NLS-1$

	/**
	 * Returns the appropriate name for the 'android' command, which is
	 * 'android.bat' for Windows and 'android' for all other platforms.
	 */
	public static String androidCmdName() {
		String os = System.getProperty("os.name");
		String cmd = "android";
		if (os.startsWith("Windows")) {
			cmd += ".bat";
		}
		return cmd;
	}

	/**
	 * Returns the appropriate name for the 'mksdcard' command, which is
	 * 'mksdcard.exe' for Windows and 'mksdcard' for all other platforms.
	 */
	public static String mkSdCardCmdName() {
		String os = System.getProperty("os.name");
		String cmd = "mksdcard";
		if (os.startsWith("Windows")) {
			cmd += ".exe";
		}
		return cmd;
	}

	/**
	 * Returns current platform
	 *
	 * @return one of {@link #PLATFORM_WINDOWS}, {@link #PLATFORM_DARWIN},
	 *         {@link #PLATFORM_LINUX} or {@link #PLATFORM_UNKNOWN}.
	 */
	public static int currentPlatform() {
		String os = System.getProperty("os.name"); //$NON-NLS-1$
		if (os.startsWith("Mac OS")) { //$NON-NLS-1$
			return PLATFORM_DARWIN;
		} else if (os.startsWith("Windows")) { //$NON-NLS-1$
			return PLATFORM_WINDOWS;
		} else if (os.startsWith("Linux")) { //$NON-NLS-1$
			return PLATFORM_LINUX;
		}
		return PLATFORM_UNKNOWN;
	}
}
apache-2.0
GeLiXin/hadoop
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
28150
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.service.component.instance; import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.registry.client.api.RegistryConstants; import org.apache.hadoop.registry.client.binding.RegistryPathUtils; import org.apache.hadoop.registry.client.binding.RegistryUtils; import org.apache.hadoop.registry.client.types.ServiceRecord; import org.apache.hadoop.registry.client.types.yarn.PersistencePolicies; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerExitStatus; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.client.api.NMClient; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.security.ContainerTokenIdentifier; import 
org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.apache.hadoop.yarn.service.ServiceScheduler; import org.apache.hadoop.yarn.service.api.records.Artifact; import org.apache.hadoop.yarn.service.api.records.ContainerState; import org.apache.hadoop.yarn.service.component.Component; import org.apache.hadoop.yarn.service.component.ComponentEvent; import org.apache.hadoop.yarn.service.component.ComponentEventType; import org.apache.hadoop.yarn.service.component.ComponentRestartPolicy; import org.apache.hadoop.yarn.service.monitor.probe.ProbeStatus; import org.apache.hadoop.yarn.service.registry.YarnRegistryViewForProviders; import org.apache.hadoop.yarn.service.timelineservice.ServiceTimelinePublisher; import org.apache.hadoop.yarn.service.utils.ServiceUtils; import org.apache.hadoop.yarn.state.InvalidStateTransitionException; import org.apache.hadoop.yarn.state.SingleArcTransition; import org.apache.hadoop.yarn.state.StateMachine; import org.apache.hadoop.yarn.state.StateMachineFactory; import org.apache.hadoop.yarn.util.BoundedAppender; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.text.MessageFormat; import java.util.Date; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock; import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock; import static org.apache.hadoop.registry.client.types.yarn.YarnRegistryAttributes.*; import static org.apache.hadoop.yarn.api.records.ContainerExitStatus.KILLED_BY_APPMASTER; import static org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEventType.*; import static org.apache.hadoop.yarn.service.component.instance.ComponentInstanceState.*; public class ComponentInstance implements EventHandler<ComponentInstanceEvent>, Comparable<ComponentInstance> { private static final Logger LOG = 
LoggerFactory.getLogger(ComponentInstance.class); private static final String FAILED_BEFORE_LAUNCH_DIAG = "failed before launch"; private StateMachine<ComponentInstanceState, ComponentInstanceEventType, ComponentInstanceEvent> stateMachine; private Component component; private final ReadLock readLock; private final WriteLock writeLock; private ComponentInstanceId compInstanceId = null; private Path compInstanceDir; private Container container; private YarnRegistryViewForProviders yarnRegistryOperations; private FileSystem fs; private boolean timelineServiceEnabled = false; private ServiceTimelinePublisher serviceTimelinePublisher; private ServiceScheduler scheduler; private BoundedAppender diagnostics = new BoundedAppender(64 * 1024); private volatile ScheduledFuture containerStatusFuture; private volatile ContainerStatus status; private long containerStartedTime = 0; // This container object is used for rest API query private org.apache.hadoop.yarn.service.api.records.Container containerSpec; private String serviceVersion; private static final StateMachineFactory<ComponentInstance, ComponentInstanceState, ComponentInstanceEventType, ComponentInstanceEvent> stateMachineFactory = new StateMachineFactory<ComponentInstance, ComponentInstanceState, ComponentInstanceEventType, ComponentInstanceEvent>(INIT) .addTransition(INIT, STARTED, START, new ContainerStartedTransition()) .addTransition(INIT, INIT, STOP, // container failed before launching, nothing to cleanup from registry // This could happen if NMClient#startContainerAsync failed, container // will be completed, but COMP_INSTANCE is still at INIT. 
new ContainerStoppedTransition(true)) //From Running .addTransition(STARTED, INIT, STOP, new ContainerStoppedTransition()) .addTransition(STARTED, READY, BECOME_READY, new ContainerBecomeReadyTransition()) // FROM READY .addTransition(READY, STARTED, BECOME_NOT_READY, new ContainerBecomeNotReadyTransition()) .addTransition(READY, INIT, STOP, new ContainerStoppedTransition()) .addTransition(READY, UPGRADING, UPGRADE, new ContainerUpgradeTransition()) .addTransition(UPGRADING, UPGRADING, UPGRADE, new ContainerUpgradeTransition()) .addTransition(UPGRADING, READY, BECOME_READY, new ContainerBecomeReadyTransition()) .addTransition(UPGRADING, INIT, STOP, new ContainerStoppedTransition()) .installTopology(); public ComponentInstance(Component component, ComponentInstanceId compInstanceId) { this.stateMachine = stateMachineFactory.make(this); this.component = component; this.compInstanceId = compInstanceId; this.scheduler = component.getScheduler(); this.yarnRegistryOperations = component.getScheduler().getYarnRegistryOperations(); this.serviceTimelinePublisher = component.getScheduler().getServiceTimelinePublisher(); if (YarnConfiguration .timelineServiceV2Enabled(component.getScheduler().getConfig())) { this.timelineServiceEnabled = true; } ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); this.readLock = lock.readLock(); this.writeLock = lock.writeLock(); this.fs = scheduler.getContext().fs.getFileSystem(); } private static class ContainerStartedTransition extends BaseTransition { @Override public void transition(ComponentInstance compInstance, ComponentInstanceEvent event) { // Query container status for ip and host boolean cancelOnSuccess = true; if (compInstance.getCompSpec().getArtifact() != null && compInstance .getCompSpec().getArtifact().getType() == Artifact.TypeEnum.DOCKER) { // A docker container might get a different IP if the container is // relaunched by the NM, so we need to keep checking the status. 
// This is a temporary fix until the NM provides a callback for // container relaunch (see YARN-8265). cancelOnSuccess = false; } compInstance.containerStatusFuture = compInstance.scheduler.executorService.scheduleAtFixedRate( new ContainerStatusRetriever(compInstance.scheduler, event.getContainerId(), compInstance, cancelOnSuccess), 0, 1, TimeUnit.SECONDS); long containerStartTime = System.currentTimeMillis(); try { ContainerTokenIdentifier containerTokenIdentifier = BuilderUtils .newContainerTokenIdentifier(compInstance.getContainer() .getContainerToken()); containerStartTime = containerTokenIdentifier.getCreationTime(); } catch (Exception e) { LOG.info("Could not get container creation time, using current time"); } org.apache.hadoop.yarn.service.api.records.Container container = new org.apache.hadoop.yarn.service.api.records.Container(); container.setId(event.getContainerId().toString()); container.setLaunchTime(new Date(containerStartTime)); container.setState(ContainerState.RUNNING_BUT_UNREADY); container.setBareHost(compInstance.getNodeId().getHost()); container.setComponentInstanceName(compInstance.getCompInstanceName()); if (compInstance.containerSpec != null) { // remove the previous container. 
compInstance.getCompSpec().removeContainer(compInstance.containerSpec); } compInstance.containerSpec = container; compInstance.getCompSpec().addContainer(container); compInstance.containerStartedTime = containerStartTime; compInstance.component.incRunningContainers(); compInstance.serviceVersion = compInstance.scheduler.getApp() .getVersion(); if (compInstance.timelineServiceEnabled) { compInstance.serviceTimelinePublisher .componentInstanceStarted(container, compInstance); } } } private static class ContainerBecomeReadyTransition extends BaseTransition { @Override public void transition(ComponentInstance compInstance, ComponentInstanceEvent event) { compInstance.containerSpec.setState(ContainerState.READY); if (compInstance.getState().equals(ComponentInstanceState.UPGRADING)) { compInstance.component.incContainersReady(false); compInstance.component.decContainersThatNeedUpgrade(); compInstance.serviceVersion = compInstance.component.getUpgradeEvent() .getUpgradeVersion(); ComponentEvent checkState = new ComponentEvent( compInstance.component.getName(), ComponentEventType.CHECK_STABLE); compInstance.scheduler.getDispatcher().getEventHandler().handle( checkState); } else { compInstance.component.incContainersReady(true); } if (compInstance.timelineServiceEnabled) { compInstance.serviceTimelinePublisher .componentInstanceBecomeReady(compInstance.containerSpec); } } } private static class ContainerBecomeNotReadyTransition extends BaseTransition { @Override public void transition(ComponentInstance compInstance, ComponentInstanceEvent event) { compInstance.containerSpec.setState(ContainerState.RUNNING_BUT_UNREADY); compInstance.component.decContainersReady(true); } } @VisibleForTesting static void handleComponentInstanceRelaunch( ComponentInstance compInstance, ComponentInstanceEvent event, boolean failureBeforeLaunch) { Component comp = compInstance.getComponent(); // Do we need to relaunch the service? 
boolean hasContainerFailed = hasContainerFailed(event.getStatus()); ComponentRestartPolicy restartPolicy = comp.getRestartPolicyHandler(); if (restartPolicy.shouldRelaunchInstance(compInstance, event.getStatus())) { // re-ask the failed container. comp.requestContainers(1); comp.reInsertPendingInstance(compInstance); StringBuilder builder = new StringBuilder(); builder.append(compInstance.getCompInstanceId()).append(": "); builder.append(event.getContainerId()).append(" completed. Reinsert back to pending list and requested "); builder.append("a new container.").append(System.lineSeparator()); builder.append(" exitStatus=").append(failureBeforeLaunch ? null : event.getStatus().getExitStatus()); builder.append(", diagnostics="); builder.append(failureBeforeLaunch ? FAILED_BEFORE_LAUNCH_DIAG : event.getStatus().getDiagnostics()); if (event.getStatus().getExitStatus() != 0) { LOG.error(builder.toString()); } else { LOG.info(builder.toString()); } } else { // When no relaunch, update component's #succeeded/#failed // instances. if (hasContainerFailed) { comp.markAsFailed(compInstance); } else { comp.markAsSucceeded(compInstance); } LOG.info(compInstance.getCompInstanceId() + (!hasContainerFailed ? " succeeded" : " failed") + " without retry, exitStatus=" + event.getStatus()); comp.getScheduler().terminateServiceIfAllComponentsFinished(); } } public static boolean hasContainerFailed(ContainerStatus containerStatus) { //Mark conainer as failed if we cant get its exit status i.e null? return containerStatus == null || containerStatus.getExitStatus() != ContainerExitStatus.SUCCESS; } private static class ContainerStoppedTransition extends BaseTransition { // whether the container failed before launched by AM or not. 
boolean failedBeforeLaunching = false; public ContainerStoppedTransition(boolean failedBeforeLaunching) { this.failedBeforeLaunching = failedBeforeLaunching; } public ContainerStoppedTransition() { this(false); } @Override public void transition(ComponentInstance compInstance, ComponentInstanceEvent event) { Component comp = compInstance.component; String containerDiag = compInstance.getCompInstanceId() + ": " + (failedBeforeLaunching ? FAILED_BEFORE_LAUNCH_DIAG : event.getStatus().getDiagnostics()); compInstance.diagnostics.append(containerDiag + System.lineSeparator()); compInstance.cancelContainerStatusRetriever(); if (compInstance.getState().equals(ComponentInstanceState.UPGRADING)) { compInstance.component.decContainersThatNeedUpgrade(); } if (compInstance.getState().equals(READY)) { compInstance.component.decContainersReady(true); } compInstance.component.decRunningContainers(); // Should we fail (terminate) the service? boolean shouldFailService = false; final ServiceScheduler scheduler = comp.getScheduler(); scheduler.getAmRMClient().releaseAssignedContainer( event.getContainerId()); // Check if it exceeds the failure threshold, but only if health threshold // monitor is not enabled if (!comp.isHealthThresholdMonitorEnabled() && comp.currentContainerFailure .get() > comp.maxContainerFailurePerComp) { String exitDiag = MessageFormat.format( "[COMPONENT {0}]: Failed {1} times, exceeded the limit - {2}. Shutting down now... " + System.lineSeparator(), comp.getName(), comp.currentContainerFailure.get(), comp.maxContainerFailurePerComp); compInstance.diagnostics.append(exitDiag); // append to global diagnostics that will be reported to RM. scheduler.getDiagnostics().append(containerDiag); scheduler.getDiagnostics().append(exitDiag); LOG.warn(exitDiag); shouldFailService = true; } if (!failedBeforeLaunching) { // clean up registry // If the container failed before launching, no need to cleanup registry, // because it was not registered before. 
// hdfs dir content will be overwritten when a new container gets started, // so no need remove. compInstance.scheduler.executorService .submit(() -> compInstance.cleanupRegistry(event.getContainerId())); if (compInstance.timelineServiceEnabled) { // record in ATS compInstance.serviceTimelinePublisher .componentInstanceFinished(event.getContainerId(), event.getStatus().getExitStatus(), containerDiag); } compInstance.containerSpec.setState(ContainerState.STOPPED); } // remove the failed ContainerId -> CompInstance mapping scheduler.removeLiveCompInstance(event.getContainerId()); // According to component restart policy, handle container restart // or finish the service (if all components finished) handleComponentInstanceRelaunch(compInstance, event, failedBeforeLaunching); if (shouldFailService) { scheduler.getTerminationHandler().terminate(-1); } } } private static class ContainerUpgradeTransition extends BaseTransition { @Override public void transition(ComponentInstance compInstance, ComponentInstanceEvent event) { compInstance.containerSpec.setState(ContainerState.UPGRADING); compInstance.component.decContainersReady(false); ComponentEvent upgradeEvent = compInstance.component.getUpgradeEvent(); compInstance.scheduler.getContainerLaunchService() .reInitCompInstance(compInstance.scheduler.getApp(), compInstance, compInstance.container, compInstance.component.createLaunchContext( upgradeEvent.getTargetSpec(), upgradeEvent.getUpgradeVersion())); } } public ComponentInstanceState getState() { this.readLock.lock(); try { return this.stateMachine.getCurrentState(); } finally { this.readLock.unlock(); } } /** * Returns the version of service at which the instance is at. */ public String getServiceVersion() { this.readLock.lock(); try { return this.serviceVersion; } finally { this.readLock.unlock(); } } /** * Returns the state of the container in the container spec. 
*/ public ContainerState getContainerState() { this.readLock.lock(); try { return this.containerSpec.getState(); } finally { this.readLock.unlock(); } } @Override public void handle(ComponentInstanceEvent event) { try { writeLock.lock(); ComponentInstanceState oldState = getState(); try { stateMachine.doTransition(event.getType(), event); } catch (InvalidStateTransitionException e) { LOG.error(getCompInstanceId() + ": Invalid event " + event.getType() + " at " + oldState, e); } if (oldState != getState()) { LOG.info(getCompInstanceId() + " Transitioned from " + oldState + " to " + getState() + " on " + event.getType() + " event"); } } finally { writeLock.unlock(); } } public void setContainer(Container container) { this.container = container; this.compInstanceId.setContainerId(container.getId()); } public String getCompInstanceName() { return compInstanceId.getCompInstanceName(); } public ContainerStatus getContainerStatus() { return status; } public void updateContainerStatus(ContainerStatus status) { this.status = status; org.apache.hadoop.yarn.service.api.records.Container container = getCompSpec().getContainer(status.getContainerId().toString()); boolean doRegistryUpdate = true; if (container != null) { String existingIP = container.getIp(); String newIP = StringUtils.join(",", status.getIPs()); container.setIp(newIP); container.setHostname(status.getHost()); if (existingIP != null && newIP.equals(existingIP)) { doRegistryUpdate = false; } if (timelineServiceEnabled && doRegistryUpdate) { serviceTimelinePublisher.componentInstanceIPHostUpdated(container); } } if (doRegistryUpdate) { cleanupRegistry(status.getContainerId()); LOG.info( getCompInstanceId() + " new IP = " + status.getIPs() + ", host = " + status.getHost() + ", updating registry"); updateServiceRecord(yarnRegistryOperations, status); } } public String getCompName() { return compInstanceId.getCompName(); } public void setCompInstanceDir(Path dir) { this.compInstanceDir = dir; } public Component 
getComponent() { return component; } public Container getContainer() { return container; } public ComponentInstanceId getCompInstanceId() { return compInstanceId; } public NodeId getNodeId() { return this.container.getNodeId(); } private org.apache.hadoop.yarn.service.api.records.Component getCompSpec() { return component.getComponentSpec(); } private static class BaseTransition implements SingleArcTransition<ComponentInstance, ComponentInstanceEvent> { @Override public void transition(ComponentInstance compInstance, ComponentInstanceEvent event) { } } public ProbeStatus ping() { if (component.getProbe() == null) { ProbeStatus status = new ProbeStatus(); status.setSuccess(true); return status; } return component.getProbe().ping(this); } // Write service record into registry private void updateServiceRecord( YarnRegistryViewForProviders yarnRegistry, ContainerStatus status) { ServiceRecord record = new ServiceRecord(); String containerId = status.getContainerId().toString(); record.set(YARN_ID, containerId); record.description = getCompInstanceName(); record.set(YARN_PERSISTENCE, PersistencePolicies.CONTAINER); record.set(YARN_IP, status.getIPs().get(0)); record.set(YARN_HOSTNAME, status.getHost()); record.set(YARN_COMPONENT, component.getName()); try { yarnRegistry .putComponent(RegistryPathUtils.encodeYarnID(containerId), record); } catch (IOException e) { LOG.error( "Failed to update service record in registry: " + containerId + ""); } } // Called when user flexed down the container and ContainerStoppedTransition // is not executed in this case. 
// Release the container, dec running, // cleanup registry, hdfs dir, and send record to ATS public void destroy() { LOG.info(getCompInstanceId() + ": Flexed down by user, destroying."); diagnostics.append(getCompInstanceId() + ": Flexed down by user"); // update metrics if (getState() == STARTED) { component.decRunningContainers(); } if (getState() == READY) { component.decContainersReady(true); component.decRunningContainers(); } getCompSpec().removeContainer(containerSpec); if (container == null) { LOG.info(getCompInstanceId() + " no container is assigned when " + "destroying"); return; } ContainerId containerId = container.getId(); scheduler.removeLiveCompInstance(containerId); component.getScheduler().getAmRMClient() .releaseAssignedContainer(containerId); if (timelineServiceEnabled) { serviceTimelinePublisher.componentInstanceFinished(containerId, KILLED_BY_APPMASTER, diagnostics.toString()); } cancelContainerStatusRetriever(); scheduler.executorService.submit(() -> cleanupRegistryAndCompHdfsDir(containerId)); } private void cleanupRegistry(ContainerId containerId) { String cid = RegistryPathUtils.encodeYarnID(containerId.toString()); try { yarnRegistryOperations.deleteComponent(getCompInstanceId(), cid); } catch (IOException e) { LOG.error(getCompInstanceId() + ": Failed to delete registry", e); } } //TODO Maybe have a dedicated cleanup service. 
public void cleanupRegistryAndCompHdfsDir(ContainerId containerId) { cleanupRegistry(containerId); try { if (compInstanceDir != null && fs.exists(compInstanceDir)) { boolean deleted = fs.delete(compInstanceDir, true); if (!deleted) { LOG.error(getCompInstanceId() + ": Failed to delete component instance dir: " + compInstanceDir); } else { LOG.info(getCompInstanceId() + ": Deleted component instance dir: " + compInstanceDir); } } } catch (IOException e) { LOG.warn(getCompInstanceId() + ": Failed to delete directory", e); } } // Query container status until ip and hostname are available and update // the service record into registry service private static class ContainerStatusRetriever implements Runnable { private ContainerId containerId; private NodeId nodeId; private NMClient nmClient; private ComponentInstance instance; private boolean cancelOnSuccess; ContainerStatusRetriever(ServiceScheduler scheduler, ContainerId containerId, ComponentInstance instance, boolean cancelOnSuccess) { this.containerId = containerId; this.nodeId = instance.getNodeId(); this.nmClient = scheduler.getNmClient().getClient(); this.instance = instance; this.cancelOnSuccess = cancelOnSuccess; } @Override public void run() { ContainerStatus status = null; try { status = nmClient.getContainerStatus(containerId, nodeId); } catch (Exception e) { if (e instanceof YarnException) { throw new YarnRuntimeException( instance.compInstanceId + " Failed to get container status on " + nodeId + " , cancelling.", e); } LOG.error(instance.compInstanceId + " Failed to get container status on " + nodeId + ", will try again", e); return; } if (ServiceUtils.isEmpty(status.getIPs()) || ServiceUtils .isUnset(status.getHost())) { return; } instance.updateContainerStatus(status); if (cancelOnSuccess) { LOG.info( instance.compInstanceId + " IP = " + status.getIPs() + ", host = " + status.getHost() + ", cancel container status retriever"); instance.containerStatusFuture.cancel(false); } } } private void 
cancelContainerStatusRetriever() { if (containerStatusFuture != null && !containerStatusFuture.isDone()) { containerStatusFuture.cancel(true); } } public String getHostname() { String domain = getComponent().getScheduler().getConfig() .get(RegistryConstants.KEY_DNS_DOMAIN); String hostname; if (domain == null || domain.isEmpty()) { hostname = MessageFormat .format("{0}.{1}.{2}", getCompInstanceName(), getComponent().getContext().service.getName(), RegistryUtils.currentUser()); } else { hostname = MessageFormat .format("{0}.{1}.{2}.{3}", getCompInstanceName(), getComponent().getContext().service.getName(), RegistryUtils.currentUser(), domain); } return hostname; } @Override public int compareTo(ComponentInstance to) { return getCompInstanceId().compareTo(to.getCompInstanceId()); } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; ComponentInstance instance = (ComponentInstance) o; if (containerStartedTime != instance.containerStartedTime) return false; return compInstanceId.equals(instance.compInstanceId); } @Override public int hashCode() { int result = compInstanceId.hashCode(); result = 31 * result + (int) (containerStartedTime ^ (containerStartedTime >>> 32)); return result; } /** * Returns container spec. */ public org.apache.hadoop.yarn.service.api.records .Container getContainerSpec() { readLock.lock(); try { return containerSpec; } finally { readLock.unlock(); } } }
apache-2.0
dayubai/GuessEngine
src/main/java/com/dayu/lotto/web/controller/OZLottoController.java
3582
package com.dayu.lotto.web.controller;

import java.io.IOException;
import java.util.List;
import java.util.logging.Level;
import java.util.logging.Logger;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Controller;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.multipart.MultipartFile;
import org.springframework.web.servlet.ModelAndView;

import com.dayu.lotto.algorithm.WeightedSelector;
import com.dayu.lotto.entity.OZLottoPrediction;
import com.dayu.lotto.entity.OZLottoResult;
import com.dayu.lotto.entity.OZLottoTicket;
import com.dayu.lotto.service.LottoService;

/**
 * MVC controller for the OZ Lotto pages: ticket listing and creation,
 * draw-result uploads, and random-forest prediction views.
 */
@Controller
public class OZLottoController {

    private static final Logger LOG = Logger.getLogger(OZLottoController.class.getName());

    @Autowired
    private LottoService<OZLottoTicket, OZLottoResult, OZLottoPrediction> ozLottoService;

    /**
     * Landing page: all tickets plus the most recent draw result, if any.
     */
    @RequestMapping(value = "/ozLotto", method = RequestMethod.GET)
    public ModelAndView ozLotto() {
        ModelAndView modelAndView = new ModelAndView("ozLotto");
        modelAndView.addObject("tickets", ozLottoService.findAllTickets());
        // Bug fix: the original called findLast(1).get(0) unconditionally, which
        // throws IndexOutOfBoundsException before any result has been uploaded.
        List<OZLottoResult> lastResults = ozLottoService.findLast(1);
        if (lastResults != null && !lastResults.isEmpty()) {
            modelAndView.addObject("result", lastResults.get(0));
        }
        return modelAndView;
    }

    /**
     * Detail view for a single ticket.
     * (Renamed from the copy-pasted "saturdayLottoTicket"; the mapped URL,
     * which is the controller's real external contract, is unchanged.)
     */
    @RequestMapping(value = "/ozLotto/ticket/{ticketId}", method = RequestMethod.GET)
    public ModelAndView ozLottoTicket(@PathVariable String ticketId) {
        ModelAndView modelAndView = new ModelAndView("ticket/ozLottoTicket");
        modelAndView.addObject("ticket", ozLottoService.findByTicketId(ticketId));
        return modelAndView;
    }

    /**
     * Draws a new ticket with the requested number of draws and games, then
     * shows its detail view.
     */
    @RequestMapping(value = "/ozLotto/ticket/new", method = RequestMethod.POST)
    public ModelAndView newOzLottoTicket(@RequestParam("draws") String draws,
                                         @RequestParam("games") String games) {
        ModelAndView modelAndView = new ModelAndView("ticket/ozLottoTicket");
        String ticketId = ozLottoService.draw(new WeightedSelector(),
                Integer.parseInt(draws), Integer.parseInt(games));
        modelAndView.addObject("ticket", ozLottoService.findByTicketId(ticketId));
        return modelAndView;
    }

    /**
     * Uploads a draw-results file and redisplays the landing page.
     * The original had two identical catch blocks that only called
     * printStackTrace(); they are collapsed into one and logged properly.
     * On failure the landing page is still rendered, as before.
     */
    @RequestMapping(value = "/ozLotto/uploadResult", method = RequestMethod.POST)
    public ModelAndView ozLottoUploadResult(@RequestParam("file") MultipartFile file) {
        try {
            ozLottoService.uploadResults(file.getInputStream());
        } catch (IOException e) {
            LOG.log(Level.SEVERE, "Failed to read uploaded OZ Lotto results file", e);
        } catch (Exception e) {
            LOG.log(Level.SEVERE, "Failed to process OZ Lotto results upload", e);
        }
        return ozLotto();
    }

    /**
     * Runs number prediction for the given draw and shows the detail view.
     */
    @RequestMapping(value = "/ozLotto/rfpredict/run", method = RequestMethod.POST)
    public ModelAndView ozLottoRfPredict(@RequestParam("draw") String draw) {
        ozLottoService.generateNumberPredictions(draw);
        return ozLottoRfPredictDetailView(draw);
    }

    /**
     * Detail view of the random-forest prediction for one draw.
     */
    @RequestMapping(value = "/ozLotto/rfpredict/draw/{draw}", method = RequestMethod.GET)
    public ModelAndView ozLottoRfPredictDetailView(@PathVariable("draw") String draw) {
        ModelAndView modelAndView = new ModelAndView("rfpredict/detail/ozLottoPredictionDetail");
        modelAndView.addObject("prediction",
                ozLottoService.findForestRandomPredictionByDraw(Integer.parseInt(draw)));
        return modelAndView;
    }

    /**
     * Overview of all random-forest predictions.
     */
    @RequestMapping(value = "/ozLotto/rfpredict", method = RequestMethod.GET)
    public ModelAndView ozLottoRfPredictView() {
        ModelAndView modelAndView = new ModelAndView("rfpredict/ozLottoPrediction");
        modelAndView.addObject("predictions", ozLottoService.findAllForestRandomPredictions());
        return modelAndView;
    }
}
apache-2.0
dma-dk/MsiProxy
msiproxy-model/src/main/java/dk/dma/msiproxy/model/msi/SeriesIdType.java
848
/* Copyright (c) 2011 Danish Maritime Authority
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 3 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this library.  If not, see <http://www.gnu.org/licenses/>.
 */
package dk.dma.msiproxy.model.msi;

/**
 * The type of the message series identifier
 */
public enum SeriesIdType {
    // NOTE(review): the expansions below are inferred from the maritime-safety
    // domain of this package (dk.dma.msiproxy) — confirm against project docs.
    /** Maritime Safety Information message series (presumably). */
    MSI,
    /** Notices to Mariners message series (presumably). */
    NM
}
apache-2.0
jqno/equalsverifier
equalsverifier-test-core/src/test/java/nl/jqno/equalsverifier/integration/operational/ConfiguredEqualsVerifierMultipleTest.java
7256
package nl.jqno.equalsverifier.integration.operational;

import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;

import java.util.Arrays;
import java.util.List;
import nl.jqno.equalsverifier.ConfiguredEqualsVerifier;
import nl.jqno.equalsverifier.EqualsVerifier;
import nl.jqno.equalsverifier.EqualsVerifierReport;
import nl.jqno.equalsverifier.Warning;
import nl.jqno.equalsverifier.testhelpers.ExpectedException;
import nl.jqno.equalsverifier.testhelpers.packages.correct.A;
import nl.jqno.equalsverifier.testhelpers.types.FinalMethodsPoint;
import nl.jqno.equalsverifier.testhelpers.types.GetClassPoint;
import nl.jqno.equalsverifier.testhelpers.types.MutablePoint;
import nl.jqno.equalsverifier.testhelpers.types.PointContainer;
import nl.jqno.equalsverifier.testhelpers.types.RecursiveTypeHelper.RecursiveType;
import nl.jqno.equalsverifier.testhelpers.types.RecursiveTypeHelper.RecursiveTypeContainer;
import nl.jqno.equalsverifier.testhelpers.types.TypeHelper.DoubleGenericContainer;
import nl.jqno.equalsverifier.testhelpers.types.TypeHelper.DoubleGenericContainerContainer;
import nl.jqno.equalsverifier.testhelpers.types.TypeHelper.SingleGenericContainer;
import nl.jqno.equalsverifier.testhelpers.types.TypeHelper.SingleGenericContainerContainer;
import org.junit.jupiter.api.Test;

/**
 * Tests for pre-configured EqualsVerifier instances (EqualsVerifier.configure())
 * applied to multiple classes at once, via forPackage / forClasses: verifies that
 * pre-configured settings (usingGetClass, suppressed warnings, prefab values) are
 * applied to every class, and that per-invocation settings do NOT leak back into
 * the shared configuration.
 */
public class ConfiguredEqualsVerifierMultipleTest {

    @Test
    public void succeed_whenCallingForPackage_givenAllClassesInPackageAreCorrect() {
        EqualsVerifier
            .configure()
            .forPackage("nl.jqno.equalsverifier.testhelpers.packages.correct")
            .verify();
    }

    @Test
    public void succeed_whenEqualsVerifierUsesGetClassInsteadOfInstanceOf_givenUsingGetClassIsPreConfigured_forIterableOverload() {
        // GetClassPoint uses getClass() in equals (passes with usingGetClass);
        // FinalMethodsPoint uses instanceof (fails with usingGetClass).
        List<EqualsVerifierReport> reports = EqualsVerifier
            .configure()
            .usingGetClass()
            .forClasses(Arrays.asList(GetClassPoint.class, FinalMethodsPoint.class))
            .report();
        assertTrue(reports.get(0).isSuccessful());
        assertFalse(reports.get(1).isSuccessful());
    }

    @Test
    public void succeed_whenEqualsUsesGetClassInsteadOfInstanceOf_givenUsingGetClassIsPreConfigured_forVarargOverload() {
        // Same as above, but using the varargs overload of forClasses.
        List<EqualsVerifierReport> reports = EqualsVerifier
            .configure()
            .usingGetClass()
            .forClasses(GetClassPoint.class, FinalMethodsPoint.class)
            .report();
        assertTrue(reports.get(0).isSuccessful());
        assertFalse(reports.get(1).isSuccessful());
    }

    @Test
    public void suppressedWarningsArePassedOn() {
        EqualsVerifier
            .configure()
            .suppress(Warning.STRICT_INHERITANCE)
            .forClasses(PointContainer.class, A.class)
            .verify();
    }

    @Test
    public void sanity_fail_whenTypeIsRecursive() {
        // Baseline: without prefab values a recursive type must fail.
        ExpectedException
            .when(() -> EqualsVerifier.forClasses(RecursiveType.class, A.class).verify())
            .assertFailure()
            .assertMessageContains("Recursive datastructure");
    }

    @Test
    public void succeed_whenTypeIsRecursive_givenPrefabValuesArePreconfigured() {
        EqualsVerifier
            .configure()
            .withPrefabValues(
                RecursiveType.class,
                new RecursiveType(null),
                new RecursiveType(new RecursiveType(null))
            )
            .forClasses(RecursiveTypeContainer.class, A.class)
            .verify();
    }

    @Test
    public void sanity_fail_whenSingleGenericTypeIsRecursive() {
        ExpectedException
            .when(() ->
                EqualsVerifier.forClasses(SingleGenericContainerContainer.class, A.class).verify()
            )
            .assertFailure()
            .assertMessageContains("Recursive datastructure");
    }

    @Test
    public void succeed_whenSingleGenericTypeIsRecursive_givenGenericPrefabValuesArePreconfigured() {
        EqualsVerifier
            .configure()
            .withGenericPrefabValues(SingleGenericContainer.class, SingleGenericContainer::new)
            .forClasses(SingleGenericContainerContainer.class, A.class)
            .verify();
    }

    @Test
    public void sanity_fail_whenDoubleGenericTypeIsRecursive() {
        ExpectedException
            .when(() ->
                EqualsVerifier.forClasses(DoubleGenericContainerContainer.class, A.class).verify()
            )
            .assertFailure()
            .assertMessageContains("Recursive datastructure");
    }

    @Test
    public void succeed_whenDoubleGenericTypeIsRecursive_givenGenericPrefabValuesArePreconfigured() {
        EqualsVerifier
            .configure()
            .withGenericPrefabValues(DoubleGenericContainer.class, DoubleGenericContainer::new)
            .forClasses(DoubleGenericContainerContainer.class, A.class)
            .verify();
    }

    @Test
    public void succeed_whenConfigurationIsShared() {
        // One ConfiguredEqualsVerifier may be reused across several verifications.
        ConfiguredEqualsVerifier ev = EqualsVerifier
            .configure()
            .withGenericPrefabValues(SingleGenericContainer.class, SingleGenericContainer::new)
            .withGenericPrefabValues(DoubleGenericContainer.class, DoubleGenericContainer::new);
        ev.forClasses(SingleGenericContainerContainer.class, A.class).verify();
        ev.forClasses(DoubleGenericContainerContainer.class, A.class).verify();
    }

    @Test
    public void individuallySuppressedWarningsAreNotAddedGlobally() {
        ConfiguredEqualsVerifier ev = EqualsVerifier
            .configure()
            .suppress(Warning.STRICT_INHERITANCE);

        // should succeed
        ev.forClasses(MutablePoint.class, A.class).suppress(Warning.NONFINAL_FIELDS).verify();

        // NONFINAL_FIELDS is not added to configuration, so should fail
        ExpectedException
            .when(() -> ev.forClasses(MutablePoint.class, A.class).verify())
            .assertFailure()
            .assertMessageContains("Mutability");
    }

    @Test
    public void individuallyAddedPrefabValuesAreNotAddedGlobally() {
        ConfiguredEqualsVerifier ev = EqualsVerifier.configure();

        // should succeed
        ev
            .forClasses(SingleGenericContainerContainer.class, A.class)
            .withGenericPrefabValues(SingleGenericContainer.class, SingleGenericContainer::new)
            .verify();

        // PrefabValues are not added to configuration, so should fail
        ExpectedException
            .when(() -> ev.forClasses(SingleGenericContainerContainer.class, A.class).verify())
            .assertFailure()
            .assertMessageContains("Recursive datastructure");
    }

    @Test
    public void succeed_whenFieldsAreNonfinalAndClassIsNonfinal_givenTwoWarningsAreSuppressedButInDifferentPlaces() {
        // Suppressions from the shared configuration and from the individual
        // invocation are combined, regardless of which level holds which warning.
        EqualsVerifier
            .configure()
            .suppress(Warning.STRICT_INHERITANCE)
            .forClasses(MutablePoint.class, A.class)
            .suppress(Warning.NONFINAL_FIELDS)
            .verify();
        EqualsVerifier
            .configure()
            .suppress(Warning.NONFINAL_FIELDS)
            .forClasses(MutablePoint.class, A.class)
            .suppress(Warning.STRICT_INHERITANCE)
            .verify();
    }
}
apache-2.0
delightsoft/DSCommon
app/code/docflow/controlflow/DocflowJob.java
3604
package code.docflow.controlflow;

//
// Author: Alexey Zorkaltsev (alexey@zorkaltsev.com)
//

import code.docflow.action.Transaction;
import code.docflow.docs.Document;
import code.docflow.users.CurrentUser;
import play.Logger;
import play.db.jpa.JPAPlugin;
import play.db.jpa.NoTransaction;
import play.exceptions.JPAException;
import play.exceptions.UnexpectedException;
import play.jobs.Job;

/**
 * Extends Play Job, to support DSCommon rules: 1. Keep working under the user who
 * instantiated this job; 2. In any case (Ok or Exception) the result is returned
 * as a Result object.
 * <p/>
 * A child class must override the doDocflowJob method.
 * <p/>
 * The job should be started from a context where a user is authenticated, so all
 * the work the job does is attributed to the proper user.
 */
@NoTransaction
public abstract class DocflowJob<V> extends Job<V> {

    // User captured at construction time; the job later runs under this identity.
    private final Document user;
    private final String userRoles;
    // Populated by doJobWithResult()/execute(); readable via getResult() after completion.
    private Result result;

    // Per-thread flag holder: tracks whether the current thread is inside a DocflowJob.
    public static class Context {
        boolean withinScope;
    }

    private final static ThreadLocal<Context> context = new ThreadLocal<Context>() {
        @Override
        protected Context initialValue() {
            return new Context();
        }
    };

    /** True when the calling thread is currently executing inside a DocflowJob. */
    public static boolean isWithinScope() {
        return context.get().withinScope;
    }

    public DocflowJob() {
        // Capture the current user (or the anonymous user) so the job runs
        // under the identity of whoever instantiated it.
        final CurrentUser currentUser = CurrentUser.getInstance();
        user = currentUser.getUser() == null ? CurrentUser.ANONYMOUS_USER : currentUser.getUser();
        userRoles = currentUser.getUserRoles() == null ? CurrentUser.ANONYMOUS_USER.roles : currentUser.getUserRoles();
    }

    protected DocflowJob(Document user, String userRoles) {
        this.user = user;
        this.userRoles = userRoles;
    }

    /**
     * Returns result of the job. Result is available only after job completion.
     */
    public final Result getResult() {
        return result;
    }

    /**
     * Runs the job body synchronously, appending its outcome into the given
     * result. Any exception is rethrown wrapped in UnexpectedException, after
     * the partial result has been appended.
     */
    public final void execute(final Result result) {
        final Result localResult = this.result = new Result();
        try {
            this.doDocflowJob(localResult);
            result.append(localResult);
        } catch (Exception e) {
            result.append(localResult);
            throw new UnexpectedException(e);
        }
    }

    @Override
    public V doJobWithResult() throws Exception {
        final Result result = new Result();
        V res = null;
        final CurrentUser currentUser = CurrentUser.getInstance();
        // Remember the previous scope flag so nested usage restores it correctly.
        final boolean prevInActionScopeValue = currentUser.inActionScope;
        try {
            context.get().withinScope = true;
            currentUser.inActionScope = true;
            // Impersonate the user captured at construction time.
            currentUser.setUser(user, userRoles);
            res = doDocflowJob(result);
            if (result.isError())
                res = null;  // an errored job yields no value
            try {
                JPAPlugin.closeTx(result.isError()); // commit or rollback depending on result.isError()
            } catch (JPAException e1) {
                // then no transaction is opened at the moment
                // it says 'The JPA context is not initialized...', so we just ignore this
            }
        } catch (Throwable e) {
            // Record the failure in the result and roll back any open transaction.
            result.addException(e);
            Transaction.rollbackPreviousTransactionIfOpened();
            Logger.error("Job '%s': Error:\n%s", this.getClass().getName(), result.toString());
        } finally {
            currentUser.inActionScope = prevInActionScopeValue;
            context.get().withinScope = false;
        }
        this.result = result;
        return res;
    }

    /** Job body to be implemented by subclasses; outcome is reported via result. */
    public abstract V doDocflowJob(Result result) throws Exception;
}
apache-2.0
hardfish/justTest
azurecompute/src/main/java/org/jclouds/azurecompute/features/SubscriptionApi.java
1968
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.azurecompute.features;

import static org.jclouds.Fallbacks.EmptyListOnNotFoundOr404;

import java.util.List;

import javax.inject.Named;
import javax.ws.rs.Consumes;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.core.MediaType;

import org.jclouds.azurecompute.domain.RoleSize;
import org.jclouds.azurecompute.xml.ListRoleSizesHandler;
import org.jclouds.rest.annotations.Fallback;
import org.jclouds.rest.annotations.Headers;
import org.jclouds.rest.annotations.XMLResponseParser;

/**
 * The Service Management API includes operations for retrieving information about a subscription.
 *
 * @see <a href="http://msdn.microsoft.com/en-us/library/gg715315">docs</a>
 */
// Every request carries the Azure Service Management API version header.
@Headers(keys = "x-ms-version", values = "{jclouds.api-version}")
@Consumes(MediaType.APPLICATION_XML)
public interface SubscriptionApi {

   /**
    * The List Role Sizes operation lists the role sizes that are available under the specified subscription.
    *
    * @return the available role sizes; an empty list when the endpoint responds with
    *         404 / not found (per the {@code EmptyListOnNotFoundOr404} fallback below)
    */
   @Named("ListRoleSizes")
   @GET
   @Path("/rolesizes")
   @XMLResponseParser(ListRoleSizesHandler.class)
   @Fallback(EmptyListOnNotFoundOr404.class)
   List<RoleSize> listRoleSizes();
}
apache-2.0
aws/aws-sdk-java
aws-java-sdk-medialive/src/main/java/com/amazonaws/services/medialive/model/transform/PipelinePauseStateSettingsMarshaller.java
2063
/*
 * Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
 * the License. A copy of the License is located at
 *
 * http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */
package com.amazonaws.services.medialive.model.transform;

import javax.annotation.Generated;

import com.amazonaws.SdkClientException;
import com.amazonaws.services.medialive.model.*;
import com.amazonaws.protocol.*;
import com.amazonaws.annotation.SdkInternalApi;

/**
 * PipelinePauseStateSettingsMarshaller
 *
 * NOTE: this class is code-generated (see the @Generated annotation); changes
 * should be made in the code generator, not by hand-editing this file.
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
@SdkInternalApi
public class PipelinePauseStateSettingsMarshaller {

    // Binding for the single marshalled field: the "pipelineId" payload member.
    private static final MarshallingInfo<String> PIPELINEID_BINDING = MarshallingInfo.builder(MarshallingType.STRING)
            .marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("pipelineId").build();

    // Stateless, so a single shared instance suffices.
    private static final PipelinePauseStateSettingsMarshaller instance = new PipelinePauseStateSettingsMarshaller();

    public static PipelinePauseStateSettingsMarshaller getInstance() {
        return instance;
    }

    /**
     * Marshall the given parameter object.
     *
     * @throws SdkClientException if the settings object is null or marshalling fails
     */
    public void marshall(PipelinePauseStateSettings pipelinePauseStateSettings, ProtocolMarshaller protocolMarshaller) {

        if (pipelinePauseStateSettings == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }

        try {
            protocolMarshaller.marshall(pipelinePauseStateSettings.getPipelineId(), PIPELINEID_BINDING);
        } catch (Exception e) {
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
apache-2.0
tjkrell/MrGEO
mrgeo-core/src/main/java/org/mrgeo/data/image/MrsImageOutputFormatProvider.java
1397
/*
 * Copyright 2009-2014 DigitalGlobe, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and limitations under the License.
 */

package org.mrgeo.data.image;

import java.io.IOException;

import org.apache.hadoop.mapreduce.Job;
import org.mrgeo.data.DataProviderException;
import org.mrgeo.data.tile.TiledOutputFormatContext;
import org.mrgeo.data.tile.TiledOutputFormatProvider;

/**
 * Data plugins that wish to provide storage for image pyramids must
 * include a sub-class of this class.
 */
public abstract class MrsImageOutputFormatProvider implements TiledOutputFormatProvider
{
  // Output-format context supplied at construction; available to sub-classes.
  protected TiledOutputFormatContext context;

  public MrsImageOutputFormatProvider(TiledOutputFormatContext context)
  {
    this.context = context;
  }

  /**
   * Sub-classes that override this method must call super.setupJob(job).
   */
  @Override
  public void setupJob(Job job) throws DataProviderException, IOException
  {
    // Intentionally a no-op here: the base class requires no job configuration,
    // but the override contract above keeps the super call chain intact.
  }
}
apache-2.0
thescouser89/pnc
rest/src/main/java/org/jboss/pnc/rest/endpoints/ProjectEndpointImpl.java
3238
/**
 * JBoss, Home of Professional Open Source.
 * Copyright 2014-2020 Red Hat, Inc., and individual contributors
 * as indicated by the @author tags.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jboss.pnc.rest.endpoints;

import org.jboss.pnc.dto.Build;
import org.jboss.pnc.dto.BuildConfiguration;
import org.jboss.pnc.dto.Project;
import org.jboss.pnc.dto.ProjectRef;
import org.jboss.pnc.dto.response.Page;
import org.jboss.pnc.facade.providers.api.BuildConfigurationProvider;
import org.jboss.pnc.facade.providers.api.BuildPageInfo;
import org.jboss.pnc.facade.providers.api.BuildProvider;
import org.jboss.pnc.facade.providers.api.ProjectProvider;
import org.jboss.pnc.rest.api.endpoints.ProjectEndpoint;
import org.jboss.pnc.rest.api.parameters.BuildsFilterParameters;
import org.jboss.pnc.rest.api.parameters.PageParameters;

import javax.annotation.PostConstruct;
import javax.enterprise.context.ApplicationScoped;
import javax.inject.Inject;

/**
 * REST endpoint implementation for Project resources. CRUD operations are
 * delegated to a shared EndpointHelper; build-configuration and build listings
 * are delegated to their respective providers.
 */
@ApplicationScoped
public class ProjectEndpointImpl implements ProjectEndpoint {

    @Inject
    private ProjectProvider projectProvider;

    @Inject
    private BuildConfigurationProvider buildConfigurationProvider;

    @Inject
    private BuildProvider buildProvider;

    // Common CRUD plumbing for Project DTOs; created after injection completes.
    private EndpointHelper<Integer, Project, ProjectRef> endpointHelper;

    @PostConstruct
    public void init() {
        endpointHelper = new EndpointHelper<>(Project.class, projectProvider);
    }

    /** Pages through all projects using the supplied paging/sorting/query parameters. */
    @Override
    public Page<Project> getAll(PageParameters pageParameters) {
        return endpointHelper.getAll(pageParameters);
    }

    @Override
    public Project createNew(Project project) {
        return endpointHelper.create(project);
    }

    @Override
    public Project getSpecific(String id) {
        return endpointHelper.getSpecific(id);
    }

    @Override
    public void update(String id, Project project) {
        endpointHelper.update(id, project);
    }

    // NOTE(review): patch delegates to the same helper method as update; the
    // helper presumably handles partial updates — confirm against EndpointHelper.
    @Override
    public Project patchSpecific(String id, Project project) {
        return endpointHelper.update(id, project);
    }

    /** Lists the build configurations belonging to the given project, paged. */
    @Override
    public Page<BuildConfiguration> getBuildConfigurations(String id, PageParameters pageParameters) {
        return buildConfigurationProvider.getBuildConfigurationsForProject(
                pageParameters.getPageIndex(),
                pageParameters.getPageSize(),
                pageParameters.getSort(),
                pageParameters.getQ(),
                id);
    }

    /** Lists the builds of the given project, combining paging and build filters. */
    @Override
    public Page<Build> getBuilds(String id, PageParameters page, BuildsFilterParameters filter) {
        BuildPageInfo pageInfo = BuildEndpointImpl.toBuildPageInfo(page, filter);
        return buildProvider.getBuildsForProject(pageInfo, id);
    }
}
apache-2.0
nickman/heliosJMX
src/main/java/com/heliosapm/jmx/remote/tunnel/TunnelHandle.java
1651
/** * Helios, OpenSource Monitoring * Brought to you by the Helios Development Group * * Copyright 2007, Helios Development Group and individual contributors * as indicated by the @author tags. See the copyright.txt file in the * distribution for a full listing of individual contributors. * * This is free software; you can redistribute it and/or modify it * under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this software; if not, write to the Free * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA, or see the FSF site: http://www.fsf.org. * */ package com.heliosapm.jmx.remote.tunnel; import java.io.Closeable; /** * <p>Title: TunnelHandle</p> * <p>Description: A closeable for tunnels that also provides the local sshPort side of the tunnel</p> * <p>Company: Helios Development Group LLC</p> * @author Whitehead (nwhitehead AT heliosdev DOT org) * <p><code>com.heliosapm.jmx.remote.tunnel.TunnelHandle</code></p> */ public interface TunnelHandle extends Closeable { /** * Returns the local side sshPort of the tunnel * @return the local side sshPort of the tunnel */ public int getLocalPort(); }
apache-2.0
PSUdaemon/traffic_control
traffic_router/core/src/main/java/com/comcast/cdn/traffic_control/traffic_router/core/request/RequestMatcher.java
3765
package com.comcast.cdn.traffic_control.traffic_router.core.request; import com.comcast.cdn.traffic_control.traffic_router.core.ds.DeliveryServiceMatcher.Type; import com.comcast.cdn.traffic_control.traffic_router.core.util.ComparableStringByLength; import java.util.regex.Matcher; import java.util.regex.Pattern; public class RequestMatcher implements Comparable<RequestMatcher> { // This "meta" pattern is used to strip away all leading and trailing non-word characters except '.' and '-' from the original regex private static final String META_REGEX = "([\\W])*([\\w-\\./]+).*"; private static final Pattern metaPattern = Pattern.compile(META_REGEX); private final Type type; private final Pattern pattern; private String requestHeader = ""; private final ComparableStringByLength comparableRegex; public RequestMatcher(final Type type, final String regex, final String requestHeader) { if (type == Type.HEADER && (requestHeader == null || requestHeader.isEmpty())) { throw new IllegalArgumentException("Request Header name must be supplied for type HEADER"); } this.type = type; this.requestHeader = requestHeader; pattern = Pattern.compile(regex, Pattern.CASE_INSENSITIVE); final Matcher matcher = metaPattern.matcher(regex); matcher.matches(); comparableRegex = new ComparableStringByLength(matcher.group(2)); } public RequestMatcher(final Type type, final String regex) { this(type, regex, ""); } public boolean matches(final Request request) { final String target = getTarget(request); if (target == null) { return false; } return pattern.matcher(target).matches(); } private String getTarget(final Request request) { if (type == Type.HOST) { return request.getHostname(); } if (!(request instanceof HTTPRequest)) { return null; } final HTTPRequest httpRequest = (HTTPRequest) request; if (type == Type.HEADER) { if (httpRequest.getHeaders() != null) { return httpRequest.getHeaders().get(requestHeader); } return null; } if (type == Type.PATH) { if (httpRequest.getQueryString() == null) { 
return httpRequest.getPath(); } return httpRequest.getPath() + "?" + httpRequest.getQueryString(); } return null; } @Override public int compareTo(final RequestMatcher other) { if (this == other || this.equals(other)) { return 0; } return this.comparableRegex.compareTo(other.comparableRegex); } @Override @SuppressWarnings({"PMD.IfStmtsMustUseBraces" , "PMD.CyclomaticComplexity"}) public boolean equals(final Object other) { if (this == other) return true; if (other == null || getClass() != other.getClass()) return false; final RequestMatcher that = (RequestMatcher) other; if (type != that.type) return false; if (pattern != null ? !pattern.pattern().equals(that.pattern.pattern()) : that.pattern != null) return false; if (requestHeader != null ? !requestHeader.equals(that.requestHeader) : that.requestHeader != null) return false; return !(comparableRegex != null ? !comparableRegex.equals(that.comparableRegex) : that.comparableRegex != null); } @Override @SuppressWarnings({"PMD.CyclomaticComplexity", "PMD.NPathComplexity"}) public int hashCode() { int result = type != null ? type.hashCode() : 0; result = 31 * result + (pattern != null ? pattern.pattern().hashCode() : 0); result = 31 * result + (requestHeader != null ? requestHeader.hashCode() : 0); result = 31 * result + (comparableRegex != null ? comparableRegex.hashCode() : 0); return result; } @Override public String toString() { return "RequestMatcher{" + "type=" + type + ", pattern=" + pattern + ", requestHeader='" + requestHeader + '\'' + ", comparableRegex=" + comparableRegex + '}'; } }
apache-2.0
thrawn-sh/subversion
src/test/java/de/shadowhunt/subversion/internal/httpv1/v1_2/RepositoryLogReadOnlyIT.java
1131
/** * Copyright © 2013-2018 shadowhunt (dev@shadowhunt.de) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package de.shadowhunt.subversion.internal.httpv1.v1_2; import de.shadowhunt.subversion.internal.AbstractRepositoryLogIT; import org.junit.BeforeClass; public class RepositoryLogReadOnlyIT extends AbstractRepositoryLogIT { private static final Helper HELPER = new Helper(); @BeforeClass public static void prepare() throws Exception { HELPER.pullCurrentDumpData(); } public RepositoryLogReadOnlyIT() { super(HELPER.getRepositoryReadOnly(), HELPER.getRoot()); } }
apache-2.0
plasma147/lorraine-dto-test-gen
src/test/java/uk/co/optimisticpanda/gtest/dto/ExampleTest2.java
3292
/* * Copyright 2009 Andy Lee. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package uk.co.optimisticpanda.gtest.dto; import java.util.List; import junit.framework.TestCase; import uk.co.optimisticpanda.gtest.dto.defaultfill.DefaultValueGenerator; import uk.co.optimisticpanda.gtest.dto.defaultfill.ValueGenerator; import uk.co.optimisticpanda.gtest.dto.defaultfill.enggen.DtoGenerationEngine; import uk.co.optimisticpanda.gtest.dto.defaultfill.insgen.InstanceGenerator; import uk.co.optimisticpanda.gtest.dto.test.utils.TestDto3; /** * @author Andy Lee * */ public class ExampleTest2 extends TestCase { /** * @throws Exception */ public void testBasicExampleGeneration() throws Exception { //Register the utils context to use ognl for introspection TestUtilsContext.useOgnl(); //Build the intance generator (which is responsible for creating new dtos of a specific type). InstanceGenerator<TestDto3> generator = InstanceGenerator.create(TestDto3.class); //Create a generation engine that is responsible for producing the dtos and applying visitors to them. DtoGenerationEngine<TestDto3> engine = new DtoGenerationEngine<>(generator); //Create and return 5 dtos List<TestDto3> dtos = engine.collect(5); assertEquals(5, dtos.size()); //When no ValueGeneratorCache is specified then a default cache will be used. 
//This provides default values for primitive types, in the case of strings the value "DEFAULT" assertEquals("DEFAULT", dtos.get(0).getName()); assertEquals("DEFAULT", dtos.get(0).getDescription()); } /** * @throws Exception */ public void testExample() throws Exception { //Register the utils context to use ognl for introspection TestUtilsContext.useOgnl(); //Create a value generator cache ValueGenerator cache = new DefaultValueGenerator(); // //register the string generator against properties of type string called name. //Note you can register generators against different criteria (See ValueGeneratorCache.java) cache.registerAPropertyNameAndTypeGenerator("name", String.class, () -> "DEFAULT_EXAMPLE_NAME"); //Build the generator with the custom cache. InstanceGenerator<TestDto3> generator = InstanceGenerator.create(TestDto3.class, cache); //Create a generation engine that is responsible for producing the dtos and applying visitors to them. DtoGenerationEngine<TestDto3> engine = new DtoGenerationEngine<TestDto3>(generator); //Create and return 1 dto List<TestDto3> dtos = engine.collect(1); assertEquals(1, dtos.size()); //Name has been set to the non-default value assertEquals("DEFAULT_EXAMPLE_NAME", dtos.get(0).getName()); //Description is still set to the default value assertEquals("DEFAULT", dtos.get(0).getDescription()); } }
apache-2.0
daedafusion/aniketos
aniketos-core/src/main/java/com/daedafusion/aniketos/framework/ServerTokenExchange.java
184
package com.daedafusion.aniketos.framework; /** * Created by mphilpot on 1/22/15. */ public interface ServerTokenExchange { boolean isTokenValidNoSession(String tokenString); }
apache-2.0
anjuncc/hadoop
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
163447
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.blockmanagement; import static org.apache.hadoop.util.ExitUtil.terminate; import java.io.IOException; import java.io.PrintWriter; import java.util.ArrayList; import java.util.BitSet; import java.util.Collection; import java.util.Collections; import java.util.EnumSet; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Queue; import java.util.Set; import java.util.TreeSet; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.atomic.AtomicLong; import javax.management.ObjectName; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileEncryptionInfo; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.DFSUtilClient; import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.HAUtil; import org.apache.hadoop.hdfs.protocol.Block; import 
org.apache.hadoop.hdfs.protocol.BlockListAsLongs; import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock; import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier.AccessMode; import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey; import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; import org.apache.hadoop.hdfs.server.blockmanagement.CorruptReplicasMap.Reason; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo.AddBlockResult; import org.apache.hadoop.hdfs.server.blockmanagement.PendingDataNodeMessages.ReportedBlockInfo; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.namenode.CachedBlock; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory; import org.apache.hadoop.hdfs.server.namenode.Namesystem; import org.apache.hadoop.hdfs.server.namenode.ha.HAContext; import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics; import org.apache.hadoop.hdfs.server.protocol.BlockCommand; import org.apache.hadoop.hdfs.server.protocol.BlockReportContext; import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations; import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; import 
org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.StripedBlockWithLocations; import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State; import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand; import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo; import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks; import org.apache.hadoop.hdfs.util.LightWeightHashSet; import org.apache.hadoop.hdfs.util.LightWeightLinkedSet; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import static org.apache.hadoop.hdfs.util.StripedBlockUtil.getInternalBlockLength; import org.apache.hadoop.metrics2.util.MBeans; import org.apache.hadoop.net.Node; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.LightWeightGSet; import org.apache.hadoop.util.Time; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Keeps information related to the blocks stored in the Hadoop cluster. 
*/ @InterfaceAudience.Private public class BlockManager implements BlockStatsMXBean { public static final Logger LOG = LoggerFactory.getLogger(BlockManager.class); public static final Logger blockLog = NameNode.blockStateChangeLog; private static final String QUEUE_REASON_CORRUPT_STATE = "it has the wrong state or generation stamp"; private static final String QUEUE_REASON_FUTURE_GENSTAMP = "generation stamp is in the future"; private final Namesystem namesystem; private final DatanodeManager datanodeManager; private final HeartbeatManager heartbeatManager; private final BlockTokenSecretManager blockTokenSecretManager; private final PendingDataNodeMessages pendingDNMessages = new PendingDataNodeMessages(); private volatile long pendingReplicationBlocksCount = 0L; private volatile long corruptReplicaBlocksCount = 0L; private volatile long underReplicatedBlocksCount = 0L; private volatile long scheduledReplicationBlocksCount = 0L; /** flag indicating whether replication queues have been initialized */ private boolean initializedReplQueues; private final AtomicLong excessBlocksCount = new AtomicLong(0L); private final AtomicLong postponedMisreplicatedBlocksCount = new AtomicLong(0L); private final long startupDelayBlockDeletionInMs; private final BlockReportLeaseManager blockReportLeaseManager; private ObjectName mxBeanName; /** Used by metrics */ public long getPendingReplicationBlocksCount() { return pendingReplicationBlocksCount; } /** Used by metrics */ public long getUnderReplicatedBlocksCount() { return underReplicatedBlocksCount; } /** Used by metrics */ public long getCorruptReplicaBlocksCount() { return corruptReplicaBlocksCount; } /** Used by metrics */ public long getScheduledReplicationBlocksCount() { return scheduledReplicationBlocksCount; } /** Used by metrics */ public long getPendingDeletionBlocksCount() { return invalidateBlocks.numBlocks(); } /** Used by metrics */ public long getStartupDelayBlockDeletionInMs() { return startupDelayBlockDeletionInMs; 
} /** Used by metrics */ public long getExcessBlocksCount() { return excessBlocksCount.get(); } /** Used by metrics */ public long getPostponedMisreplicatedBlocksCount() { return postponedMisreplicatedBlocksCount.get(); } /** Used by metrics */ public int getPendingDataNodeMessageCount() { return pendingDNMessages.count(); } /**replicationRecheckInterval is how often namenode checks for new replication work*/ private final long replicationRecheckInterval; /** * Mapping: Block -> { BlockCollection, datanodes, self ref } * Updated only in response to client-sent information. */ final BlocksMap blocksMap; /** Replication thread. */ final Daemon replicationThread = new Daemon(new ReplicationMonitor()); /** Store blocks -> datanodedescriptor(s) map of corrupt replicas */ final CorruptReplicasMap corruptReplicas = new CorruptReplicasMap(); /** * Blocks to be invalidated. * For a striped block to invalidate, we should track its individual internal * blocks. */ private final InvalidateBlocks invalidateBlocks; /** * After a failover, over-replicated blocks may not be handled * until all of the replicas have done a block report to the * new active. This is to make sure that this NameNode has been * notified of all block deletions that might have been pending * when the failover happened. */ private final LightWeightHashSet<Block> postponedMisreplicatedBlocks = new LightWeightHashSet<>(); /** * Maps a StorageID to the set of blocks that are "extra" for this * DataNode. We'll eventually remove these extras. */ public final Map<String, LightWeightLinkedSet<BlockInfo>> excessReplicateMap = new HashMap<>(); /** * Store set of Blocks that need to be replicated 1 or more times. * We also store pending replication-orders. 
*/ public final UnderReplicatedBlocks neededReplications = new UnderReplicatedBlocks(); @VisibleForTesting final PendingReplicationBlocks pendingReplications; /** The maximum number of replicas allowed for a block */ public final short maxReplication; /** * The maximum number of outgoing replication streams a given node should have * at one time considering all but the highest priority replications needed. */ int maxReplicationStreams; /** * The maximum number of outgoing replication streams a given node should have * at one time. */ int replicationStreamsHardLimit; /** Minimum copies needed or else write is disallowed */ public final short minReplication; /** Default number of replicas */ public final int defaultReplication; /** value returned by MAX_CORRUPT_FILES_RETURNED */ final int maxCorruptFilesReturned; final float blocksInvalidateWorkPct; final int blocksReplWorkMultiplier; /** variable to enable check for enough racks */ final boolean shouldCheckForEnoughRacks; // whether or not to issue block encryption keys. final boolean encryptDataTransfer; // Max number of blocks to log info about during a block report. private final long maxNumBlocksToLog; /** * When running inside a Standby node, the node may receive block reports * from datanodes before receiving the corresponding namespace edits from * the active NameNode. Thus, it will postpone them for later processing, * instead of marking the blocks as corrupt. */ private boolean shouldPostponeBlocksFromFuture = false; /** * Process replication queues asynchronously to allow namenode safemode exit * and failover to be faster. HDFS-5496 */ private Daemon replicationQueuesInitializer = null; /** * Number of blocks to process asychronously for replication queues * initialization once aquired the namesystem lock. Remaining blocks will be * processed again after aquiring lock again. */ private int numBlocksPerIteration; /** * Progress of the Replication queues initialisation. 
*/
  private double replicationQueuesInitProgress = 0.0;

  /** for block replicas placement */
  private BlockPlacementPolicies placementPolicies;
  private final BlockStoragePolicySuite storagePolicySuite;

  /** Check whether name system is running before terminating */
  private boolean checkNSRunning = true;

  /** Check whether there are any non-EC blocks using StripedID */
  private boolean hasNonEcBlockUsingStripedID = false;

  /**
   * Wires up the block-management subsystem: datanode manager, invalidate
   * queue, blocks map, placement policy, pending-replication tracker and
   * (optionally) the block token secret manager, all driven by {@code conf}.
   *
   * @param namesystem the owning name system (lock provider, safe mode)
   * @param conf cluster configuration
   * @throws IOException if replication min/max settings are inconsistent
   */
  public BlockManager(final Namesystem namesystem, final Configuration conf)
      throws IOException {
    this.namesystem = namesystem;
    datanodeManager = new DatanodeManager(this, namesystem, conf);
    heartbeatManager = datanodeManager.getHeartbeatManager();
    // Blocks scheduled for deletion are held back this long after startup
    // so that late-reporting datanodes are not asked to delete valid replicas.
    startupDelayBlockDeletionInMs = conf.getLong(
        DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_KEY,
        DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_DEFAULT)
        * 1000L;
    invalidateBlocks = new InvalidateBlocks(
        datanodeManager.blockInvalidateLimit, startupDelayBlockDeletionInMs);

    // Compute the map capacity by allocating 2% of total memory
    blocksMap = new BlocksMap(
        LightWeightGSet.computeCapacity(2.0, "BlocksMap"));
    placementPolicies = new BlockPlacementPolicies(
        conf, datanodeManager.getFSClusterStats(),
        datanodeManager.getNetworkTopology(),
        datanodeManager.getHost2DatanodeMap());
    storagePolicySuite = BlockStoragePolicySuite.createDefaultSuite();
    pendingReplications = new PendingReplicationBlocks(conf.getInt(
        DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,
        DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_DEFAULT)
        * 1000L);

    // null when block access tokens are disabled (see the factory below)
    blockTokenSecretManager = createBlockTokenSecretManager(conf);

    this.maxCorruptFilesReturned = conf.getInt(
        DFSConfigKeys.DFS_DEFAULT_MAX_CORRUPT_FILES_RETURNED_KEY,
        DFSConfigKeys.DFS_DEFAULT_MAX_CORRUPT_FILES_RETURNED);
    this.defaultReplication = conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,
        DFSConfigKeys.DFS_REPLICATION_DEFAULT);

    // Validate min <= max replication and that max fits in a short,
    // since the replication fields below are declared short.
    final int maxR = conf.getInt(DFSConfigKeys.DFS_REPLICATION_MAX_KEY,
        DFSConfigKeys.DFS_REPLICATION_MAX_DEFAULT);
    final int minR = conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY,
        DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_DEFAULT);
    if (minR <= 0)
      throw new IOException("Unexpected configuration parameters: "
          + DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY
          + " = " + minR + " <= 0");
    if (maxR > Short.MAX_VALUE)
      throw new IOException("Unexpected configuration parameters: "
          + DFSConfigKeys.DFS_REPLICATION_MAX_KEY
          + " = " + maxR + " > " + Short.MAX_VALUE);
    if (minR > maxR)
      throw new IOException("Unexpected configuration parameters: "
          + DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY
          + " = " + minR + " > "
          + DFSConfigKeys.DFS_REPLICATION_MAX_KEY
          + " = " + maxR);
    this.minReplication = (short)minR;
    this.maxReplication = (short)maxR;

    this.maxReplicationStreams = conf.getInt(
        DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY,
        DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_DEFAULT);
    this.replicationStreamsHardLimit = conf.getInt(
        DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY,
        DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_DEFAULT);
    // Rack-awareness checks only make sense when a topology script is set.
    this.shouldCheckForEnoughRacks =
        conf.get(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY) == null
            ? false : true;

    this.blocksInvalidateWorkPct =
        DFSUtil.getInvalidateWorkPctPerIteration(conf);
    this.blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf);

    this.replicationRecheckInterval = conf.getInt(
        DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY,
        DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT) * 1000L;

    this.encryptDataTransfer = conf.getBoolean(
        DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY,
        DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_DEFAULT);

    this.maxNumBlocksToLog = conf.getLong(
        DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_KEY,
        DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_DEFAULT);
    this.numBlocksPerIteration = conf.getInt(
        DFSConfigKeys.DFS_BLOCK_MISREPLICATION_PROCESSING_LIMIT,
        DFSConfigKeys.DFS_BLOCK_MISREPLICATION_PROCESSING_LIMIT_DEFAULT);
    this.blockReportLeaseManager = new BlockReportLeaseManager(conf);

    LOG.info("defaultReplication = " + defaultReplication);
    LOG.info("maxReplication = " + maxReplication);
    LOG.info("minReplication = " + minReplication);
    LOG.info("maxReplicationStreams = " + maxReplicationStreams);
    LOG.info("shouldCheckForEnoughRacks = " + shouldCheckForEnoughRacks);
    LOG.info("replicationRecheckInterval = " + replicationRecheckInterval);
    LOG.info("encryptDataTransfer = " + encryptDataTransfer);
    LOG.info("maxNumBlocksToLog = " + maxNumBlocksToLog);
  }

  /**
   * Creates the {@link BlockTokenSecretManager} when block access tokens are
   * enabled, or returns {@code null} when they are not.
   *
   * @throws IOException if security is on but block access tokens are off —
   *         that combination would break client-to-datanode connections.
   */
  private static BlockTokenSecretManager createBlockTokenSecretManager(
      final Configuration conf) throws IOException {
    final boolean isEnabled = conf.getBoolean(
        DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY,
        DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_DEFAULT);
    LOG.info(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY
        + "=" + isEnabled);

    if (!isEnabled) {
      if (UserGroupInformation.isSecurityEnabled()) {
        String errMessage = "Security is enabled but block access tokens "
            + "(via " + DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY + ") "
            + "aren't enabled. This may cause issues "
            + "when clients attempt to connect to a DataNode. Aborting NameNode";
        throw new IOException(errMessage);
      }
      return null;
    }

    final long updateMin = conf.getLong(
        DFSConfigKeys.DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_KEY,
        DFSConfigKeys.DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_DEFAULT);
    final long lifetimeMin = conf.getLong(
        DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_LIFETIME_KEY,
        DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_LIFETIME_DEFAULT);
    final String encryptionAlgorithm = conf.get(
        DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY);
    LOG.info(DFSConfigKeys.DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_KEY
        + "=" + updateMin + " min(s), "
        + DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_LIFETIME_KEY
        + "=" + lifetimeMin + " min(s), "
        + DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY
        + "=" + encryptionAlgorithm);

    String nsId = DFSUtil.getNamenodeNameServiceId(conf);
    boolean isHaEnabled = HAUtil.isHAEnabled(conf, nsId);

    if (isHaEnabled) {
      // figure out which index we are of the nns
      // (each NN in an HA pair gets a distinct key range via its index)
      Collection<String> nnIds = DFSUtilClient.getNameNodeIds(conf, nsId);
      String nnId = HAUtil.getNameNodeId(conf, nsId);
      int nnIndex = 0;
      for (String id : nnIds) {
        if (id.equals(nnId)) {
          break;
        }
        nnIndex++;
      }
      return new BlockTokenSecretManager(updateMin * 60 * 1000L,
          lifetimeMin * 60 * 1000L, nnIndex, nnIds.size(), null,
          encryptionAlgorithm);
    } else {
      return new BlockTokenSecretManager(updateMin*60*1000L,
          lifetimeMin*60*1000L, 0, 1, null, encryptionAlgorithm);
    }
  }

  /** Look up a storage policy by name. */
  public BlockStoragePolicy getStoragePolicy(final String policyName) {
    return storagePolicySuite.getPolicy(policyName);
  }

  /** Look up a storage policy by id. */
  public BlockStoragePolicy getStoragePolicy(final byte policyId) {
    return storagePolicySuite.getPolicy(policyId);
  }

  public BlockStoragePolicy[] getStoragePolicies() {
    return storagePolicySuite.getAllPolicies();
  }

  /** Propagate the block pool id to the token manager (no-op if disabled). */
  public void setBlockPoolId(String blockPoolId) {
    if (isBlockTokenEnabled()) {
      blockTokenSecretManager.setBlockPoolId(blockPoolId);
    }
  }

  public BlockStoragePolicySuite getStoragePolicySuite() {
    return storagePolicySuite;
  }

  /** get the BlockTokenSecretManager */
  @VisibleForTesting
  public
BlockTokenSecretManager getBlockTokenSecretManager() {
    return blockTokenSecretManager;
  }

  /** Allow silent termination of replication monitor for testing */
  @VisibleForTesting
  void enableRMTerminationForTesting() {
    checkNSRunning = false;
  }

  /** @return true iff block access tokens are configured on this cluster. */
  private boolean isBlockTokenEnabled() {
    return blockTokenSecretManager != null;
  }

  /** Should the access keys be updated? */
  boolean shouldUpdateBlockKey(final long updateTime) throws IOException {
    return isBlockTokenEnabled()
        ? blockTokenSecretManager.updateKeys(updateTime) : false;
  }

  /** Start the background machinery: pending-replication timer, datanode
   *  manager, replication monitor thread, and the BlockStats MXBean. */
  public void activate(Configuration conf) {
    pendingReplications.start();
    datanodeManager.activate(conf);
    this.replicationThread.start();
    mxBeanName = MBeans.register("NameNode", "BlockStats", this);
  }

  /** Stop background threads and release the blocks map. */
  public void close() {
    try {
      replicationThread.interrupt();
      // bounded wait; shutdown proceeds even if the thread is stuck
      replicationThread.join(3000);
    } catch (InterruptedException ie) {
    }
    datanodeManager.close();
    pendingReplications.stop();
    blocksMap.close();
  }

  /** @return the datanodeManager */
  public DatanodeManager getDatanodeManager() {
    return datanodeManager;
  }

  @VisibleForTesting
  public BlockPlacementPolicy getBlockPlacementPolicy() {
    // false selects the default (non-EC) placement policy
    return placementPolicies.getPolicy(false);
  }

  /** Dump meta data to out.
*/
  private static boolean commitBlock(final BlockInfo block,
      final Block commitBlock) throws IOException {
    if (block.getBlockUCState() == BlockUCState.COMMITTED)
      return false;
    assert block.getNumBytes() <= commitBlock.getNumBytes() :
      "commitBlock length is less than the stored one "
      + commitBlock.getNumBytes() + " vs. " + block.getNumBytes();
    block.commitBlock(commitBlock);
    return true;
  }

  /**
   * Commit the last block of the file and mark it as complete if it has
   * meets the minimum replication requirement
   *
   * @param bc block collection
   * @param commitBlock - contains client reported block length and generation
   * @return true if the last block is changed to committed state.
   * @throws IOException if the block does not have at least a minimal number
   * of replicas reported from data-nodes.
   */
  public boolean commitOrCompleteLastBlock(BlockCollection bc,
      Block commitBlock) throws IOException {
    if(commitBlock == null)
      return false; // not committing, this is a block allocation retry
    BlockInfo lastBlock = bc.getLastBlock();
    if(lastBlock == null)
      return false; // no blocks in file yet
    if(lastBlock.isComplete())
      return false; // already completed (e.g. by syncBlock)

    final boolean b = commitBlock(lastBlock, commitBlock);
    // complete only when enough replicas have been reported
    if (hasMinStorage(lastBlock)) {
      completeBlock(lastBlock, false);
    }
    return b;
  }

  /**
   * Convert a specified block of the file to a complete block.
   * @throws IOException if the block does not have at least a minimal number
   * of replicas reported from data-nodes.
   */
  private void completeBlock(BlockInfo curBlock, boolean force)
      throws IOException {
    if (curBlock.isComplete()) {
      return;
    }

    int numNodes = curBlock.numNodes();
    if (!force && !hasMinStorage(curBlock, numNodes)) {
      throw new IOException("Cannot complete block: "
          + "block does not satisfy minimal replication requirement.");
    }
    if (!force && curBlock.getBlockUCState() != BlockUCState.COMMITTED) {
      throw new IOException(
          "Cannot complete block: block has not been COMMITTED by the client");
    }

    curBlock.convertToCompleteBlock();
    // Since safe-mode only counts complete blocks, and we now have
    // one more complete block, we need to adjust the total up, and
    // also count it as safe, if we have at least the minimum replica
    // count. (We may not have the minimum replica count yet if this is
    // a "forced" completion when a file is getting closed by an
    // OP_CLOSE edit on the standby).
    namesystem.adjustSafeModeBlockTotals(0, 1);
    final int minStorage = curBlock.isStriped() ?
        ((BlockInfoStriped) curBlock).getRealDataBlockNum() : minReplication;
    namesystem.incrementSafeBlockCount(
        Math.min(numNodes, minStorage), curBlock);
  }

  /**
   * Force the given block in the given file to be marked as complete,
   * regardless of whether enough replicas are present. This is necessary
   * when tailing edit logs as a Standby.
   */
  public void forceCompleteBlock(final BlockInfo block) throws IOException {
    block.commitBlock(block);
    completeBlock(block, true);
  }

  /**
   * Convert the last block of the file to an under construction block.<p>
   * The block is converted only if the file has blocks and the last one
   * is a partial block (its size is less than the preferred block size).
   * The converted block is returned to the client.
   * The client uses the returned block locations to form the data pipeline
   * for this block.<br>
   * The methods returns null if there is no partial block at the end.
   * The client is supposed to allocate a new block with the next call.
   *
   * @param bc file
   * @param bytesToRemove num of bytes to remove from block
   * @return the last block locations if the block is partial or null otherwise
   */
  public LocatedBlock convertLastBlockToUnderConstruction(
      BlockCollection bc, long bytesToRemove) throws IOException {
    BlockInfo lastBlock = bc.getLastBlock();
    // nothing to do when there is no last block or it is exactly full
    if (lastBlock == null
        || bc.getPreferredBlockSize()
            == lastBlock.getNumBytes() - bytesToRemove) {
      return null;
    }
    assert lastBlock == getStoredBlock(lastBlock) :
      "last block of the file is not in blocksMap";

    DatanodeStorageInfo[] targets = getStorages(lastBlock);

    // convert the last block to under construction. note no block replacement
    // is happening
    bc.convertLastBlockToUC(lastBlock, targets);

    // Remove block from replication queue.
    NumberReplicas replicas = countNodes(lastBlock);
    neededReplications.remove(lastBlock, replicas.liveReplicas(),
        replicas.decommissionedAndDecommissioning(),
        getReplication(lastBlock));
    pendingReplications.remove(lastBlock);

    // remove this block from the list of pending blocks to be deleted.
    for (DatanodeStorageInfo storage : targets) {
      final Block b = getBlockOnStorage(lastBlock, storage);
      if (b != null) {
        invalidateBlocks.remove(storage.getDatanodeDescriptor(), b);
      }
    }

    // Adjust safe-mode totals, since under-construction blocks don't
    // count in safe-mode.
    namesystem.adjustSafeModeBlockTotals(
        // decrement safe if we had enough
        hasMinStorage(lastBlock, targets.length) ?
-1 : 0,
        // always decrement total blocks
        -1);

    final long fileLength = bc.computeContentSummary(
        getStoragePolicySuite()).getLength();
    final long pos = fileLength - lastBlock.getNumBytes();
    return createLocatedBlock(lastBlock, pos,
        BlockTokenIdentifier.AccessMode.WRITE);
  }

  /**
   * Get all valid locations of the block
   */
  private List<DatanodeStorageInfo> getValidLocations(BlockInfo block) {
    final List<DatanodeStorageInfo> locations =
        new ArrayList<DatanodeStorageInfo>(blocksMap.numNodes(block));
    for(DatanodeStorageInfo storage : blocksMap.getStorages(block)) {
      // filter invalidate replicas
      Block b = getBlockOnStorage(block, storage);
      if(b != null
          && !invalidateBlocks.contains(storage.getDatanodeDescriptor(), b)) {
        locations.add(storage);
      }
    }
    return locations;
  }

  /**
   * Build LocatedBlocks for the blocks overlapping [offset, offset+length),
   * capped at nrBlocksToReturn entries. Returns an empty list when the
   * offset is at or past end of file.
   */
  private List<LocatedBlock> createLocatedBlockList(final BlockInfo[] blocks,
      final long offset, final long length, final int nrBlocksToReturn,
      final AccessMode mode) throws IOException {
    int curBlk;
    long curPos = 0, blkSize = 0;
    // a zero-length first block means the file has no data yet
    int nrBlocks = (blocks[0].getNumBytes() == 0) ? 0 : blocks.length;
    for (curBlk = 0; curBlk < nrBlocks; curBlk++) {
      blkSize = blocks[curBlk].getNumBytes();
      assert blkSize > 0 : "Block of size 0";
      if (curPos + blkSize > offset) {
        break;
      }
      curPos += blkSize;
    }

    if (nrBlocks > 0 && curBlk == nrBlocks)   // offset >= end of file
      return Collections.emptyList();

    long endOff = offset + length;
    List<LocatedBlock> results = new ArrayList<>(blocks.length);
    do {
      results.add(createLocatedBlock(blocks[curBlk], curPos, mode));
      curPos += blocks[curBlk].getNumBytes();
      curBlk++;
    } while (curPos < endOff
        && curBlk < blocks.length
        && results.size() < nrBlocksToReturn);
    return results;
  }

  /** Locate the single block containing file position {@code endPos}. */
  private LocatedBlock createLocatedBlock(final BlockInfo[] blocks,
      final long endPos, final AccessMode mode) throws IOException {
    int curBlk;
    long curPos = 0;
    int nrBlocks = (blocks[0].getNumBytes() == 0) ? 0 : blocks.length;
    for (curBlk = 0; curBlk < nrBlocks; curBlk++) {
      long blkSize = blocks[curBlk].getNumBytes();
      if (curPos + blkSize >= endPos) {
        break;
      }
      curPos += blkSize;
    }
    return createLocatedBlock(blocks[curBlk], curPos, mode);
  }

  /** As below, additionally attaching a block token when mode is non-null. */
  private LocatedBlock createLocatedBlock(final BlockInfo blk, final long pos,
      final AccessMode mode) throws IOException {
    final LocatedBlock lb = createLocatedBlock(blk, pos);
    if (mode != null) {
      setBlockToken(lb, mode);
    }
    return lb;
  }

  /** @return a LocatedBlock for the given block */
  private LocatedBlock createLocatedBlock(final BlockInfo blk, final long pos)
      throws IOException {
    if (!blk.isComplete()) {
      // under construction: report the expected pipeline locations
      final BlockUnderConstructionFeature uc =
          blk.getUnderConstructionFeature();
      if (blk.isStriped()) {
        final DatanodeStorageInfo[] storages =
            uc.getExpectedStorageLocations();
        final ExtendedBlock eb =
            new ExtendedBlock(namesystem.getBlockPoolId(), blk);
        return newLocatedStripedBlock(eb, storages, uc.getBlockIndices(),
            pos, false);
      } else {
        final DatanodeStorageInfo[] storages =
            uc.getExpectedStorageLocations();
        final ExtendedBlock eb =
            new ExtendedBlock(namesystem.getBlockPoolId(), blk);
        return newLocatedBlock(eb, storages, pos, false);
      }
    }

    // get block locations
    final int numCorruptNodes = countNodes(blk).corruptReplicas();
    final int numCorruptReplicas = corruptReplicas.numCorruptReplicas(blk);
    if (numCorruptNodes != numCorruptReplicas) {
      LOG.warn("Inconsistent number of corrupt replicas for " + blk
          + " blockMap has " + numCorruptNodes
          + " but corrupt replicas map has " + numCorruptReplicas);
    }

    final int numNodes = blocksMap.numNodes(blk);
    // all replicas corrupt: report them all rather than an empty list
    final boolean isCorrupt =
        numCorruptNodes != 0 && numCorruptNodes == numNodes;
    final int numMachines = isCorrupt ? numNodes: numNodes - numCorruptNodes;
    final DatanodeStorageInfo[] machines =
        new DatanodeStorageInfo[numMachines];
    final int[] blockIndices = blk.isStriped() ? new int[numMachines] : null;
    int j = 0, i = 0;
    if (numMachines > 0) {
      for(DatanodeStorageInfo storage : blocksMap.getStorages(blk)) {
        final DatanodeDescriptor d = storage.getDatanodeDescriptor();
        final boolean replicaCorrupt = corruptReplicas.isReplicaCorrupt(blk, d);
        if (isCorrupt || (!replicaCorrupt)) {
          machines[j++] = storage;
          // TODO this can be more efficient
          if (blockIndices != null) {
            int index =
                ((BlockInfoStriped) blk).getStorageBlockIndex(storage);
            assert index >= 0;
            blockIndices[i++] = index;
          }
        }
      }
    }
    assert j == machines.length :
      "isCorrupt: " + isCorrupt
      + " numMachines: " + numMachines
      + " numNodes: " + numNodes
      + " numCorrupt: " + numCorruptNodes
      + " numCorruptRepls: " + numCorruptReplicas;
    final ExtendedBlock eb =
        new ExtendedBlock(namesystem.getBlockPoolId(), blk);
    return blockIndices == null
        ? newLocatedBlock(eb, machines, pos, isCorrupt)
        : newLocatedStripedBlock(eb, machines, blockIndices, pos, isCorrupt);
  }

  /** Create a LocatedBlocks. */
  public LocatedBlocks createLocatedBlocks(final BlockInfo[] blocks,
      final long fileSizeExcludeBlocksUnderConstruction,
      final boolean isFileUnderConstruction, final long offset,
      final long length, final boolean needBlockToken,
      final boolean inSnapshot, FileEncryptionInfo feInfo,
      ErasureCodingPolicy ecPolicy) throws IOException {
    assert namesystem.hasReadLock();
    if (blocks == null) {
      return null;
    } else if (blocks.length == 0) {
      return new LocatedBlocks(0, isFileUnderConstruction,
          Collections.<LocatedBlock> emptyList(), null, false, feInfo,
          ecPolicy);
    } else {
      if (LOG.isDebugEnabled()) {
        LOG.debug("blocks = " + java.util.Arrays.asList(blocks));
      }
      final AccessMode mode =
          needBlockToken? BlockTokenIdentifier.AccessMode.READ: null;
      final List<LocatedBlock> locatedblocks = createLocatedBlockList(
          blocks, offset, length, Integer.MAX_VALUE, mode);

      final LocatedBlock lastlb;
      final boolean isComplete;
      if (!inSnapshot) {
        final BlockInfo last = blocks[blocks.length - 1];
        final long lastPos = last.isComplete()?
            fileSizeExcludeBlocksUnderConstruction - last.getNumBytes()
            : fileSizeExcludeBlocksUnderConstruction;
        lastlb = createLocatedBlock(last, lastPos, mode);
        isComplete = last.isComplete();
      } else {
        // in a snapshot the file length is frozen at snapshot time
        lastlb = createLocatedBlock(blocks,
            fileSizeExcludeBlocksUnderConstruction, mode);
        isComplete = true;
      }
      return new LocatedBlocks(fileSizeExcludeBlocksUnderConstruction,
          isFileUnderConstruction, locatedblocks, lastlb, isComplete, feInfo,
          ecPolicy);
    }
  }

  /** @return current access keys. */
  public ExportedBlockKeys getBlockKeys() {
    return isBlockTokenEnabled()
        ? blockTokenSecretManager.exportKeys() : ExportedBlockKeys.DUMMY_KEYS;
  }

  /** Generate a block token for the located block. */
  public void setBlockToken(final LocatedBlock b,
      final AccessMode mode) throws IOException {
    if (isBlockTokenEnabled()) {
      // Use cached UGI if serving RPC calls.
      if (b.isStriped()) {
        // one token per internal block of the striped group
        Preconditions.checkState(b instanceof LocatedStripedBlock);
        LocatedStripedBlock sb = (LocatedStripedBlock) b;
        int[] indices = sb.getBlockIndices();
        Token<BlockTokenIdentifier>[] blockTokens =
            new Token[indices.length];
        ExtendedBlock internalBlock = new ExtendedBlock(b.getBlock());
        for (int i = 0; i < indices.length; i++) {
          // internal block id = group id + storage index
          internalBlock.setBlockId(b.getBlock().getBlockId() + indices[i]);
          blockTokens[i] = blockTokenSecretManager.generateToken(
              NameNode.getRemoteUser().getShortUserName(), internalBlock,
              EnumSet.of(mode));
        }
        sb.setBlockTokens(blockTokens);
      } else {
        b.setBlockToken(blockTokenSecretManager.generateToken(
            NameNode.getRemoteUser().getShortUserName(), b.getBlock(),
            EnumSet.of(mode)));
      }
    }
  }

  /** Append a key-update command for the datanode if its keys are stale. */
  void addKeyUpdateCommand(final List<DatanodeCommand> cmds,
      final DatanodeDescriptor nodeinfo) {
    // check access key update
    if (isBlockTokenEnabled() && nodeinfo.needKeyUpdate) {
      cmds.add(new KeyUpdateCommand(blockTokenSecretManager.exportKeys()));
      nodeinfo.needKeyUpdate = false;
    }
  }

  /** @return a data-transfer encryption key, or null when not applicable. */
  public DataEncryptionKey generateDataEncryptionKey() {
    if (isBlockTokenEnabled() && encryptDataTransfer) {
      return
blockTokenSecretManager.generateDataEncryptionKey();
    } else {
      return null;
    }
  }

  /**
   * Clamp the specified replication between the minimum and the maximum
   * replication levels.
   */
  public short adjustReplication(short replication) {
    return replication < minReplication? minReplication
        : replication > maxReplication? maxReplication: replication;
  }

  /**
   * Check whether the replication parameter is within the range
   * determined by system configuration and throw an exception if it's not.
   *
   * @param src the path to the target file
   * @param replication the requested replication factor
   * @param clientName the name of the client node making the request
   * @throws java.io.IOException thrown if the requested replication factor
   * is out of bounds
   */
  public void verifyReplication(String src,
                                short replication,
                                String clientName) throws IOException {
    if (replication < minReplication || replication > maxReplication) {
      StringBuilder msg = new StringBuilder("Requested replication factor of ");
      msg.append(replication);
      if (replication > maxReplication) {
        msg.append(" exceeds maximum of ");
        msg.append(maxReplication);
      } else {
        msg.append(" is less than the required minimum of ");
        msg.append(minReplication);
      }
      msg.append(" for ").append(src);
      if (clientName != null) {
        msg.append(" from ").append(clientName);
      }
      throw new IOException(msg.toString());
    }
  }

  /**
   * Check if a block is replicated to at least the minimum replication.
   */
  public boolean isSufficientlyReplicated(BlockInfo b) {
    // Compare against the lesser of the minReplication and number of live DNs.
    final int replication =
        Math.min(minReplication, getDatanodeManager().getNumLiveDataNodes());
    return countNodes(b).liveReplicas() >= replication;
  }

  /**
   * return a list of blocks & their locations on <code>datanode</code> whose
   * total size is <code>size</code>
   *
   * @param datanode on which blocks are located
   * @param size total size of blocks
   */
  public BlocksWithLocations getBlocks(DatanodeID datanode, long size
      ) throws IOException {
    namesystem.checkOperation(OperationCategory.READ);
    namesystem.readLock();
    try {
      // re-check under the lock: state may have changed since the first check
      namesystem.checkOperation(OperationCategory.READ);
      return getBlocksWithLocations(datanode, size);
    } finally {
      namesystem.readUnlock();
    }
  }

  /** Get all blocks with location information from a datanode. */
  private BlocksWithLocations getBlocksWithLocations(final DatanodeID datanode,
      final long size) throws UnregisteredNodeException {
    final DatanodeDescriptor node =
        getDatanodeManager().getDatanode(datanode);
    if (node == null) {
      blockLog.warn("BLOCK* getBlocks: Asking for blocks from an"
          + " unrecorded node {}", datanode);
      throw new HadoopIllegalArgumentException(
          "Datanode " + datanode + " not found.");
    }

    int numBlocks = node.numBlocks();
    if(numBlocks == 0) {
      return new BlocksWithLocations(new BlockWithLocations[0]);
    }
    Iterator<BlockInfo> iter = node.getBlockIterator();
    // starting from a random block
    // (randomized so repeated balancer calls sample different blocks)
    int startBlock = ThreadLocalRandom.current().nextInt(numBlocks);
    // skip blocks
    for(int i=0; i<startBlock; i++) {
      iter.next();
    }
    List<BlockWithLocations> results = new ArrayList<BlockWithLocations>();
    long totalSize = 0;
    BlockInfo curBlock;
    while(totalSize<size && iter.hasNext()) {
      curBlock = iter.next();
      if(!curBlock.isComplete())  continue;
      totalSize += addBlock(curBlock, results);
    }
    if(totalSize<size) {
      // wrap around to the beginning, stopping before the start position
      iter = node.getBlockIterator(); // start from the beginning
      for(int i=0; i<startBlock&&totalSize<size; i++) {
        curBlock = iter.next();
        if(!curBlock.isComplete())  continue;
        totalSize += addBlock(curBlock, results);
      }
    }

    return new BlocksWithLocations(
        results.toArray(new BlockWithLocations[results.size()]));
  }

  /** Remove the blocks associated to the given datanode. */
  void removeBlocksAssociatedTo(final DatanodeDescriptor node) {
    final Iterator<BlockInfo> it = node.getBlockIterator();
    while(it.hasNext()) {
      removeStoredBlock(it.next(), node);
    }
    // Remove all pending DN messages referencing this DN.
    pendingDNMessages.removeAllMessagesForDatanode(node);

    node.resetBlocks();
    invalidateBlocks.remove(node);
  }

  /** Remove the blocks associated to the given DatanodeStorageInfo. */
  void removeBlocksAssociatedTo(final DatanodeStorageInfo storageInfo) {
    assert namesystem.hasWriteLock();
    final Iterator<BlockInfo> it = storageInfo.getBlockIterator();
    DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();
    while(it.hasNext()) {
      BlockInfo block = it.next();
      removeStoredBlock(block, node);
      final Block b = getBlockOnStorage(block, storageInfo);
      if (b != null) {
        invalidateBlocks.remove(node, b);
      }
    }
    // removing replicas may allow safe mode to be (re)evaluated
    namesystem.checkSafeMode();
  }

  /**
   * Adds block to list of blocks which will be invalidated on specified
   * datanode and log the operation
   */
  void addToInvalidates(final Block block, final DatanodeInfo datanode) {
    if (!isPopulatingReplQueues()) {
      return;
    }
    invalidateBlocks.add(block, datanode, true);
  }

  /**
   * Adds block to list of blocks which will be invalidated on all its
   * datanodes.
*/ private void addToInvalidates(BlockInfo storedBlock) { if (!isPopulatingReplQueues()) { return; } StringBuilder datanodes = new StringBuilder(); for(DatanodeStorageInfo storage : blocksMap.getStorages(storedBlock, State.NORMAL)) { final DatanodeDescriptor node = storage.getDatanodeDescriptor(); final Block b = getBlockOnStorage(storedBlock, storage); if (b != null) { invalidateBlocks.add(b, node, false); datanodes.append(node).append(" "); } } if (datanodes.length() != 0) { blockLog.debug("BLOCK* addToInvalidates: {} {}", storedBlock, datanodes.toString()); } } private Block getBlockOnStorage(BlockInfo storedBlock, DatanodeStorageInfo storage) { return storedBlock.isStriped() ? ((BlockInfoStriped) storedBlock).getBlockOnStorage(storage) : storedBlock; } /** * Remove all block invalidation tasks under this datanode UUID; * used when a datanode registers with a new UUID and the old one * is wiped. */ void removeFromInvalidates(final DatanodeInfo datanode) { if (!isPopulatingReplQueues()) { return; } invalidateBlocks.remove(datanode); } /** * Mark the block belonging to datanode as corrupt * @param blk Block to be marked as corrupt * @param dn Datanode which holds the corrupt replica * @param storageID if known, null otherwise. * @param reason a textual reason why the block should be marked corrupt, * for logging purposes */ public void findAndMarkBlockAsCorrupt(final ExtendedBlock blk, final DatanodeInfo dn, String storageID, String reason) throws IOException { assert namesystem.hasWriteLock(); final Block reportedBlock = blk.getLocalBlock(); final BlockInfo storedBlock = getStoredBlock(reportedBlock); if (storedBlock == null) { // Check if the replica is in the blockMap, if not // ignore the request for now. 
This could happen when BlockScanner // thread of Datanode reports bad block before Block reports are sent // by the Datanode on startup blockLog.debug("BLOCK* findAndMarkBlockAsCorrupt: {} not found", blk); return; } DatanodeDescriptor node = getDatanodeManager().getDatanode(dn); if (node == null) { throw new IOException("Cannot mark " + blk + " as corrupt because datanode " + dn + " (" + dn.getDatanodeUuid() + ") does not exist"); } markBlockAsCorrupt(new BlockToMarkCorrupt(reportedBlock, storedBlock, blk.getGenerationStamp(), reason, Reason.CORRUPTION_REPORTED), storageID == null ? null : node.getStorageInfo(storageID), node); } /** * Mark a replica (of a contiguous block) or an internal block (of a striped * block group) as corrupt. * @param b Indicating the reported bad block and the corresponding BlockInfo * stored in blocksMap. * @param storageInfo storage that contains the block, if known. null otherwise. */ private void markBlockAsCorrupt(BlockToMarkCorrupt b, DatanodeStorageInfo storageInfo, DatanodeDescriptor node) throws IOException { if (b.getStored().isDeleted()) { blockLog.debug("BLOCK markBlockAsCorrupt: {} cannot be marked as" + " corrupt as it does not belong to any file", b); addToInvalidates(b.getCorrupted(), node); return; } short expectedReplicas = getExpectedReplicaNum(b.getStored()); // Add replica to the data-node if it is not already there if (storageInfo != null) { storageInfo.addBlock(b.getStored(), b.getCorrupted()); } // Add this replica to corruptReplicas Map. 
For striped blocks, we always // use the id of whole striped block group when adding to corruptReplicas Block corrupted = new Block(b.getCorrupted()); if (b.getStored().isStriped()) { corrupted.setBlockId(b.getStored().getBlockId()); } corruptReplicas.addToCorruptReplicasMap(corrupted, node, b.getReason(), b.getReasonCode()); NumberReplicas numberOfReplicas = countNodes(b.getStored()); boolean hasEnoughLiveReplicas = numberOfReplicas.liveReplicas() >= expectedReplicas; boolean minReplicationSatisfied = hasMinStorage(b.getStored(), numberOfReplicas.liveReplicas()); boolean hasMoreCorruptReplicas = minReplicationSatisfied && (numberOfReplicas.liveReplicas() + numberOfReplicas.corruptReplicas()) > expectedReplicas; boolean corruptedDuringWrite = minReplicationSatisfied && b.isCorruptedDuringWrite(); // case 1: have enough number of live replicas // case 2: corrupted replicas + live replicas > Replication factor // case 3: Block is marked corrupt due to failure while writing. In this // case genstamp will be different than that of valid block. // In all these cases we can delete the replica. // In case of 3, rbw block will be deleted and valid block can be replicated if (hasEnoughLiveReplicas || hasMoreCorruptReplicas || corruptedDuringWrite) { // the block is over-replicated so invalidate the replicas immediately invalidateBlock(b, node, numberOfReplicas); } else if (isPopulatingReplQueues()) { // add the block to neededReplication updateNeededReplications(b.getStored(), -1, 0); } } /** * Invalidates the given block on the given datanode. 
   * @return true if the block was successfully invalidated and no longer
   * present in the BlocksMap
   */
  private boolean invalidateBlock(BlockToMarkCorrupt b, DatanodeInfo dn,
      NumberReplicas nr) throws IOException {
    blockLog.debug("BLOCK* invalidateBlock: {} on {}", b, dn);
    DatanodeDescriptor node = getDatanodeManager().getDatanode(dn);
    if (node == null) {
      throw new IOException("Cannot invalidate " + b
          + " because datanode " + dn + " does not exist.");
    }

    // Check how many copies we have of the block
    if (nr.replicasOnStaleNodes() > 0) {
      // Stale block reports may still list replicas we are about to delete;
      // postpone so we do not lose the last good copy by mistake.
      blockLog.debug("BLOCK* invalidateBlocks: postponing " +
          "invalidation of {} on {} because {} replica(s) are located on " +
          "nodes with potentially out-of-date block reports", b, dn,
          nr.replicasOnStaleNodes());
      postponeBlock(b.getCorrupted());
      return false;
    } else {
      // we already checked the number of replicas in the caller of this
      // function and know there are enough live replicas, so we can delete it.
      addToInvalidates(b.getCorrupted(), dn);
      removeStoredBlock(b.getStored(), node);
      blockLog.debug("BLOCK* invalidateBlocks: {} on {} listed for deletion.",
          b, dn);
      return true;
    }
  }

  public void setPostponeBlocksFromFuture(boolean postpone) {
    this.shouldPostponeBlocksFromFuture = postpone;
  }

  // Queue a block whose invalidation must wait for fresh block reports;
  // the counter mirrors the set size for cheap metric reads.
  private void postponeBlock(Block blk) {
    if (postponedMisreplicatedBlocks.add(blk)) {
      postponedMisreplicatedBlocksCount.incrementAndGet();
    }
  }

  // Refresh the cached queue-size metrics from the authoritative structures.
  void updateState() {
    pendingReplicationBlocksCount = pendingReplications.size();
    underReplicatedBlocksCount = neededReplications.size();
    corruptReplicaBlocksCount = corruptReplicas.size();
  }

  /** Return number of under-replicated but not missing blocks */
  public int getUnderReplicatedNotMissingBlocks() {
    return neededReplications.getUnderReplicatedBlockCount();
  }

  /**
   * Schedule blocks for deletion at datanodes
   * @param nodesToProcess number of datanodes to schedule deletion work
   * @return total number of block for deletion
   */
  int computeInvalidateWork(int nodesToProcess) {
    final List<DatanodeInfo> nodes = invalidateBlocks.getDatanodes();
    // Shuffle so deletion work is spread evenly instead of always hitting
    // the same datanodes first.
    Collections.shuffle(nodes);

    nodesToProcess = Math.min(nodes.size(), nodesToProcess);

    int blockCnt = 0;
    for (DatanodeInfo dnInfo : nodes) {
      int blocks = invalidateWorkForOneNode(dnInfo);
      if (blocks > 0) {
        blockCnt += blocks;
        if (--nodesToProcess == 0) {
          break;
        }
      }
    }
    return blockCnt;
  }

  /**
   * Scan blocks in {@link #neededReplications} and assign recovery
   * (replication or erasure coding) work to data-nodes they belong to.
   *
   * The number of process blocks equals either twice the number of live
   * data-nodes or the number of under-replicated blocks whichever is less.
   *
   * @return number of blocks scheduled for replication during this iteration.
   */
  int computeBlockRecoveryWork(int blocksToProcess) {
    List<List<BlockInfo>> blocksToReplicate = null;
    namesystem.writeLock();
    try {
      // Choose the blocks to be replicated
      blocksToReplicate = neededReplications
          .chooseUnderReplicatedBlocks(blocksToProcess);
    } finally {
      namesystem.writeUnlock();
    }
    return computeRecoveryWorkForBlocks(blocksToReplicate);
  }

  /**
   * Recover a set of blocks to full strength through replication or
   * erasure coding
   *
   * Runs in three phases: (1) under the write lock, categorize at-risk
   * blocks into recovery tasks; (2) WITHOUT the lock, choose target nodes
   * (placement is expensive); (3) under the lock again, re-validate and
   * hand the tasks to datanodes.
   *
   * @param blocksToRecover blocks to be recovered, for each priority
   * @return the number of blocks scheduled for replication
   */
  @VisibleForTesting
  int computeRecoveryWorkForBlocks(List<List<BlockInfo>> blocksToRecover) {
    int requiredReplication, numEffectiveReplicas;
    List<DatanodeDescriptor> containingNodes;
    BlockCollection bc;
    int additionalReplRequired;

    int scheduledWork = 0;
    List<BlockRecoveryWork> recovWork = new LinkedList<>();

    // Step 1: categorize at-risk blocks into replication and EC tasks
    namesystem.writeLock();
    try {
      synchronized (neededReplications) {
        for (int priority = 0; priority < blocksToRecover.size(); priority++) {
          for (BlockInfo block : blocksToRecover.get(priority)) {
            BlockRecoveryWork rw = scheduleRecovery(block, priority);
            if (rw != null) {
              recovWork.add(rw);
            }
          }
        }
      }
    } finally {
      namesystem.writeUnlock();
    }

    // Step 2: choose target nodes for each recovery task
    final Set<Node> excludedNodes = new HashSet<>();
    for(BlockRecoveryWork rw : recovWork){
      // Exclude all of the containing nodes from being targets.
      // This list includes decommissioning or corrupt nodes.
      excludedNodes.clear();
      for (DatanodeDescriptor dn : rw.getContainingNodes()) {
        excludedNodes.add(dn);
      }

      // choose replication targets: NOT HOLDING THE GLOBAL LOCK
      // It is costly to extract the filename for which chooseTargets is called,
      // so for now we pass in the block collection itself.
      final BlockPlacementPolicy placementPolicy =
          placementPolicies.getPolicy(rw.getBlock().isStriped());
      rw.chooseTargets(placementPolicy, storagePolicySuite, excludedNodes);
    }

    // Step 3: add tasks to the DN
    namesystem.writeLock();
    try {
      for(BlockRecoveryWork rw : recovWork){
        final DatanodeStorageInfo[] targets = rw.getTargets();
        if(targets == null || targets.length == 0){
          rw.resetTargets();
          continue;
        }

        synchronized (neededReplications) {
          if (validateRecoveryWork(rw)) {
            scheduledWork++;
          }
        }
      }
    } finally {
      namesystem.writeUnlock();
    }

    if (blockLog.isInfoEnabled()) {
      // log which blocks have been scheduled for replication
      for(BlockRecoveryWork rw : recovWork){
        DatanodeStorageInfo[] targets = rw.getTargets();
        if (targets != null && targets.length != 0) {
          StringBuilder targetList = new StringBuilder("datanode(s)");
          for (DatanodeStorageInfo target : targets) {
            targetList.append(' ');
            targetList.append(target.getDatanodeDescriptor());
          }
          blockLog.debug("BLOCK* ask {} to replicate {} to {}", rw.getSrcNodes(),
              rw.getBlock(), targetList);
        }
      }
    }
    if (blockLog.isDebugEnabled()) {
      blockLog.debug("BLOCK* neededReplications = {} pendingReplications = {}",
          neededReplications.size(), pendingReplications.size());
    }

    return scheduledWork;
  }

  // True when live + pending replicas reach the required count; when nothing
  // is pending we additionally require sufficient rack spread.
  boolean hasEnoughEffectiveReplicas(BlockInfo block,
      NumberReplicas numReplicas, int pendingReplicaNum, int required) {
    int numEffectiveReplicas = numReplicas.liveReplicas() + pendingReplicaNum;
    return (numEffectiveReplicas >= required) &&
        (pendingReplicaNum > 0 || blockHasEnoughRacks(block, required));
  }

  /**
   * Build a recovery task for one under-replicated block, or return null if
   * the block no longer needs (or cannot receive) recovery. Must be called
   * with the namesystem write lock held — it mutates neededReplications.
   */
  private BlockRecoveryWork scheduleRecovery(BlockInfo block, int priority) {
    // block should belong to a file
    BlockCollection bc = getBlockCollection(block);
    // abandoned block or block reopened for append
    if (bc == null
        || (bc.isUnderConstruction() && block.equals(bc.getLastBlock()))) {
      // remove from neededReplications
      neededReplications.remove(block, priority);
      return null;
    }

    short requiredReplication = getExpectedReplicaNum(block);

    // get a source data-node
    List<DatanodeDescriptor> containingNodes = new ArrayList<>();
    List<DatanodeStorageInfo> liveReplicaNodes = new ArrayList<>();
    NumberReplicas numReplicas = new NumberReplicas();
    List<Short> liveBlockIndices = new ArrayList<>();
    final DatanodeDescriptor[] srcNodes = chooseSourceDatanodes(block,
        containingNodes, liveReplicaNodes, numReplicas,
        liveBlockIndices, priority);
    if(srcNodes == null || srcNodes.length == 0) {
      // block can not be recovered from any node
      LOG.debug("Block " + block + " cannot be recovered " +
          "from any node");
      return null;
    }

    // liveReplicaNodes can include READ_ONLY_SHARED replicas which are
    // not included in the numReplicas.liveReplicas() count
    assert liveReplicaNodes.size() >= numReplicas.liveReplicas();

    int pendingNum = pendingReplications.getNumReplicas(block);
    if (hasEnoughEffectiveReplicas(block, numReplicas, pendingNum,
        requiredReplication)) {
      neededReplications.remove(block, priority);
      blockLog.debug("BLOCK* Removing {} from neededReplications as" +
          " it has enough replicas", block);
      return null;
    }

    final int additionalReplRequired;
    if (numReplicas.liveReplicas() < requiredReplication) {
      additionalReplRequired = requiredReplication - numReplicas.liveReplicas()
          - pendingNum;
    } else {
      additionalReplRequired = 1; // Needed on a new rack
    }

    if (block.isStriped()) {
      // EC recovery also needs the indices of the healthy internal blocks.
      short[] indices = new short[liveBlockIndices.size()];
      for (int i = 0 ; i < liveBlockIndices.size(); i++) {
        indices[i] = liveBlockIndices.get(i);
      }
      return new ErasureCodingWork(block, bc, srcNodes,
          containingNodes, liveReplicaNodes, additionalReplRequired,
          priority, indices);
    } else {
      return new ReplicationWork(block, bc, srcNodes,
          containingNodes, liveReplicaNodes, additionalReplRequired,
          priority);
    }
  }

  /**
   * Re-validate a recovery task after the global lock was dropped for target
   * selection, and if still needed hand it to the chosen datanodes. Returns
   * true only when work was actually scheduled.
   */
  private boolean validateRecoveryWork(BlockRecoveryWork rw) {
    BlockInfo block = rw.getBlock();
    int priority = rw.getPriority();
    // Recheck since global lock was released
    // block should belong to a file
    BlockCollection bc = getBlockCollection(block);
    // abandoned block or block reopened for append
    if (bc == null
        || (bc.isUnderConstruction() && block.equals(bc.getLastBlock()))) {
      neededReplications.remove(block, priority);
      rw.resetTargets();
      return false;
    }

    // do not schedule more if enough replicas is already pending
    final short requiredReplication = getExpectedReplicaNum(block);
    NumberReplicas numReplicas = countNodes(block);
    final int pendingNum = pendingReplications.getNumReplicas(block);
    if (hasEnoughEffectiveReplicas(block, numReplicas, pendingNum,
        requiredReplication)) {
      neededReplications.remove(block, priority);
      rw.resetTargets();
      blockLog.debug("BLOCK* Removing {} from neededReplications as" +
          " it has enough replicas", block);
      return false;
    }

    DatanodeStorageInfo[] targets = rw.getTargets();
    if ( (numReplicas.liveReplicas() >= requiredReplication) &&
        (!blockHasEnoughRacks(block, requiredReplication)) ) {
      if (rw.getSrcNodes()[0].getNetworkLocation().equals(
          targets[0].getDatanodeDescriptor().getNetworkLocation())) {
        //No use continuing, unless a new rack in this case
        return false;
      }
    }

    // Add block to the to be recovered list
    if (block.isStriped()) {
      assert rw instanceof ErasureCodingWork;
      assert rw.getTargets().length > 0;
      String src = getBlockCollection(block).getName();
      ErasureCodingPolicy ecPolicy = null;
      try {
        ecPolicy = namesystem.getErasureCodingPolicyForPath(src);
      } catch (IOException e) {
        blockLog.warn("Failed to get EC policy for the file {} ", src);
      }
      if (ecPolicy == null) {
        blockLog.warn("No erasure coding policy found for the file {}. "
            + "So cannot proceed for recovery", src);
        // TODO: we may have to revisit later for what we can do better to
        // handle this case.
        return false;
      }
      rw.getTargets()[0].getDatanodeDescriptor().addBlockToBeErasureCoded(
          new ExtendedBlock(namesystem.getBlockPoolId(), block),
          rw.getSrcNodes(), rw.getTargets(),
          ((ErasureCodingWork) rw).getLiveBlockIndicies(), ecPolicy);
    } else {
      rw.getSrcNodes()[0].addBlockToBeReplicated(block, targets);
    }

    DatanodeStorageInfo.incrementBlocksScheduled(targets);

    // Move the block-replication into a "pending" state.
    // The reason we use 'pending' is so we can retry
    // replications that fail after an appropriate amount of time.
    pendingReplications.increment(block,
        DatanodeStorageInfo.toDatanodeDescriptors(targets));
    blockLog.debug("BLOCK* block {} is moved from neededReplications to "
        + "pendingReplications", block);

    int numEffectiveReplicas = numReplicas.liveReplicas() + pendingNum;
    // remove from neededReplications
    if(numEffectiveReplicas + targets.length >= requiredReplication) {
      neededReplications.remove(block, priority);
    }
    return true;
  }

  /** Choose target for WebHDFS redirection. */
  public DatanodeStorageInfo[] chooseTarget4WebHDFS(String src,
      DatanodeDescriptor clientnode, Set<Node> excludes, long blocksize) {
    return placementPolicies.getPolicy(false).chooseTarget(src, 1, clientnode,
        Collections.<DatanodeStorageInfo>emptyList(), false, excludes,
        blocksize, storagePolicySuite.getDefaultPolicy());
  }

  /** Choose target for getting additional datanodes for an existing pipeline.
   */
  public DatanodeStorageInfo[] chooseTarget4AdditionalDatanode(String src,
      int numAdditionalNodes,
      Node clientnode,
      List<DatanodeStorageInfo> chosen,
      Set<Node> excludes,
      long blocksize,
      byte storagePolicyID,
      boolean isStriped) {
    final BlockStoragePolicy storagePolicy =
        storagePolicySuite.getPolicy(storagePolicyID);
    final BlockPlacementPolicy blockplacement =
        placementPolicies.getPolicy(isStriped);
    return blockplacement.chooseTarget(src, numAdditionalNodes, clientnode,
        chosen, true, excludes, blocksize, storagePolicy);
  }

  /**
   * Choose target datanodes for creating a new block.
   *
   * @throws IOException
   *           if the number of targets < minimum replication.
   * @see BlockPlacementPolicy#chooseTarget(String, int, Node,
   *      Set, long, List, BlockStoragePolicy)
   */
  public DatanodeStorageInfo[] chooseTarget4NewBlock(final String src,
      final int numOfReplicas, final Node client,
      final Set<Node> excludedNodes,
      final long blocksize,
      final List<String> favoredNodes,
      final byte storagePolicyID,
      final boolean isStriped) throws IOException {
    List<DatanodeDescriptor> favoredDatanodeDescriptors =
        getDatanodeDescriptors(favoredNodes);
    final BlockStoragePolicy storagePolicy =
        storagePolicySuite.getPolicy(storagePolicyID);
    final BlockPlacementPolicy blockplacement =
        placementPolicies.getPolicy(isStriped);
    final DatanodeStorageInfo[] targets = blockplacement.chooseTarget(src,
        numOfReplicas, client, excludedNodes, blocksize,
        favoredDatanodeDescriptors, storagePolicy);
    // Fail fast if the placement policy could not satisfy even the minimum
    // replication; the error message helps diagnose capacity/exclusion issues.
    if (targets.length < minReplication) {
      throw new IOException("File " + src + " could only be replicated to "
          + targets.length + " nodes instead of minReplication (="
          + minReplication + ").  There are "
          + getDatanodeManager().getNetworkTopology().getNumOfLeaves()
          + " datanode(s) running and "
          + (excludedNodes == null? "no": excludedNodes.size())
          + " node(s) are excluded in this operation.");
    }
    return targets;
  }

  /**
   * Get list of datanode descriptors for given list of nodes. Nodes are
   * hostaddress:port or just hostaddress.
   *
   * Returns null when the input list is null; unknown nodes are silently
   * skipped, so the result may be shorter than the input.
   */
  List<DatanodeDescriptor> getDatanodeDescriptors(List<String> nodes) {
    List<DatanodeDescriptor> datanodeDescriptors = null;
    if (nodes != null) {
      datanodeDescriptors = new ArrayList<DatanodeDescriptor>(nodes.size());
      for (int i = 0; i < nodes.size(); i++) {
        DatanodeDescriptor node = datanodeManager.getDatanodeDescriptor(nodes.get(i));
        if (node != null) {
          datanodeDescriptors.add(node);
        }
      }
    }
    return datanodeDescriptors;
  }

  /**
   * Parse the data-nodes the block belongs to and choose a certain number
   * from them to be the recovery sources.
   *
   * We prefer nodes that are in DECOMMISSION_INPROGRESS state to other nodes
   * since the former do not have write traffic and hence are less busy.
   * We do not use already decommissioned nodes as a source.
   * Otherwise we randomly choose nodes among those that did not reach their
   * replication limits. However, if the recovery work is of the highest
   * priority and all nodes have reached their replication limits, we will
   * randomly choose the desired number of nodes despite the replication limit.
   *
   * In addition form a list of all nodes containing the block
   * and calculate its replication numbers.
   *
   * @param block Block for which a replication source is needed
   * @param containingNodes List to be populated with nodes found to contain
   *                        the given block
   * @param nodesContainingLiveReplicas List to be populated with nodes found
   *                                    to contain live replicas of the given
   *                                    block
   * @param numReplicas NumberReplicas instance to be initialized with the
   *                    counts of live, corrupt, excess, and decommissioned
   *                    replicas of the given block.
   * @param liveBlockIndices List to be populated with indices of healthy
   *                         blocks in a striped block group
   * @param priority integer representing replication priority of the given
   *                 block
   * @return the array of DatanodeDescriptor of the chosen nodes from which to
   *         recover the given block
   */
  @VisibleForTesting
  DatanodeDescriptor[] chooseSourceDatanodes(BlockInfo block,
      List<DatanodeDescriptor> containingNodes,
      List<DatanodeStorageInfo> nodesContainingLiveReplicas,
      NumberReplicas numReplicas,
      List<Short> liveBlockIndices, int priority) {
    // Output collections are caller-owned; reset them so repeated calls do
    // not accumulate stale entries.
    containingNodes.clear();
    nodesContainingLiveReplicas.clear();
    List<DatanodeDescriptor> srcNodes = new ArrayList<>();
    int live = 0;
    int decommissioned = 0;
    int decommissioning = 0;
    int corrupt = 0;
    int excess = 0;
    liveBlockIndices.clear();
    final boolean isStriped = block.isStriped();

    Collection<DatanodeDescriptor> nodesCorrupt = corruptReplicas.getNodes(block);
    for (DatanodeStorageInfo storage : blocksMap.getStorages(block)) {
      final DatanodeDescriptor node = storage.getDatanodeDescriptor();
      LightWeightLinkedSet<BlockInfo> excessBlocks =
          excessReplicateMap.get(node.getDatanodeUuid());
      // Only NORMAL storages count toward the replica tallies; READ_ONLY
      // storages contribute 0.
      int countableReplica = storage.getState() == State.NORMAL ? 1 : 0;
      // Classify the replica into exactly one bucket (corrupt wins).
      if ((nodesCorrupt != null) && (nodesCorrupt.contains(node)))
        corrupt += countableReplica;
      else if (node.isDecommissionInProgress()) {
        decommissioning += countableReplica;
      } else if (node.isDecommissioned()) {
        decommissioned += countableReplica;
      } else if (excessBlocks != null && excessBlocks.contains(block)) {
        excess += countableReplica;
      } else {
        nodesContainingLiveReplicas.add(storage);
        live += countableReplica;
      }
      containingNodes.add(node);

      // Check if this replica is corrupt
      // If so, do not select the node as src node
      if ((nodesCorrupt != null) && nodesCorrupt.contains(node))
        continue;
      if(priority != UnderReplicatedBlocks.QUEUE_HIGHEST_PRIORITY
          && !node.isDecommissionInProgress()
          && node.getNumberOfBlocksToBeReplicated() >= maxReplicationStreams) {
        continue; // already reached replication limit
      }
      if (node.getNumberOfBlocksToBeReplicated() >= replicationStreamsHardLimit) {
        continue;
      }
      // the block must not be scheduled for removal on srcNode
      if(excessBlocks != null && excessBlocks.contains(block))
        continue;
      // never use already decommissioned nodes
      if(node.isDecommissioned())
        continue;

      // Striped recovery needs every healthy source; replicated recovery
      // needs only one, chosen randomly below.
      if(isStriped || srcNodes.isEmpty()) {
        srcNodes.add(node);
        if (isStriped) {
          liveBlockIndices.add((short) ((BlockInfoStriped) block).
              getStorageBlockIndex(storage));
        }
        continue;
      }
      // for replicated block, switch to a different node randomly
      // this to prevent from deterministically selecting the same node even
      // if the node failed to replicate the block on previous iterations
      if (!isStriped && ThreadLocalRandom.current().nextBoolean()) {
        srcNodes.set(0, node);
      }
    }
    if(numReplicas != null)
      numReplicas.initialize(live, decommissioned, decommissioning, corrupt,
          excess, 0);
    return srcNodes.toArray(new DatanodeDescriptor[srcNodes.size()]);
  }

  /**
   * If there were any replication requests that timed out, reap them
   * and put them back into the neededReplication queue
   */
  private void processPendingReplications() {
    BlockInfo[] timedOutItems = pendingReplications.getTimedOutBlocks();
    if (timedOutItems != null) {
      namesystem.writeLock();
      try {
        for (int i = 0; i < timedOutItems.length; i++) {
          /*
           * Use the blockinfo from the blocksmap to be certain we're working
           * with the most up-to-date block information (e.g. genstamp).
           */
          BlockInfo bi = blocksMap.getStoredBlock(timedOutItems[i]);
          if (bi == null) {
            continue;
          }
          NumberReplicas num = countNodes(timedOutItems[i]);
          if (isNeededReplication(bi, num.liveReplicas())) {
            neededReplications.add(bi, num.liveReplicas(),
                num.decommissionedAndDecommissioning(), getReplication(bi));
          }
        }
      } finally {
        namesystem.writeUnlock();
      }
      /* If we know the target datanodes where the replication timedout,
       * we could invoke decBlocksScheduled() on it. Its ok for now.
       */
    }
  }

  // Issue a block-report lease to a registered, known datanode; returns 0
  // (no lease) for unknown or unregistered nodes.
  public long requestBlockReportLeaseId(DatanodeRegistration nodeReg) {
    assert namesystem.hasReadLock();
    DatanodeDescriptor node = null;
    try {
      node = datanodeManager.getDatanode(nodeReg);
    } catch (UnregisteredNodeException e) {
      LOG.warn("Unregistered datanode {}", nodeReg);
      return 0;
    }
    if (node == null) {
      LOG.warn("Failed to find datanode {}", nodeReg);
      return 0;
    }
    // Request a new block report lease.  The BlockReportLeaseManager has
    // its own internal locking.
    long leaseId = blockReportLeaseManager.requestLease(node);
    BlockManagerFaultInjector.getInstance().
        requestBlockReportLease(node, leaseId);
    return leaseId;
  }

  /**
   * StatefulBlockInfo is used to build the "toUC" list, which is a list of
   * updates to the information about under-construction blocks.
   * Besides the block in question, it provides the ReplicaState
   * reported by the datanode in the block report.
   */
  static class StatefulBlockInfo {
    final BlockInfo storedBlock; // should be UC block
    final Block reportedBlock;
    final ReplicaState reportedState;

    StatefulBlockInfo(BlockInfo storedBlock,
        Block reportedBlock, ReplicaState reportedState) {
      // Under-construction lists must never hold completed blocks.
      Preconditions.checkArgument(!storedBlock.isComplete());
      this.storedBlock = storedBlock;
      this.reportedBlock = reportedBlock;
      this.reportedState = reportedState;
    }
  }

  // Pairs a stored BlockInfo with the Block as reported by the datanode,
  // for the "toAdd" list built during report diffing.
  private static class BlockInfoToAdd {
    final BlockInfo stored;
    final Block reported;

    BlockInfoToAdd(BlockInfo stored, Block reported) {
      this.stored = stored;
      this.reported = reported;
    }
  }

  /**
   * The given storage is reporting all its blocks.
   * Update the (storage-->block list) and (block-->storage list) maps.
   *
   * @return true if all known storages of the given DN have finished reporting.
   * @throws IOException
   */
  public boolean processReport(final DatanodeID nodeID,
      final DatanodeStorage storage,
      final BlockListAsLongs newReport, BlockReportContext context,
      boolean lastStorageInRpc) throws IOException {
    namesystem.writeLock();
    final long startTime = Time.monotonicNow(); //after acquiring write lock
    final long endTime;
    DatanodeDescriptor node;
    Collection<Block> invalidatedBlocks = null;

    try {
      node = datanodeManager.getDatanode(nodeID);
      if (node == null || !node.isAlive) {
        throw new IOException(
            "ProcessReport from dead or unregistered node: " + nodeID);
      }

      // To minimize startup time, we discard any second (or later) block reports
      // that we receive while still in startup phase.
      DatanodeStorageInfo storageInfo = node.getStorageInfo(storage.getStorageID());

      if (storageInfo == null) {
        // We handle this for backwards compatibility.
        storageInfo = node.updateStorage(storage);
      }
      if (namesystem.isInStartupSafeMode()
          && storageInfo.getBlockReportCount() > 0) {
        blockLog.info("BLOCK* processReport: "
            + "discarded non-initial block report from {}"
            + " because namenode still in startup phase", nodeID);
        blockReportLeaseManager.removeLease(node);
        return !node.hasStaleStorages();
      }
      if (context != null) {
        // A stale or revoked lease means this report must be retried later.
        if (!blockReportLeaseManager.checkLease(node, startTime,
              context.getLeaseId())) {
          return false;
        }
      }

      if (storageInfo.getBlockReportCount() == 0) {
        // The first block report can be processed a lot more efficiently than
        // ordinary block reports.  This shortens restart times.
        LOG.info("Processing first storage report for " +
            storageInfo.getStorageID() + " from datanode " +
            nodeID.getDatanodeUuid());
        processFirstBlockReport(storageInfo, newReport);
      } else {
        invalidatedBlocks = processReport(storageInfo, newReport);
      }
      storageInfo.receivedBlockReport();
      if (context != null) {
        storageInfo.setLastBlockReportId(context.getReportId());
        if (lastStorageInRpc) {
          int rpcsSeen = node.updateBlockReportContext(context);
          if (rpcsSeen >= context.getTotalRpcs()) {
            // Full report set received: release the lease and purge storages
            // the DN no longer reports (zombies).
            long leaseId = blockReportLeaseManager.removeLease(node);
            BlockManagerFaultInjector.getInstance().
                removeBlockReportLease(node, leaseId);
            List<DatanodeStorageInfo> zombies = node.removeZombieStorages();
            if (zombies.isEmpty()) {
              LOG.debug("processReport 0x{}: no zombie storages found.",
                  Long.toHexString(context.getReportId()));
            } else {
              for (DatanodeStorageInfo zombie : zombies) {
                removeZombieReplicas(context, zombie);
              }
            }
            node.clearBlockReportContext();
          } else {
            LOG.debug("processReport 0x{}: {} more RPCs remaining in this " +
                    "report.", Long.toHexString(context.getReportId()),
                (context.getTotalRpcs() - rpcsSeen)
            );
          }
        }
      }
    } finally {
      endTime = Time.monotonicNow();
      namesystem.writeUnlock();
    }

    if (invalidatedBlocks != null) {
      for (Block b : invalidatedBlocks) {
        blockLog.info("BLOCK* processReport: {} on node {} size {} does not " +
            "belong to any file", b, node, b.getNumBytes());
      }
    }

    // Log the block report processing stats from Namenode perspective
    final NameNodeMetrics metrics = NameNode.getNameNodeMetrics();
    if (metrics != null) {
      metrics.addBlockReport((int) (endTime - startTime));
    }
    blockLog.info("BLOCK* processReport: from storage {} node {}, " +
        "blocks: {}, hasStaleStorage: {}, processing time: {} msecs", storage
        .getStorageID(), nodeID, newReport.getNumberOfBlocks(),
        node.hasStaleStorages(), (endTime - startTime));
    return !node.hasStaleStorages();
  }

  // Remove all replicas that were tracked on a storage the datanode no
  // longer reports, and drop any pending invalidation entries for them.
  private void removeZombieReplicas(BlockReportContext context,
      DatanodeStorageInfo zombie) {
    LOG.warn("processReport 0x{}: removing zombie storage {}, which no " +
        "longer exists on the DataNode.",
        Long.toHexString(context.getReportId()), zombie.getStorageID());
    assert(namesystem.hasWriteLock());
    Iterator<BlockInfo> iter = zombie.getBlockIterator();
    int prevBlocks = zombie.numBlocks();
    while (iter.hasNext()) {
      BlockInfo block = iter.next();
      // We assume that a block can be on only one storage in a DataNode.
      // That's why we pass in the DatanodeDescriptor rather than the
      // DatanodeStorageInfo.
      // TODO: remove this assumption in case we want to put a block on
      // more than one storage on a datanode (and because it's a difficult
      // assumption to really enforce)
      removeStoredBlock(block, zombie.getDatanodeDescriptor());
      Block b = getBlockOnStorage(block, zombie);
      if (b != null) {
        invalidateBlocks.remove(zombie.getDatanodeDescriptor(), b);
      }
    }
    assert(zombie.numBlocks() == 0);
    LOG.warn("processReport 0x{}: removed {} replicas from storage {}, " +
        "which no longer exists on the DataNode.",
        Long.toHexString(context.getReportId()), prevBlocks,
        zombie.getStorageID());
  }

  /**
   * Rescan the list of blocks which were previously postponed.
   */
  void rescanPostponedMisreplicatedBlocks() {
    if (getPostponedMisreplicatedBlocksCount() == 0) {
      return;
    }
    long startTimeRescanPostponedMisReplicatedBlocks = Time.monotonicNow();
    long startPostponedMisReplicatedBlocksCount =
        getPostponedMisreplicatedBlocksCount();
    namesystem.writeLock();
    try {
      // blocksPerRescan is the configured number of blocks per rescan.
      // Randomly select blocksPerRescan consecutive blocks from the HashSet
      // when the number of blocks remaining is larger than blocksPerRescan.
      // The reason we don't always pick the first blocksPerRescan blocks is to
      // handle the case if for some reason some datanodes remain in
      // content stale state for a long time and only impact the first
      // blocksPerRescan blocks.
      int i = 0;
      long startIndex = 0;
      long blocksPerRescan =
          datanodeManager.getBlocksPerPostponedMisreplicatedBlocksRescan();
      long base = getPostponedMisreplicatedBlocksCount() - blocksPerRescan;
      if (base > 0) {
        // nextLong() may be negative; fold it into [0, base].
        startIndex = ThreadLocalRandom.current().nextLong() % (base+1);
        if (startIndex < 0) {
          startIndex += (base+1);
        }
      }
      Iterator<Block> it = postponedMisreplicatedBlocks.iterator();
      for (int tmp = 0; tmp < startIndex; tmp++) {
        it.next();
      }

      for (;it.hasNext(); i++) {
        Block b = it.next();
        if (i >= blocksPerRescan) {
          break;
        }

        BlockInfo bi = getStoredBlock(b);
        if (bi == null) {
          if (LOG.isDebugEnabled()) {
            LOG.debug("BLOCK* rescanPostponedMisreplicatedBlocks: " +
                "Postponed mis-replicated block " + b + " no longer found " +
                "in block map.");
          }
          it.remove();
          postponedMisreplicatedBlocksCount.decrementAndGet();
          continue;
        }
        MisReplicationResult res = processMisReplicatedBlock(bi);
        if (LOG.isDebugEnabled()) {
          LOG.debug("BLOCK* rescanPostponedMisreplicatedBlocks: " +
              "Re-scanned block " + b + ", result is " + res);
        }
        // Keep the block queued only if it must be postponed again.
        if (res != MisReplicationResult.POSTPONE) {
          it.remove();
          postponedMisreplicatedBlocksCount.decrementAndGet();
        }
      }
    } finally {
      namesystem.writeUnlock();
      long endPostponedMisReplicatedBlocksCount =
          getPostponedMisreplicatedBlocksCount();
      LOG.info("Rescan of postponedMisreplicatedBlocks completed in " +
          (Time.monotonicNow() - startTimeRescanPostponedMisReplicatedBlocks) +
          " msecs. " + endPostponedMisReplicatedBlocksCount +
          " blocks are left. " + (startPostponedMisReplicatedBlocksCount -
          endPostponedMisReplicatedBlocksCount) + " blocks are removed.");
    }
  }

  private Collection<Block> processReport(
      final DatanodeStorageInfo storageInfo,
      final BlockListAsLongs report) throws IOException {
    // Normal case:
    // Modify the (block-->datanode) map, according to the difference
    // between the old and new block report.
// Collection<BlockInfoToAdd> toAdd = new LinkedList<>(); Collection<BlockInfo> toRemove = new TreeSet<>(); Collection<Block> toInvalidate = new LinkedList<>(); Collection<BlockToMarkCorrupt> toCorrupt = new LinkedList<>(); Collection<StatefulBlockInfo> toUC = new LinkedList<>(); reportDiff(storageInfo, report, toAdd, toRemove, toInvalidate, toCorrupt, toUC); DatanodeDescriptor node = storageInfo.getDatanodeDescriptor(); // Process the blocks on each queue for (StatefulBlockInfo b : toUC) { addStoredBlockUnderConstruction(b, storageInfo); } for (BlockInfo b : toRemove) { removeStoredBlock(b, node); } int numBlocksLogged = 0; for (BlockInfoToAdd b : toAdd) { addStoredBlock(b.stored, b.reported, storageInfo, null, numBlocksLogged < maxNumBlocksToLog); numBlocksLogged++; } if (numBlocksLogged > maxNumBlocksToLog) { blockLog.info("BLOCK* processReport: logged info for {} of {} " + "reported.", maxNumBlocksToLog, numBlocksLogged); } for (Block b : toInvalidate) { addToInvalidates(b, node); } for (BlockToMarkCorrupt b : toCorrupt) { markBlockAsCorrupt(b, storageInfo, node); } return toInvalidate; } /** * Mark block replicas as corrupt except those on the storages in * newStorages list. 
*/ public void markBlockReplicasAsCorrupt(Block oldBlock, BlockInfo block, long oldGenerationStamp, long oldNumBytes, DatanodeStorageInfo[] newStorages) throws IOException { assert namesystem.hasWriteLock(); BlockToMarkCorrupt b = null; if (block.getGenerationStamp() != oldGenerationStamp) { b = new BlockToMarkCorrupt(oldBlock, block, oldGenerationStamp, "genstamp does not match " + oldGenerationStamp + " : " + block.getGenerationStamp(), Reason.GENSTAMP_MISMATCH); } else if (block.getNumBytes() != oldNumBytes) { b = new BlockToMarkCorrupt(oldBlock, block, "length does not match " + oldNumBytes + " : " + block.getNumBytes(), Reason.SIZE_MISMATCH); } else { return; } for (DatanodeStorageInfo storage : getStorages(block)) { boolean isCorrupt = true; if (newStorages != null) { for (DatanodeStorageInfo newStorage : newStorages) { if (newStorage!= null && storage.equals(newStorage)) { isCorrupt = false; break; } } } if (isCorrupt) { blockLog.debug("BLOCK* markBlockReplicasAsCorrupt: mark block replica" + " {} on {} as corrupt because the dn is not in the new committed " + "storage list.", b, storage.getDatanodeDescriptor()); markBlockAsCorrupt(b, storage, storage.getDatanodeDescriptor()); } } } /** * processFirstBlockReport is intended only for processing "initial" block * reports, the first block report received from a DN after it registers. * It just adds all the valid replicas to the datanode, without calculating * a toRemove list (since there won't be any). It also silently discards * any invalid blocks, thereby deferring their processing until * the next block report. 
   * @param storageInfo - DatanodeStorageInfo that sent the report
   * @param report - the initial block report, to be processed
   * @throws IOException
   */
  private void processFirstBlockReport(
      final DatanodeStorageInfo storageInfo,
      final BlockListAsLongs report) throws IOException {
    if (report == null) return;
    assert (namesystem.hasWriteLock());
    assert (storageInfo.getBlockReportCount() == 0);

    for (BlockReportReplica iblk : report) {
      ReplicaState reportedState = iblk.getState();

      if (LOG.isDebugEnabled()) {
        LOG.debug("Initial report of block " + iblk.getBlockName()
            + " on " + storageInfo.getDatanodeDescriptor() + " size " +
            iblk.getNumBytes() + " replicaState = " + reportedState);
      }
      // A genstamp from the future cannot be judged yet (e.g. on a standby
      // that has not caught up); queue it for later processing.
      if (shouldPostponeBlocksFromFuture &&
          namesystem.isGenStampInFuture(iblk)) {
        queueReportedBlock(storageInfo, iblk, reportedState,
            QUEUE_REASON_FUTURE_GENSTAMP);
        continue;
      }

      BlockInfo storedBlock = getStoredBlock(iblk);
      // If block does not belong to any file, we are done.
      if (storedBlock == null) continue;

      // If block is corrupt, mark it and continue to next block.
      BlockUCState ucState = storedBlock.getBlockUCState();
      BlockToMarkCorrupt c = checkReplicaCorrupt(
          iblk, reportedState, storedBlock, ucState,
          storageInfo.getDatanodeDescriptor());
      if (c != null) {
        if (shouldPostponeBlocksFromFuture) {
          // In the Standby, we may receive a block report for a file that we
          // just have an out-of-date gen-stamp or state for, for example.
          queueReportedBlock(storageInfo, iblk, reportedState,
              QUEUE_REASON_CORRUPT_STATE);
        } else {
          markBlockAsCorrupt(c, storageInfo,
              storageInfo.getDatanodeDescriptor());
        }
        continue;
      }

      // If block is under construction, add this replica to its list
      if (isBlockUnderConstruction(storedBlock, ucState, reportedState)) {
        storedBlock.getUnderConstructionFeature()
            .addReplicaIfNotPresent(storageInfo, iblk, reportedState);
        // OpenFileBlocks only inside snapshots also will be added to safemode
        // threshold. So we need to update such blocks to safemode
        // refer HDFS-5283
        if (namesystem.isInSnapshot(storedBlock)) {
          int numOfReplicas = storedBlock.getUnderConstructionFeature()
              .getNumExpectedLocations();
          namesystem.incrementSafeBlockCount(numOfReplicas, storedBlock);
        }
        //and fall through to next clause
      }
      //add replica if appropriate
      if (reportedState == ReplicaState.FINALIZED) {
        addStoredBlockImmediate(storedBlock, iblk, storageInfo);
      }
    }
  }

  /**
   * Diff the given block report against the blocks currently recorded for
   * this storage, filling the passed-in action collections. A delimiter
   * block is temporarily spliced into the storage's block list; reported
   * blocks are moved in front of it, so that everything remaining behind
   * the delimiter afterwards is unreported and goes to {@code toRemove}.
   */
  private void reportDiff(DatanodeStorageInfo storageInfo,
      BlockListAsLongs newReport,
      Collection<BlockInfoToAdd> toAdd,     // add to DatanodeDescriptor
      Collection<BlockInfo> toRemove,       // remove from DatanodeDescriptor
      Collection<Block> toInvalidate,       // should be removed from DN
      Collection<BlockToMarkCorrupt> toCorrupt, // add to corrupt replicas list
      Collection<StatefulBlockInfo> toUC) { // add to under-construction list
    // place a delimiter in the list which separates blocks
    // that have been reported from those that have not
    Block delimiterBlock = new Block();
    BlockInfo delimiter = new BlockInfoContiguous(delimiterBlock,
        (short) 1);
    AddBlockResult result = storageInfo.addBlock(delimiter, delimiterBlock);
    assert result == AddBlockResult.ADDED
        : "Delimiting block cannot be present in the node";
    int headIndex = 0; //currently the delimiter is in the head of the list
    int curIndex;

    if (newReport == null) {
      newReport = BlockListAsLongs.EMPTY;
    }
    // scan the report and process newly reported blocks
    for (BlockReportReplica iblk : newReport) {
      ReplicaState iState = iblk.getState();
      BlockInfo storedBlock = processReportedBlock(storageInfo,
          iblk, iState, toAdd, toInvalidate, toCorrupt, toUC);

      // move block to the head of the list
      if (storedBlock != null &&
          (curIndex = storedBlock.findStorageInfo(storageInfo)) >= 0) {
        headIndex = storageInfo.moveBlockToHead(storedBlock, curIndex,
            headIndex);
      }
    }

    // collect blocks that have not been reported
    // all of them are next to the delimiter
    Iterator<BlockInfo> it =
        storageInfo.new BlockIterator(delimiter.getNext(0));
    while (it.hasNext()) {
      toRemove.add(it.next());
    }
    storageInfo.removeBlock(delimiter);
  }

  /**
   * Process a block replica reported by the data-node.
   * No side effects except adding to the passed-in Collections.
   *
   * <ol>
   * <li>If the block is not known to the system (not in blocksMap) then the
   * data-node should be notified to invalidate this block.</li>
   * <li>If the reported replica is valid that is has the same generation stamp
   * and length as recorded on the name-node, then the replica location should
   * be added to the name-node.</li>
   * <li>If the reported replica is not valid, then it is marked as corrupt,
   * which triggers replication of the existing valid replicas.
   * Corrupt replicas are removed from the system when the block
   * is fully replicated.</li>
   * <li>If the reported replica is for a block currently marked "under
   * construction" in the NN, then it should be added to the
   * BlockUnderConstructionFeature's list of replicas.</li>
   * </ol>
   *
   * @param storageInfo DatanodeStorageInfo that sent the report.
   * @param block reported block replica
   * @param reportedState reported replica state
   * @param toAdd add to DatanodeDescriptor
   * @param toInvalidate missing blocks (not in the blocks map)
   *        should be removed from the data-node
   * @param toCorrupt replicas with unexpected length or generation stamp;
   *        add to corrupt replicas
   * @param toUC replicas of blocks currently under construction
   * @return the up-to-date stored block, if it should be kept.
   *         Otherwise, null.
   */
  private BlockInfo processReportedBlock(
      final DatanodeStorageInfo storageInfo,
      final Block block, final ReplicaState reportedState,
      final Collection<BlockInfoToAdd> toAdd,
      final Collection<Block> toInvalidate,
      final Collection<BlockToMarkCorrupt> toCorrupt,
      final Collection<StatefulBlockInfo> toUC) {

    DatanodeDescriptor dn = storageInfo.getDatanodeDescriptor();

    if(LOG.isDebugEnabled()) {
      LOG.debug("Reported block " + block
          + " on " + dn + " size " + block.getNumBytes()
          + " replicaState = " + reportedState);
    }

    // Blocks with a future genstamp cannot be judged yet; queue for later.
    if (shouldPostponeBlocksFromFuture &&
        namesystem.isGenStampInFuture(block)) {
      queueReportedBlock(storageInfo, block, reportedState,
          QUEUE_REASON_FUTURE_GENSTAMP);
      return null;
    }

    // find block by blockId
    BlockInfo storedBlock = getStoredBlock(block);
    if(storedBlock == null) {
      // If blocksMap does not contain reported block id,
      // the replica should be removed from the data-node.
      toInvalidate.add(new Block(block));
      return null;
    }
    BlockUCState ucState = storedBlock.getBlockUCState();

    // Block is on the NN
    if(LOG.isDebugEnabled()) {
      LOG.debug("In memory blockUCState = " + ucState);
    }

    // Ignore replicas already scheduled to be removed from the DN
    if(invalidateBlocks.contains(dn, block)) {
      return storedBlock;
    }

    BlockToMarkCorrupt c = checkReplicaCorrupt(
        block, reportedState, storedBlock, ucState, dn);
    if (c != null) {
      if (shouldPostponeBlocksFromFuture) {
        // If the block is an out-of-date generation stamp or state,
        // but we're the standby, we shouldn't treat it as corrupt,
        // but instead just queue it for later processing.
        // TODO: Pretty confident this should be s/storedBlock/block below,
        // since we should be postponing the info of the reported block, not
        // the stored block. See HDFS-6289 for more context.
        queueReportedBlock(storageInfo, storedBlock, reportedState,
            QUEUE_REASON_CORRUPT_STATE);
      } else {
        toCorrupt.add(c);
      }
      return storedBlock;
    }

    if (isBlockUnderConstruction(storedBlock, ucState, reportedState)) {
      toUC.add(new StatefulBlockInfo(storedBlock,
          new Block(block), reportedState));
      return storedBlock;
    }

    // Add replica if appropriate. If the replica was previously corrupt
    // but now okay, it might need to be updated.
    if (reportedState == ReplicaState.FINALIZED
        && (storedBlock.findStorageInfo(storageInfo) == -1 ||
            corruptReplicas.isReplicaCorrupt(storedBlock, dn))) {
      toAdd.add(new BlockInfoToAdd(storedBlock, block));
    }
    return storedBlock;
  }

  /**
   * Queue the given reported block for later processing in the
   * standby node. @see PendingDataNodeMessages.
   * @param reason a textual reason to report in the debug logs
   */
  private void queueReportedBlock(DatanodeStorageInfo storageInfo, Block block,
      ReplicaState reportedState, String reason) {
    assert shouldPostponeBlocksFromFuture;

    if (LOG.isDebugEnabled()) {
      LOG.debug("Queueing reported block " + block +
          " in state " + reportedState +
          " from datanode " + storageInfo.getDatanodeDescriptor() +
          " for later processing because " + reason + ".");
    }
    pendingDNMessages.enqueueReportedBlock(storageInfo, block, reportedState);
  }

  /**
   * Try to process any messages that were previously queued for the given
   * block. This is called from FSEditLogLoader whenever a block's state
   * in the namespace has changed or a new block has been created.
   */
  public void processQueuedMessagesForBlock(Block b) throws IOException {
    Queue<ReportedBlockInfo> queue = pendingDNMessages.takeBlockQueue(b);
    if (queue == null) {
      // Nothing to re-process
      return;
    }
    processQueuedMessages(queue);
  }

  /**
   * Apply each previously queued reported-block message. A null reported
   * state marks a queued DELETE_BLOCK request; otherwise the message is
   * replayed as a normal reported block.
   */
  private void processQueuedMessages(Iterable<ReportedBlockInfo> rbis)
      throws IOException {
    for (ReportedBlockInfo rbi : rbis) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Processing previouly queued message " + rbi);
      }
      if (rbi.getReportedState() == null) {
        // This is a DELETE_BLOCK request
        DatanodeStorageInfo storageInfo = rbi.getStorageInfo();
        removeStoredBlock(getStoredBlock(rbi.getBlock()),
            storageInfo.getDatanodeDescriptor());
      } else {
        processAndHandleReportedBlock(rbi.getStorageInfo(),
            rbi.getBlock(), rbi.getReportedState(), null);
      }
    }
  }

  /**
   * Process any remaining queued datanode messages after entering
   * active state. At this point they will not be re-queued since
   * we are the definitive master node and thus should be up-to-date
   * with the namespace information.
   */
  public void processAllPendingDNMessages() throws IOException {
    assert !shouldPostponeBlocksFromFuture :
        "processAllPendingDNMessages() should be called after disabling " +
        "block postponement.";
    int count = pendingDNMessages.count();
    if (count > 0) {
      LOG.info("Processing " + count + " messages from DataNodes " +
          "that were previously queued during standby state");
    }
    processQueuedMessages(pendingDNMessages.takeAll());
    assert pendingDNMessages.count() == 0;
  }

  /**
   * The next two methods test the various cases under which we must conclude
   * the replica is corrupt, or under construction.  These are laid out
   * as switch statements, on the theory that it is easier to understand
   * the combinatorics of reportedState and ucState that way.  It should be
   * at least as efficient as boolean expressions.
   *
   * @return a BlockToMarkCorrupt object, or null if the replica is not corrupt
   */
  private BlockToMarkCorrupt checkReplicaCorrupt(
      Block reported, ReplicaState reportedState,
      BlockInfo storedBlock, BlockUCState ucState,
      DatanodeDescriptor dn) {
    switch(reportedState) {
    case FINALIZED:
      switch(ucState) {
      case COMPLETE:
      case COMMITTED:
        // A finalized replica must match the stored genstamp and length.
        if (storedBlock.getGenerationStamp() != reported.getGenerationStamp()) {
          final long reportedGS = reported.getGenerationStamp();
          return new BlockToMarkCorrupt(new Block(reported), storedBlock,
              reportedGS, "block is " + ucState + " and reported genstamp " +
              reportedGS + " does not match genstamp in block map " +
              storedBlock.getGenerationStamp(), Reason.GENSTAMP_MISMATCH);
        }
        boolean wrongSize;
        if (storedBlock.isStriped()) {
          // For striped blocks, compare against the expected length of this
          // particular internal block, derived from the block index.
          assert BlockIdManager.isStripedBlockID(reported.getBlockId());
          assert storedBlock.getBlockId() ==
              BlockIdManager.convertToStripedID(reported.getBlockId());
          BlockInfoStriped stripedBlock = (BlockInfoStriped) storedBlock;
          int reportedBlkIdx = BlockIdManager.getBlockIndex(reported);
          wrongSize = reported.getNumBytes() != getInternalBlockLength(
              stripedBlock.getNumBytes(), stripedBlock.getCellSize(),
              stripedBlock.getDataBlockNum(), reportedBlkIdx);
        } else {
          wrongSize = storedBlock.getNumBytes() != reported.getNumBytes();
        }
        if (wrongSize) {
          return new BlockToMarkCorrupt(new Block(reported), storedBlock,
              "block is " + ucState + " and reported length " +
              reported.getNumBytes() + " does not match " +
              "length in block map " + storedBlock.getNumBytes(),
              Reason.SIZE_MISMATCH);
        } else {
          return null; // not corrupt
        }
      case UNDER_CONSTRUCTION:
        // A finalized replica whose genstamp is behind an under-construction
        // block is stale, hence corrupt.
        if (storedBlock.getGenerationStamp() > reported.getGenerationStamp()) {
          final long reportedGS = reported.getGenerationStamp();
          return new BlockToMarkCorrupt(new Block(reported), storedBlock,
              reportedGS, "block is " + ucState + " and reported state " +
              reportedState + ", But reported genstamp " + reportedGS
              + " does not match genstamp in block map " +
              storedBlock.getGenerationStamp(),
              Reason.GENSTAMP_MISMATCH);
        }
        return null;
      default:
        return null;
      }
    case RBW:
    case RWR:
      if (!storedBlock.isComplete()) {
        return null; // not corrupt
      } else if (storedBlock.getGenerationStamp() !=
          reported.getGenerationStamp()) {
        final long reportedGS = reported.getGenerationStamp();
        return new BlockToMarkCorrupt(new Block(reported), storedBlock,
            reportedGS,
            "reported " + reportedState + " replica with genstamp " +
            reportedGS +
            " does not match COMPLETE block's genstamp in block map " +
            storedBlock.getGenerationStamp(), Reason.GENSTAMP_MISMATCH);
      } else { // COMPLETE block, same genstamp
        if (reportedState == ReplicaState.RBW) {
          // If it's a RBW report for a COMPLETE block, it may just be that
          // the block report got a little bit delayed after the pipeline
          // closed. So, ignore this report, assuming we will get a
          // FINALIZED replica later. See HDFS-2791
          LOG.info("Received an RBW replica for " + storedBlock +
              " on " + dn + ": ignoring it, since it is " +
              "complete with the same genstamp");
          return null;
        } else {
          return new BlockToMarkCorrupt(new Block(reported), storedBlock,
              "reported replica has invalid state " + reportedState,
              Reason.INVALID_STATE);
        }
      }
    case RUR:       // should not be reported
    case TEMPORARY: // should not be reported
    default:
      String msg = "Unexpected replica state " + reportedState
          + " for block: " + storedBlock +
          " on " + dn + " size " + storedBlock.getNumBytes();
      // log here at WARN level since this is really a broken HDFS invariant
      LOG.warn(msg);
      return new BlockToMarkCorrupt(new Block(reported), storedBlock, msg,
          Reason.INVALID_STATE);
    }
  }

  /**
   * Decide whether the reported replica should be treated as belonging to a
   * block under construction (and thus be added to the block's
   * under-construction replica list instead of its finalized locations).
   */
  private boolean isBlockUnderConstruction(BlockInfo storedBlock,
      BlockUCState ucState, ReplicaState reportedState) {
    switch(reportedState) {
    case FINALIZED:
      switch(ucState) {
      case UNDER_CONSTRUCTION:
      case UNDER_RECOVERY:
        return true;
      default:
        return false;
      }
    case RBW:
    case RWR:
      return (!storedBlock.isComplete());
    case RUR:       // should not be reported
    case TEMPORARY: // should not be reported
    default:
      return false;
    }
  }
  /**
   * Record a reported replica on the block's under-construction replica
   * list; if the replica is already FINALIZED but not yet a stored location,
   * also add it as a stored replica.
   */
  void addStoredBlockUnderConstruction(StatefulBlockInfo ucBlock,
      DatanodeStorageInfo storageInfo) throws IOException {
    BlockInfo block = ucBlock.storedBlock;
    block.getUnderConstructionFeature().addReplicaIfNotPresent(
        storageInfo, ucBlock.reportedBlock, ucBlock.reportedState);

    if (ucBlock.reportedState == ReplicaState.FINALIZED &&
        (block.findStorageInfo(storageInfo) < 0)) {
      addStoredBlock(block, ucBlock.reportedBlock, storageInfo, null, true);
    }
  }

  /**
   * Faster version of {@link #addStoredBlock},
   * intended for use with initial block report at startup. If not in startup
   * safe mode, will call standard addStoredBlock(). Assumes this method is
   * called "immediately" so there is no need to refresh the storedBlock from
   * blocksMap. Doesn't handle underReplication/overReplication, or worry about
   * pendingReplications or corruptReplicas, because it's in startup safe mode.
   * Doesn't log every block, because there are typically millions of them.
   *
   * @throws IOException
   */
  private void addStoredBlockImmediate(BlockInfo storedBlock, Block reported,
      DatanodeStorageInfo storageInfo)
      throws IOException {
    assert (storedBlock != null && namesystem.hasWriteLock());
    if (!namesystem.isInStartupSafeMode()
        || isPopulatingReplQueues()) {
      addStoredBlock(storedBlock, reported, storageInfo, null, false);
      return;
    }

    // just add it
    AddBlockResult result = storageInfo.addBlock(storedBlock, reported);

    // Now check for completion of blocks and safe block count
    int numCurrentReplica = countLiveNodes(storedBlock);
    if (storedBlock.getBlockUCState() == BlockUCState.COMMITTED
        && hasMinStorage(storedBlock, numCurrentReplica)) {
      completeBlock(storedBlock, false);
    } else if (storedBlock.isComplete() && result == AddBlockResult.ADDED) {
      // check whether safe replication is reached for the block
      // only complete blocks are counted towards that.
      // In the case that the block just became complete above, completeBlock()
      // handles the safe block count maintenance.
      namesystem.incrementSafeBlockCount(numCurrentReplica, storedBlock);
    }
  }

  /**
   * Modify (block-->datanode) map. Remove block from set of
   * needed replications if this takes care of the problem.
   * @return the block that is stored in blocksMap.
   */
  private Block addStoredBlock(final BlockInfo block,
      final Block reportedBlock,
      DatanodeStorageInfo storageInfo,
      DatanodeDescriptor delNodeHint,
      boolean logEveryBlock)
      throws IOException {
    assert block != null && namesystem.hasWriteLock();
    BlockInfo storedBlock;
    DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();
    if (!block.isComplete()) {
      //refresh our copy in case the block got completed in another thread
      storedBlock = getStoredBlock(block);
    } else {
      storedBlock = block;
    }
    if (storedBlock == null || storedBlock.isDeleted()) {
      // If this block does not belong to anyfile, then we are done.
      blockLog.debug("BLOCK* addStoredBlock: {} on {} size {} but it does not" +
          " belong to any file", block, node, block.getNumBytes());
      // we could add this block to invalidate set of this datanode.
      // it will happen in next block report otherwise.
      return block;
    }
    BlockCollection bc = getBlockCollection(storedBlock);
    assert bc != null : "Block must belong to a file";

    // add block to the datanode
    AddBlockResult result = storageInfo.addBlock(storedBlock, reportedBlock);

    int curReplicaDelta;
    if (result == AddBlockResult.ADDED) {
      curReplicaDelta = 1;
      if (logEveryBlock) {
        logAddStoredBlock(storedBlock, node);
      }
    } else if (result == AddBlockResult.REPLACED) {
      curReplicaDelta = 0;
      blockLog.warn("BLOCK* addStoredBlock: block {} moved to storageType " +
          "{} on node {}", storedBlock, storageInfo.getStorageType(), node);
    } else {
      // if the same block is added again and the replica was corrupt
      // previously because of a wrong gen stamp, remove it from the
      // corrupt block list.
      corruptReplicas.removeFromCorruptReplicasMap(block, node,
          Reason.GENSTAMP_MISMATCH);
      curReplicaDelta = 0;
      blockLog.warn("BLOCK* addStoredBlock: Redundant addStoredBlock request" +
          " received for {} on node {} size {}", storedBlock, node,
          storedBlock.getNumBytes());
    }

    // Now check for completion of blocks and safe block count
    NumberReplicas num = countNodes(storedBlock);
    int numLiveReplicas = num.liveReplicas();
    int numCurrentReplica = numLiveReplicas
        + pendingReplications.getNumReplicas(storedBlock);

    if(storedBlock.getBlockUCState() == BlockUCState.COMMITTED
        && hasMinStorage(storedBlock, numLiveReplicas)) {
      completeBlock(storedBlock, false);
    } else if (storedBlock.isComplete() && result == AddBlockResult.ADDED) {
      // check whether safe replication is reached for the block
      // only complete blocks are counted towards that
      // Is no-op if not in safe mode.
      // In the case that the block just became complete above, completeBlock()
      // handles the safe block count maintenance.
      namesystem.incrementSafeBlockCount(numCurrentReplica, storedBlock);
    }

    // if file is under construction, then done for now
    if (bc.isUnderConstruction()) {
      return storedBlock;
    }

    // do not try to handle over/under-replicated blocks during first safe mode
    if (!isPopulatingReplQueues()) {
      return storedBlock;
    }

    // handle underReplication/overReplication
    short fileReplication = getExpectedReplicaNum(storedBlock);
    if (!isNeededReplication(storedBlock, numCurrentReplica)) {
      neededReplications.remove(storedBlock, numCurrentReplica,
          num.decommissionedAndDecommissioning(), fileReplication);
    } else {
      updateNeededReplications(storedBlock, curReplicaDelta, 0);
    }
    if (numCurrentReplica > fileReplication) {
      processOverReplicatedBlock(storedBlock, fileReplication, node,
          delNodeHint);
    }
    // If the file replication has reached desired value
    // we can remove any corrupt replicas the block may have
    int corruptReplicasCount = corruptReplicas.numCorruptReplicas(storedBlock);
    int numCorruptNodes = num.corruptReplicas();
    if (numCorruptNodes != corruptReplicasCount) {
      LOG.warn("Inconsistent number of corrupt replicas for " + storedBlock
          + ". blockMap has " + numCorruptNodes +
          " but corrupt replicas map has " + corruptReplicasCount);
    }
    if ((corruptReplicasCount > 0) && (numLiveReplicas >= fileReplication)) {
      invalidateCorruptReplicas(storedBlock, reportedBlock, num);
    }
    return storedBlock;
  }

  /** Emit the per-block "blockMap updated" debug log line. */
  private void logAddStoredBlock(BlockInfo storedBlock,
      DatanodeDescriptor node) {
    if (!blockLog.isDebugEnabled()) {
      return;
    }

    StringBuilder sb = new StringBuilder(500);
    sb.append("BLOCK* addStoredBlock: blockMap updated: ")
        .append(node)
        .append(" is added to ");
    storedBlock.appendStringTo(sb);
    sb.append(" size " )
        .append(storedBlock.getNumBytes());
    blockLog.debug(sb.toString());
  }

  /**
   * Invalidate corrupt replicas.
   * <p>
   * This will remove the replicas from the block's location list,
   * add them to {@link #invalidateBlocks} so that they could be further
   * deleted from the respective data-nodes,
   * and remove the block from corruptReplicasMap.
   * <p>
   * This method should be called when the block has sufficient
   * number of live replicas.
   *
   * @param blk Block whose corrupt replicas need to be invalidated
   */
  private void invalidateCorruptReplicas(BlockInfo blk, Block reported,
      NumberReplicas numberReplicas) {
    Collection<DatanodeDescriptor> nodes = corruptReplicas.getNodes(blk);
    boolean removedFromBlocksMap = true;
    if (nodes == null)
      return;
    // make a copy of the array of nodes in order to avoid
    // ConcurrentModificationException, when the block is removed from the node
    DatanodeDescriptor[] nodesCopy =
        nodes.toArray(new DatanodeDescriptor[nodes.size()]);
    for (DatanodeDescriptor node : nodesCopy) {
      try {
        if (!invalidateBlock(new BlockToMarkCorrupt(reported, blk, null,
            Reason.ANY), node, numberReplicas)) {
          removedFromBlocksMap = false;
        }
      } catch (IOException e) {
        // Best effort: remember the failure so the block stays in the
        // corrupt-replicas map and is retried later.
        blockLog.debug("invalidateCorruptReplicas error in deleting bad block"
            + " {} on {}", blk, node, e);
        removedFromBlocksMap = false;
      }
    }
    // Remove the block from corruptReplicasMap
    if (removedFromBlocksMap) {
      corruptReplicas.removeFromCorruptReplicasMap(blk);
    }
  }

  /**
   * For each block in the name-node verify whether it belongs to any file,
   * over or under replicated. Place it into the respective queue.
   */
  public void processMisReplicatedBlocks() {
    assert namesystem.hasWriteLock();
    stopReplicationInitializer();
    neededReplications.clear();
    // Kick off the scan on a background daemon so startup is not blocked.
    replicationQueuesInitializer = new Daemon() {

      @Override
      public void run() {
        try {
          processMisReplicatesAsync();
        } catch (InterruptedException ie) {
          LOG.info("Interrupted while processing replication queues.");
        } catch (Exception e) {
          LOG.error("Error while processing replication queues async", e);
        }
      }
    };
    replicationQueuesInitializer.setName("Replication Queue Initializer");
    replicationQueuesInitializer.start();
  }

  /*
   * Stop the ongoing initialisation of replication queues
   */
  private void stopReplicationInitializer() {
    if (replicationQueuesInitializer != null) {
      replicationQueuesInitializer.interrupt();
      try {
        replicationQueuesInitializer.join();
      } catch (final InterruptedException e) {
        LOG.warn("Interrupted while waiting for replicationQueueInitializer. Returning..");
        return;
      } finally {
        replicationQueuesInitializer = null;
      }
    }
  }

  /*
   * Since the BlocksMapGset does not throw the ConcurrentModificationException
   * and supports further iteration after modification to list, there is a
   * chance of missing the newly added block while iterating. Since every
   * addition to blocksMap will check for mis-replication, missing mis-replication
   * check for new blocks will not be a problem.
   */
  private void processMisReplicatesAsync() throws InterruptedException {
    long nrInvalid = 0, nrOverReplicated = 0;
    long nrUnderReplicated = 0, nrPostponed = 0, nrUnderConstruction = 0;
    long startTimeMisReplicatedScan = Time.monotonicNow();
    Iterator<BlockInfo> blocksItr = blocksMap.getBlocks().iterator();
    long totalBlocks = blocksMap.size();
    replicationQueuesInitProgress = 0;
    long totalProcessed = 0;
    long sleepDuration =
        Math.max(1, Math.min(numBlocksPerIteration/1000, 10000));

    while (namesystem.isRunning() && !Thread.currentThread().isInterrupted()) {
      int processed = 0;
      // Take the write lock in bounded batches so other namesystem work can
      // interleave with the scan.
      namesystem.writeLockInterruptibly();
      try {
        while (processed < numBlocksPerIteration && blocksItr.hasNext()) {
          BlockInfo block = blocksItr.next();
          MisReplicationResult res = processMisReplicatedBlock(block);
          if (LOG.isTraceEnabled()) {
            LOG.trace("block " + block + ": " + res);
          }
          switch (res) {
          case UNDER_REPLICATED:
            nrUnderReplicated++;
            break;
          case OVER_REPLICATED:
            nrOverReplicated++;
            break;
          case INVALID:
            nrInvalid++;
            break;
          case POSTPONE:
            nrPostponed++;
            postponeBlock(block);
            break;
          case UNDER_CONSTRUCTION:
            nrUnderConstruction++;
            break;
          case OK:
            break;
          default:
            throw new AssertionError("Invalid enum value: " + res);
          }
          processed++;
        }
        totalProcessed += processed;
        // there is a possibility that if any of the blocks deleted/added during
        // initialisation, then progress might be different.
        replicationQueuesInitProgress = Math.min((double) totalProcessed
            / totalBlocks, 1.0);

        if (!blocksItr.hasNext()) {
          LOG.info("Total number of blocks = " + blocksMap.size());
          LOG.info("Number of invalid blocks = " + nrInvalid);
          LOG.info("Number of under-replicated blocks = " + nrUnderReplicated);
          LOG.info("Number of over-replicated blocks = " + nrOverReplicated
              + ((nrPostponed > 0) ?
                  (" (" + nrPostponed + " postponed)") : ""));
          LOG.info("Number of blocks being written = " + nrUnderConstruction);
          NameNode.stateChangeLog
              .info("STATE* Replication Queue initialization "
                  + "scan for invalid, over- and under-replicated blocks "
                  + "completed in "
                  + (Time.monotonicNow() - startTimeMisReplicatedScan)
                  + " msec");
          break;
        }
      } finally {
        namesystem.writeUnlock();
        // Make sure it is out of the write lock for sufficiently long time.
        Thread.sleep(sleepDuration);
      }
    }
    if (Thread.currentThread().isInterrupted()) {
      LOG.info("Interrupted while processing replication queues.");
    }
  }

  /**
   * Get the progress of the Replication queues initialisation
   *
   * @return Returns values between 0 and 1 for the progress.
   */
  public double getReplicationQueuesInitProgress() {
    return replicationQueuesInitProgress;
  }

  /**
   * Get the value of whether there are any non-EC blocks using StripedID.
   *
   * @return Returns the value of whether there are any non-EC blocks using StripedID.
   */
  public boolean hasNonEcBlockUsingStripedID(){
    return hasNonEcBlockUsingStripedID;
  }

  /**
   * Process a single possibly misreplicated block. This adds it to the
   * appropriate queues if necessary, and returns a result code indicating
   * what happened with it.
   */
  private MisReplicationResult processMisReplicatedBlock(BlockInfo block) {
    if (block.isDeleted()) {
      // block does not belong to any file
      addToInvalidates(block);
      return MisReplicationResult.INVALID;
    }
    if (!block.isComplete()) {
      // Incomplete blocks are never considered mis-replicated --
      // they'll be reached when they are completed or recovered.
      return MisReplicationResult.UNDER_CONSTRUCTION;
    }
    // calculate current replication
    short expectedReplication = getExpectedReplicaNum(block);
    NumberReplicas num = countNodes(block);
    int numCurrentReplica = num.liveReplicas();
    // add to under-replicated queue if need to be
    if (isNeededReplication(block, numCurrentReplica)) {
      if (neededReplications.add(block, numCurrentReplica, num
          .decommissionedAndDecommissioning(), expectedReplication)) {
        return MisReplicationResult.UNDER_REPLICATED;
      }
    }

    if (numCurrentReplica > expectedReplication) {
      if (num.replicasOnStaleNodes() > 0) {
        // If any of the replicas of this block are on nodes that are
        // considered "stale", then these replicas may in fact have
        // already been deleted. So, we cannot safely act on the
        // over-replication until a later point in time, when
        // the "stale" nodes have block reported.
        return MisReplicationResult.POSTPONE;
      }
      // over-replicated block
      processOverReplicatedBlock(block, expectedReplication, null, null);
      return MisReplicationResult.OVER_REPLICATED;
    }

    return MisReplicationResult.OK;
  }

  /** Set replication for the blocks. */
  public void setReplication(
      final short oldRepl, final short newRepl, final BlockInfo b) {
    if (newRepl == oldRepl) {
      return;
    }

    // update needReplication priority queues
    b.setReplication(newRepl);
    updateNeededReplications(b, 0, newRepl - oldRepl);

    if (oldRepl > newRepl) {
      // Replication was lowered; schedule removal of now-excess replicas.
      processOverReplicatedBlock(b, newRepl, null, null);
    }
  }

  /**
   * Find how many of the containing nodes are "extra", if any.
   * If there are any extras, call chooseExcessReplicates() to
   * mark them in the excessReplicateMap.
*/ private void processOverReplicatedBlock(final BlockInfo block, final short replication, final DatanodeDescriptor addedNode, DatanodeDescriptor delNodeHint) { assert namesystem.hasWriteLock(); if (addedNode == delNodeHint) { delNodeHint = null; } Collection<DatanodeStorageInfo> nonExcess = new ArrayList<>(); Collection<DatanodeDescriptor> corruptNodes = corruptReplicas .getNodes(block); for(DatanodeStorageInfo storage : blocksMap.getStorages(block, State.NORMAL)) { final DatanodeDescriptor cur = storage.getDatanodeDescriptor(); if (storage.areBlockContentsStale()) { LOG.info("BLOCK* processOverReplicatedBlock: " + "Postponing processing of over-replicated " + block + " since storage + " + storage + "datanode " + cur + " does not yet have up-to-date " + "block information."); postponeBlock(block); return; } LightWeightLinkedSet<BlockInfo> excessBlocks = excessReplicateMap.get( cur.getDatanodeUuid()); if (excessBlocks == null || !excessBlocks.contains(block)) { if (!cur.isDecommissionInProgress() && !cur.isDecommissioned()) { // exclude corrupt replicas if (corruptNodes == null || !corruptNodes.contains(cur)) { nonExcess.add(storage); } } } } chooseExcessReplicates(nonExcess, block, replication, addedNode, delNodeHint); } private void chooseExcessReplicates( final Collection<DatanodeStorageInfo> nonExcess, BlockInfo storedBlock, short replication, DatanodeDescriptor addedNode, DatanodeDescriptor delNodeHint) { assert namesystem.hasWriteLock(); // first form a rack to datanodes map and BlockCollection bc = getBlockCollection(storedBlock); if (storedBlock.isStriped()) { chooseExcessReplicasStriped(bc, nonExcess, storedBlock, delNodeHint); } else { final BlockStoragePolicy storagePolicy = storagePolicySuite.getPolicy( bc.getStoragePolicyID()); final List<StorageType> excessTypes = storagePolicy.chooseExcess( replication, DatanodeStorageInfo.toStorageTypes(nonExcess)); chooseExcessReplicasContiguous(bc, nonExcess, storedBlock, replication, addedNode, delNodeHint, 
          excessTypes);
    }
  }

  /**
   * We want "replication" replicates for the block, but we now have too many.
   * In this method, copy enough nodes from 'srcNodes' into 'dstNodes' such that:
   *
   * srcNodes.size() - dstNodes.size() == replication
   *
   * We pick node that make sure that replicas are spread across racks and
   * also try hard to pick one with least free space.
   * The algorithm is first to pick a node with least free space from nodes
   * that are on a rack holding more than one replicas of the block.
   * So removing such a replica won't remove a rack.
   * If no such a node is available,
   * then pick a node with least free space
   */
  private void chooseExcessReplicasContiguous(BlockCollection bc,
      final Collection<DatanodeStorageInfo> nonExcess,
      BlockInfo storedBlock, short replication,
      DatanodeDescriptor addedNode,
      DatanodeDescriptor delNodeHint,
      List<StorageType> excessTypes) {
    BlockPlacementPolicy replicator = placementPolicies.getPolicy(false);
    final Map<String, List<DatanodeStorageInfo>> rackMap = new HashMap<>();
    final List<DatanodeStorageInfo> moreThanOne = new ArrayList<>();
    final List<DatanodeStorageInfo> exactlyOne = new ArrayList<>();

    // split nodes into two sets
    // moreThanOne contains nodes on rack with more than one replica
    // exactlyOne contains the remaining nodes
    replicator.splitNodesWithRack(nonExcess, rackMap, moreThanOne, exactlyOne);

    // pick one node to delete that favors the delete hint
    // otherwise pick one with least space from priSet if it is not empty
    // otherwise one node with least space from remains
    boolean firstOne = true;
    final DatanodeStorageInfo delNodeHintStorage =
        DatanodeStorageInfo.getDatanodeStorageInfo(nonExcess, delNodeHint);
    final DatanodeStorageInfo addedNodeStorage =
        DatanodeStorageInfo.getDatanodeStorageInfo(nonExcess, addedNode);
    // remove one excess replica per iteration until the live count matches
    // the target 'replication'
    while (nonExcess.size() - replication > 0) {
      final DatanodeStorageInfo cur;
      if (useDelHint(firstOne, delNodeHintStorage, addedNodeStorage,
          moreThanOne, excessTypes)) {
        cur = delNodeHintStorage;
      } else { // regular
        // excessive replica removal
        cur = replicator.chooseReplicaToDelete(bc, storedBlock, replication,
            moreThanOne, exactlyOne, excessTypes);
      }
      firstOne = false;
      // adjust rackmap, moreThanOne, and exactlyOne
      replicator.adjustSetsWithChosenReplica(rackMap, moreThanOne,
          exactlyOne, cur);

      processChosenExcessReplica(nonExcess, cur, storedBlock);
    }
  }

  /**
   * We want block group has every internal block, but we have redundant
   * internal blocks (which have the same index).
   * In this method, we delete the redundant internal blocks until only one
   * left for each index.
   *
   * The block placement policy will make sure that the left internal blocks are
   * spread across racks and also try hard to pick one with least free space.
   */
  private void chooseExcessReplicasStriped(BlockCollection bc,
      final Collection<DatanodeStorageInfo> nonExcess,
      BlockInfo storedBlock,
      DatanodeDescriptor delNodeHint) {
    assert storedBlock instanceof BlockInfoStriped;
    BlockInfoStriped sblk = (BlockInfoStriped) storedBlock;
    short groupSize = sblk.getTotalBlockNum();
    BlockPlacementPolicy placementPolicy = placementPolicies.getPolicy(true);
    // shared zero-capacity list; never mutated in this method
    List<DatanodeStorageInfo> empty = new ArrayList<>(0);

    // find all duplicated indices
    BitSet found = new BitSet(groupSize); //indices found
    BitSet duplicated = new BitSet(groupSize); //indices found more than once
    HashMap<DatanodeStorageInfo, Integer> storage2index = new HashMap<>();
    for (DatanodeStorageInfo storage : nonExcess) {
      int index = sblk.getStorageBlockIndex(storage);
      assert index >= 0;
      if (found.get(index)) {
        duplicated.set(index);
      }
      found.set(index);
      storage2index.put(storage, index);
    }

    // the number of target left replicas equals to the of number of the found
    // indices.
    int numOfTarget = found.cardinality();

    final BlockStoragePolicy storagePolicy = storagePolicySuite.getPolicy(
        bc.getStoragePolicyID());
    final List<StorageType> excessTypes = storagePolicy.chooseExcess(
        (short) numOfTarget, DatanodeStorageInfo.toStorageTypes(nonExcess));

    // use delHint only if delHint is duplicated
    final DatanodeStorageInfo delStorageHint =
        DatanodeStorageInfo.getDatanodeStorageInfo(nonExcess, delNodeHint);
    if (delStorageHint != null) {
      Integer index = storage2index.get(delStorageHint);
      if (index != null && duplicated.get(index)) {
        processChosenExcessReplica(nonExcess, delStorageHint, storedBlock);
      }
    }

    // for each duplicated index, delete some replicas until only one left
    for (int targetIndex = duplicated.nextSetBit(0); targetIndex >= 0;
         targetIndex = duplicated.nextSetBit(targetIndex + 1)) {
      List<DatanodeStorageInfo> candidates = new ArrayList<>();
      for (DatanodeStorageInfo storage : nonExcess) {
        int index = storage2index.get(storage);
        if (index == targetIndex) {
          candidates.add(storage);
        }
      }
      // the internal block's id is the group id plus its index in the group
      Block internalBlock = new Block(storedBlock);
      internalBlock.setBlockId(storedBlock.getBlockId() + targetIndex);
      while (candidates.size() > 1) {
        DatanodeStorageInfo target = placementPolicy.chooseReplicaToDelete(bc,
            internalBlock, (short) 1, candidates, empty, excessTypes);
        processChosenExcessReplica(nonExcess, target, storedBlock);
        candidates.remove(target);
      }
      duplicated.clear(targetIndex);
    }
  }

  // Removes the chosen storage from the non-excess set, records the replica
  // as excess on its datanode, and schedules it for invalidation.
  private void processChosenExcessReplica(
      final Collection<DatanodeStorageInfo> nonExcess,
      final DatanodeStorageInfo chosen, BlockInfo storedBlock) {
    nonExcess.remove(chosen);
    addToExcessReplicate(chosen.getDatanodeDescriptor(), storedBlock);
    //
    // The 'excessblocks' tracks blocks until we get confirmation
    // that the datanode has deleted them; the only way we remove them
    // is when we get a "removeBlock" message.
    //
    // The 'invalidate' list is used to inform the datanode the block
    // should be deleted.
    // Items are removed from the invalidate list
    // upon giving instructions to the datanodes.
    //
    final Block blockToInvalidate = getBlockOnStorage(storedBlock, chosen);
    addToInvalidates(blockToInvalidate, chosen.getDatanodeDescriptor());
    blockLog.debug("BLOCK* chooseExcessReplicates: "
        + "({}, {}) is added to invalidated blocks set", chosen, storedBlock);
  }

  /** Check if we can use delHint */
  static boolean useDelHint(boolean isFirst, DatanodeStorageInfo delHint,
      DatanodeStorageInfo added, List<DatanodeStorageInfo> moreThan1Racks,
      List<StorageType> excessTypes) {
    if (!isFirst) {
      return false; // only consider delHint for the first case
    } else if (delHint == null) {
      return false; // no delHint
    } else if (!excessTypes.contains(delHint.getStorageType())) {
      return false; // delHint storage type is not an excess type
    } else {
      // check if removing delHint reduces the number of racks
      if (moreThan1Racks.contains(delHint)) {
        return true; // delHint and some other nodes are under the same rack
      } else if (added != null && !moreThan1Racks.contains(added)) {
        return true; // the added node adds a new rack
      }
      return false; // removing delHint reduces the number of racks;
    }
  }

  // Records the block as an excess replica on the given datanode, creating
  // the per-datanode set lazily.
  private void addToExcessReplicate(DatanodeInfo dn, BlockInfo storedBlock) {
    assert namesystem.hasWriteLock();
    LightWeightLinkedSet<BlockInfo> excessBlocks = excessReplicateMap.get(
        dn.getDatanodeUuid());
    if (excessBlocks == null) {
      excessBlocks = new LightWeightLinkedSet<>();
      excessReplicateMap.put(dn.getDatanodeUuid(), excessBlocks);
    }
    if (excessBlocks.add(storedBlock)) {
      excessBlocksCount.incrementAndGet();
      blockLog.debug("BLOCK* addToExcessReplicate: ({}, {}) is added to"
          + " excessReplicateMap", dn, storedBlock);
    }
  }

  // Removes a replica reported deleted by the given storage; queues the
  // report instead when the block's generation stamp is from the future.
  private void removeStoredBlock(DatanodeStorageInfo storageInfo, Block block,
      DatanodeDescriptor node) {
    if (shouldPostponeBlocksFromFuture &&
        namesystem.isGenStampInFuture(block)) {
      queueReportedBlock(storageInfo, block, null,
          QUEUE_REASON_FUTURE_GENSTAMP);
      return;
    }
    removeStoredBlock(getStoredBlock(block), node);
  }

  /**
   * Modify (block-->datanode) map. Possibly generate replication tasks, if the
   * removed block is still valid.
   */
  public void removeStoredBlock(BlockInfo storedBlock,
      DatanodeDescriptor node) {
    blockLog.debug("BLOCK* removeStoredBlock: {} from {}", storedBlock, node);
    assert (namesystem.hasWriteLock());
    // NOTE(review): historical bare block statement; kept as-is.
    {
      if (storedBlock == null ||
          !blocksMap.removeNode(storedBlock, node)) {
        blockLog.debug("BLOCK* removeStoredBlock: {} has already been" +
            " removed from node {}", storedBlock, node);
        return;
      }

      // drop any cache-tracking state for this replica on this node
      CachedBlock cblock = namesystem.getCacheManager().getCachedBlocks()
          .get(new CachedBlock(storedBlock.getBlockId(), (short) 0, false));
      if (cblock != null) {
        boolean removed = false;
        removed |= node.getPendingCached().remove(cblock);
        removed |= node.getCached().remove(cblock);
        removed |= node.getPendingUncached().remove(cblock);
        if (removed) {
          blockLog.debug("BLOCK* removeStoredBlock: {} removed from caching "
              + "related lists on node {}", storedBlock, node);
        }
      }

      //
      // It's possible that the block was removed because of a datanode
      // failure. If the block is still valid, check if replication is
      // necessary. In that case, put block on a possibly-will-
      // be-replicated list.
      //
      BlockCollection bc = getBlockCollection(storedBlock);
      if (bc != null) {
        namesystem.decrementSafeBlockCount(storedBlock);
        updateNeededReplications(storedBlock, -1, 0);
      }

      //
      // We've removed a block from a node, so it's definitely no longer
      // in "excess" there.
// LightWeightLinkedSet<BlockInfo> excessBlocks = excessReplicateMap.get( node.getDatanodeUuid()); if (excessBlocks != null) { if (excessBlocks.remove(storedBlock)) { excessBlocksCount.decrementAndGet(); blockLog.debug("BLOCK* removeStoredBlock: {} is removed from " + "excessBlocks", storedBlock); if (excessBlocks.size() == 0) { excessReplicateMap.remove(node.getDatanodeUuid()); } } } // Remove the replica from corruptReplicas corruptReplicas.removeFromCorruptReplicasMap(storedBlock, node); } } /** * Get all valid locations of the block & add the block to results * @return the length of the added block; 0 if the block is not added. If the * added block is a block group, return its approximate internal block size */ private long addBlock(BlockInfo block, List<BlockWithLocations> results) { final List<DatanodeStorageInfo> locations = getValidLocations(block); if(locations.size() == 0) { return 0; } else { final String[] datanodeUuids = new String[locations.size()]; final String[] storageIDs = new String[datanodeUuids.length]; final StorageType[] storageTypes = new StorageType[datanodeUuids.length]; for(int i = 0; i < locations.size(); i++) { final DatanodeStorageInfo s = locations.get(i); datanodeUuids[i] = s.getDatanodeDescriptor().getDatanodeUuid(); storageIDs[i] = s.getStorageID(); storageTypes[i] = s.getStorageType(); } BlockWithLocations blkWithLocs = new BlockWithLocations(block, datanodeUuids, storageIDs, storageTypes); if(block.isStriped()) { BlockInfoStriped blockStriped = (BlockInfoStriped) block; byte[] indices = new byte[locations.size()]; for (int i = 0; i < locations.size(); i++) { indices[i] = (byte) blockStriped.getStorageBlockIndex(locations.get(i)); } results.add(new StripedBlockWithLocations(blkWithLocs, indices, blockStriped.getDataBlockNum(), blockStriped.getCellSize())); // approximate size return block.getNumBytes() / blockStriped.getDataBlockNum(); }else{ results.add(blkWithLocs); return block.getNumBytes(); } } } /** * The given node is 
   * reporting that it received a certain block.
   */
  @VisibleForTesting
  void addBlock(DatanodeStorageInfo storageInfo, Block block, String delHint)
      throws IOException {
    DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();
    // Decrement number of blocks scheduled to this datanode.
    // for a retry request (of DatanodeProtocol#blockReceivedAndDeleted with
    // RECEIVED_BLOCK), we currently also decrease the approximate number.
    node.decrementBlocksScheduled(storageInfo.getStorageType());

    // get the deletion hint node
    DatanodeDescriptor delHintNode = null;
    if (delHint != null && delHint.length() != 0) {
      delHintNode = datanodeManager.getDatanode(delHint);
      if (delHintNode == null) {
        blockLog.warn("BLOCK* blockReceived: {} is expected to be removed " +
            "from an unrecorded node {}", block, delHint);
      }
    }

    //
    // Modify the blocks->datanode map and node's map.
    //
    BlockInfo storedBlock = getStoredBlock(block);
    if (storedBlock != null) {
      pendingReplications.decrement(storedBlock, node);
    }
    processAndHandleReportedBlock(storageInfo, block, ReplicaState.FINALIZED,
        delHintNode);
  }

  // Classifies one reported block into exactly one of the to-do lists
  // (add / invalidate / corrupt / under-construction) and applies the
  // resulting action.
  private void processAndHandleReportedBlock(
      DatanodeStorageInfo storageInfo, Block block,
      ReplicaState reportedState, DatanodeDescriptor delHintNode)
      throws IOException {
    // blockReceived reports a finalized block
    Collection<BlockInfoToAdd> toAdd = new LinkedList<>();
    Collection<Block> toInvalidate = new LinkedList<Block>();
    Collection<BlockToMarkCorrupt> toCorrupt =
        new LinkedList<BlockToMarkCorrupt>();
    Collection<StatefulBlockInfo> toUC = new LinkedList<StatefulBlockInfo>();
    final DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();

    processReportedBlock(storageInfo, block, reportedState, toAdd,
        toInvalidate, toCorrupt, toUC);
    // the block is only in one of the to-do lists
    // if it is in none then data-node already has it
    assert toUC.size() + toAdd.size() + toInvalidate.size() +
        toCorrupt.size() <= 1 : "The block should be only in one of the lists.";

    for (StatefulBlockInfo b : toUC) {
      addStoredBlockUnderConstruction(b, storageInfo);
    }
    // only the first maxNumBlocksToLog added blocks are logged individually
    long numBlocksLogged = 0;
    for (BlockInfoToAdd b : toAdd) {
      addStoredBlock(b.stored, b.reported, storageInfo, delHintNode,
          numBlocksLogged < maxNumBlocksToLog);
      numBlocksLogged++;
    }
    if (numBlocksLogged > maxNumBlocksToLog) {
      blockLog.debug("BLOCK* addBlock: logged info for {} of {} reported.",
          maxNumBlocksToLog, numBlocksLogged);
    }
    for (Block b : toInvalidate) {
      blockLog.debug("BLOCK* addBlock: block {} on node {} size {} does not " +
          "belong to any file", b, node, b.getNumBytes());
      addToInvalidates(b, node);
    }
    for (BlockToMarkCorrupt b : toCorrupt) {
      markBlockAsCorrupt(b, storageInfo, node);
    }
  }

  /**
   * The given node is reporting incremental information about some blocks.
   * This includes blocks that are starting to be received, completed being
   * received, or deleted.
   *
   * This method must be called with FSNamesystem lock held.
   */
  public void processIncrementalBlockReport(final DatanodeID nodeID,
      final StorageReceivedDeletedBlocks srdb) throws IOException {
    assert namesystem.hasWriteLock();
    int received = 0;
    int deleted = 0;
    int receiving = 0;
    final DatanodeDescriptor node = datanodeManager.getDatanode(nodeID);
    if (node == null || !node.isAlive) {
      blockLog.warn("BLOCK* processIncrementalBlockReport"
          + " is received from dead or unregistered node {}", nodeID);
      throw new IOException(
          "Got incremental block report from unregistered or dead node");
    }

    DatanodeStorageInfo storageInfo =
        node.getStorageInfo(srdb.getStorage().getStorageID());
    if (storageInfo == null) {
      // The DataNode is reporting an unknown storage. Usually the NN learns
      // about new storages from heartbeats but during NN restart we may
      // receive a block report or incremental report before the heartbeat.
      // We must handle this for protocol compatibility. This issue was
      // uncovered by HDFS-6094.
      storageInfo = node.updateStorage(srdb.getStorage());
    }

    for (ReceivedDeletedBlockInfo rdbi : srdb.getBlocks()) {
      switch (rdbi.getStatus()) {
      case DELETED_BLOCK:
        removeStoredBlock(storageInfo, rdbi.getBlock(), node);
        deleted++;
        break;
      case RECEIVED_BLOCK:
        addBlock(storageInfo, rdbi.getBlock(), rdbi.getDelHints());
        received++;
        break;
      case RECEIVING_BLOCK:
        receiving++;
        processAndHandleReportedBlock(storageInfo, rdbi.getBlock(),
            ReplicaState.RBW, null);
        break;
      default:
        String msg = "Unknown block status code reported by " + nodeID +
            ": " + rdbi;
        blockLog.warn(msg);
        assert false : msg; // if assertions are enabled, throw.
        break;
      }
      // per-block trace of what was processed
      blockLog.debug("BLOCK* block {}: {} is received from {}",
          rdbi.getStatus(), rdbi.getBlock(), nodeID);
    }
    // summary counters for the whole incremental report
    blockLog.debug("*BLOCK* NameNode.processIncrementalBlockReport: from "
        + "{} receiving: {}, received: {}, deleted: {}", nodeID, receiving,
        received, deleted);
  }

  /**
   * Return the number of nodes hosting a given block, grouped
   * by the state of those replicas.
   * For a striped block, this includes nodes storing blocks belonging to the
   * striped block group.
   */
  public NumberReplicas countNodes(BlockInfo b) {
    int decommissioned = 0;
    int decommissioning = 0;
    int live = 0;
    int corrupt = 0;
    int excess = 0;
    int stale = 0;
    Collection<DatanodeDescriptor> nodesCorrupt = corruptReplicas.getNodes(b);
    for (DatanodeStorageInfo storage : blocksMap.getStorages(b, State.NORMAL)) {
      final DatanodeDescriptor node = storage.getDatanodeDescriptor();
      if ((nodesCorrupt != null) && (nodesCorrupt.contains(node))) {
        corrupt++;
      } else if (node.isDecommissionInProgress()) {
        decommissioning++;
      } else if (node.isDecommissioned()) {
        decommissioned++;
      } else {
        // a replica is "excess" if it is recorded in excessReplicateMap
        LightWeightLinkedSet<BlockInfo> blocksExcess = excessReplicateMap.get(
            node.getDatanodeUuid());
        if (blocksExcess != null && blocksExcess.contains(b)) {
          excess++;
        } else {
          live++;
        }
      }
      if (storage.areBlockContentsStale()) {
        stale++;
      }
    }
    return new NumberReplicas(live, decommissioned, decommissioning, corrupt,
        excess, stale);
  }

  /**
   * Simpler, faster form of {@link #countNodes} that only returns the number
   * of live nodes. If in startup safemode (or its 30-sec extension period),
   * then it gains speed by ignoring issues of excess replicas or nodes
   * that are decommissioned or in process of becoming decommissioned.
   * If not in startup, then it calls {@link #countNodes} instead.
   *
   * @param b - the block being tested
   * @return count of live nodes for this block
   */
  int countLiveNodes(BlockInfo b) {
    if (!namesystem.isInStartupSafeMode()) {
      return countNodes(b).liveReplicas();
    }
    // else proceed with fast case
    int live = 0;
    Collection<DatanodeDescriptor> nodesCorrupt = corruptReplicas.getNodes(b);
    for (DatanodeStorageInfo storage : blocksMap.getStorages(b, State.NORMAL)) {
      final DatanodeDescriptor node = storage.getDatanodeDescriptor();
      if ((nodesCorrupt == null) || (!nodesCorrupt.contains(node)))
        live++;
    }
    return live;
  }

  /**
   * On stopping decommission, check if the node has excess replicas.
   * If there are any excess replicas, call processOverReplicatedBlock().
   * Process over replicated blocks only when active NN is out of safe mode.
   */
  void processOverReplicatedBlocksOnReCommission(
      final DatanodeDescriptor srcNode) {
    if (!isPopulatingReplQueues()) {
      return;
    }
    final Iterator<BlockInfo> it = srcNode.getBlockIterator();
    int numOverReplicated = 0;
    while (it.hasNext()) {
      final BlockInfo block = it.next();
      int expectedReplication = this.getReplication(block);
      NumberReplicas num = countNodes(block);
      int numCurrentReplica = num.liveReplicas();
      if (numCurrentReplica > expectedReplication) {
        // over-replicated block
        processOverReplicatedBlock(block, (short) expectedReplication, null,
            null);
        numOverReplicated++;
      }
    }
    LOG.info("Invalidated " + numOverReplicated
        + " over-replicated blocks on " + srcNode + " during recommissioning");
  }

  /**
   * Returns whether a node can be safely decommissioned based on its
   * liveness. Dead nodes cannot always be safely decommissioned.
   */
  boolean isNodeHealthyForDecommission(DatanodeDescriptor node) {
    if (!node.checkBlockReportReceived()) {
      LOG.info("Node {} hasn't sent its first block report.", node);
      return false;
    }

    if (node.isAlive) {
      return true;
    }

    updateState();
    if (pendingReplicationBlocksCount == 0 &&
        underReplicatedBlocksCount == 0) {
      LOG.info("Node {} is dead and there are no under-replicated" +
          " blocks or blocks pending replication. Safe to decommission.",
          node);
      return true;
    }

    LOG.warn("Node {} is dead " +
        "while decommission is in progress. Cannot be safely " +
        "decommissioned since there is risk of reduced " +
        "data durability or data loss. Either restart the failed node or" +
        " force decommissioning by removing, calling refreshNodes, " +
        "then re-adding to the excludes files.", node);
    return false;
  }

  public int getActiveBlockCount() {
    return blocksMap.size();
  }

  public DatanodeStorageInfo[] getStorages(BlockInfo block) {
    // block.numNodes() bounds the number of storages returned below
    final DatanodeStorageInfo[] storages =
        new DatanodeStorageInfo[block.numNodes()];
    int i = 0;
    for (DatanodeStorageInfo s : blocksMap.getStorages(block)) {
      storages[i++] = s;
    }
    return storages;
  }

  /** @return an iterator of the datanodes. */
  public Iterable<DatanodeStorageInfo> getStorages(final Block block) {
    return blocksMap.getStorages(block);
  }

  public int getTotalBlocks() {
    return blocksMap.size();
  }

  public void removeBlock(BlockInfo block) {
    assert namesystem.hasWriteLock();
    // No need to ACK blocks that are being removed entirely
    // from the namespace, since the removal of the associated
    // file already removes them from the block map below.
    block.setNumBytes(BlockCommand.NO_ACK);
    addToInvalidates(block);
    removeBlockFromMap(block);
    // Remove the block from pendingReplications and neededReplications
    pendingReplications.remove(block);
    neededReplications.remove(block, UnderReplicatedBlocks.LEVEL);
    if (postponedMisreplicatedBlocks.remove(block)) {
      postponedMisreplicatedBlocksCount.decrementAndGet();
    }
  }

  public BlockInfo getStoredBlock(Block block) {
    if (!BlockIdManager.isStripedBlockID(block.getBlockId())) {
      return blocksMap.getStoredBlock(block);
    }
    if (!hasNonEcBlockUsingStripedID) {
      return blocksMap.getStoredBlock(
          new Block(BlockIdManager.convertToStripedID(block.getBlockId())));
    }
    // striped-range id that may also be a legacy non-EC block: try the
    // literal id first, then fall back to the converted striped group id
    BlockInfo info = blocksMap.getStoredBlock(block);
    if (info != null) {
      return info;
    }
    return blocksMap.getStoredBlock(
        new Block(BlockIdManager.convertToStripedID(block.getBlockId())));
  }

  /** updates a block in under replication queue */
  private void updateNeededReplications(final BlockInfo block,
      final int curReplicasDelta, int expectedReplicasDelta) {
    namesystem.writeLock();
    try {
      if
          (!isPopulatingReplQueues()) {
        return;
      }
      NumberReplicas repl = countNodes(block);
      int curExpectedReplicas = getReplication(block);
      if (isNeededReplication(block, repl.liveReplicas())) {
        neededReplications.update(block, repl.liveReplicas(), repl
            .decommissionedAndDecommissioning(), curExpectedReplicas,
            curReplicasDelta, expectedReplicasDelta);
      } else {
        // delta-adjusted previous values locate the queue entry to remove
        int oldReplicas = repl.liveReplicas() - curReplicasDelta;
        int oldExpectedReplicas = curExpectedReplicas - expectedReplicasDelta;
        neededReplications.remove(block, oldReplicas,
            repl.decommissionedAndDecommissioning(), oldExpectedReplicas);
      }
    } finally {
      namesystem.writeUnlock();
    }
  }

  /**
   * Check replication of the blocks in the collection.
   * If any block is needed replication, insert it into the replication queue.
   * Otherwise, if the block is more than the expected replication factor,
   * process it as an over replicated block.
   */
  public void checkReplication(BlockCollection bc) {
    for (BlockInfo block : bc.getBlocks()) {
      short expected = getExpectedReplicaNum(block);
      final NumberReplicas n = countNodes(block);
      if (isNeededReplication(block, n.liveReplicas())) {
        neededReplications.add(block, n.liveReplicas(),
            n.decommissionedAndDecommissioning(), expected);
      } else if (n.liveReplicas() > expected) {
        processOverReplicatedBlock(block, expected, null, null);
      }
    }
  }

  /**
   * Check that the indicated blocks are present and
   * replicated.
   */
  public boolean checkBlocksProperlyReplicated(
      String src, BlockInfo[] blocks) {
    for (BlockInfo b : blocks) {
      if (!b.isComplete()) {
        final int numNodes = b.numNodes();
        final int min = getMinStorageNum(b);
        final BlockUCState state = b.getBlockUCState();
        LOG.info("BLOCK* " + b + " is not COMPLETE (ucState = " + state
            + ", replication# = " + numNodes
            + (numNodes < min ? " < " : " >= ")
            + " minimum = " + min + ") in file " + src);
        return false;
      }
    }
    return true;
  }

  /**
   * @return 0 if the block is not found;
   *         otherwise, return the replication factor of the block.
   */
  private int getReplication(BlockInfo block) {
    return getExpectedReplicaNum(block);
  }

  /**
   * Get blocks to invalidate for <i>nodeId</i>
   * in {@link #invalidateBlocks}.
   *
   * @return number of blocks scheduled for removal during this iteration.
   */
  private int invalidateWorkForOneNode(DatanodeInfo dn) {
    final List<Block> toInvalidate;

    namesystem.writeLock();
    try {
      // blocks should not be replicated or removed if safe mode is on
      if (namesystem.isInSafeMode()) {
        LOG.debug("In safemode, not computing replication work");
        return 0;
      }
      try {
        DatanodeDescriptor dnDescriptor = datanodeManager.getDatanode(dn);
        if (dnDescriptor == null) {
          LOG.warn("DataNode " + dn + " cannot be found with UUID " +
              dn.getDatanodeUuid() + ", removing block invalidation work.");
          invalidateBlocks.remove(dn);
          return 0;
        }
        toInvalidate = invalidateBlocks.invalidateWork(dnDescriptor);

        if (toInvalidate == null) {
          return 0;
        }
      } catch (UnregisteredNodeException une) {
        return 0;
      }
    } finally {
      namesystem.writeUnlock();
    }
    blockLog.debug("BLOCK* {}: ask {} to delete {}",
        getClass().getSimpleName(), dn, toInvalidate);
    return toInvalidate.size();
  }

  @VisibleForTesting
  public boolean containsInvalidateBlock(final DatanodeInfo dn,
      final Block block) {
    return invalidateBlocks.contains(dn, block);
  }

  // Checks rack spread for a block, dispatching to the striped or
  // contiguous variant; short-circuits when rack checking is disabled.
  boolean blockHasEnoughRacks(BlockInfo storedBlock, int expectedStorageNum) {
    if (!this.shouldCheckForEnoughRacks) {
      return true;
    }
    Collection<DatanodeDescriptor> corruptNodes =
        corruptReplicas.getNodes(storedBlock);
    if (storedBlock.isStriped()) {
      return blockHasEnoughRacksStriped(storedBlock, corruptNodes);
    } else {
      return blockHashEnoughRacksContiguous(storedBlock, expectedStorageNum,
          corruptNodes);
    }
  }

  /**
   * Verify whether given striped block is distributed through enough racks.
   * As dicussed in HDFS-7613, ec file requires racks at least as many as
   * the number of data block number.
   */
  boolean blockHasEnoughRacksStriped(BlockInfo storedBlock,
      Collection<DatanodeDescriptor> corruptNodes) {
    if (!datanodeManager.hasClusterEverBeenMultiRack()) {
      return true;
    }
    boolean enoughRacks = false;
    Set<String> rackNameSet = new HashSet<>();
    int dataBlockNum = ((BlockInfoStriped) storedBlock).getRealDataBlockNum();
    for (DatanodeStorageInfo storage : blocksMap.getStorages(storedBlock)) {
      final DatanodeDescriptor cur = storage.getDatanodeDescriptor();
      if (!cur.isDecommissionInProgress() && !cur.isDecommissioned()) {
        if ((corruptNodes == null) || !corruptNodes.contains(cur)) {
          String rackNameNew = cur.getNetworkLocation();
          rackNameSet.add(rackNameNew);
          if (rackNameSet.size() >= dataBlockNum) {
            enoughRacks = true;
            break;
          }
        }
      }
    }
    return enoughRacks;
  }

  // Contiguous-block variant of the rack check: passes once two distinct
  // racks hold healthy replicas, or trivially when only one storage is
  // expected / the cluster has never been multi-rack.
  // NOTE(review): method name has a historical typo ("Hash" for "Has");
  // kept because callers reference it by this name.
  boolean blockHashEnoughRacksContiguous(BlockInfo storedBlock,
      int expectedStorageNum, Collection<DatanodeDescriptor> corruptNodes) {
    boolean enoughRacks = false;
    String rackName = null;
    for (DatanodeStorageInfo storage : blocksMap.getStorages(storedBlock)) {
      final DatanodeDescriptor cur = storage.getDatanodeDescriptor();
      if (!cur.isDecommissionInProgress() && !cur.isDecommissioned()) {
        if ((corruptNodes == null) || !corruptNodes.contains(cur)) {
          if (expectedStorageNum == 1 || (expectedStorageNum > 1 &&
              !datanodeManager.hasClusterEverBeenMultiRack())) {
            enoughRacks = true;
            break;
          }
          String rackNameNew = cur.getNetworkLocation();
          if (rackName == null) {
            rackName = rackNameNew;
          } else if (!rackName.equals(rackNameNew)) {
            enoughRacks = true;
            break;
          }
        }
      }
    }
    return enoughRacks;
  }

  /**
   * A block needs replication if the number of replicas is less than expected
   * or if it does not have enough racks.
   */
  boolean isNeededReplication(BlockInfo storedBlock, int current) {
    int expected = getExpectedReplicaNum(storedBlock);
    return current < expected || !blockHasEnoughRacks(storedBlock, expected);
  }

  public short getExpectedReplicaNum(BlockInfo block) {
    return block.isStriped() ?
        ((BlockInfoStriped) block).getRealTotalBlockNum() :
        block.getReplication();
  }

  public long getMissingBlocksCount() {
    // not locking
    return this.neededReplications.getCorruptBlockSize();
  }

  public long getMissingReplOneBlocksCount() {
    // not locking
    return this.neededReplications.getCorruptReplOneBlockSize();
  }

  public BlockInfo addBlockCollection(BlockInfo block, BlockCollection bc) {
    return blocksMap.addBlockCollection(block, bc);
  }

  /**
   * Do some check when adding a block to blocksmap.
   * For HDFS-7994 to check whether the block is a NonEcBlockUsingStripedID.
   */
  public BlockInfo addBlockCollectionWithCheck(
      BlockInfo block, BlockCollection bc) {
    if (!hasNonEcBlockUsingStripedID && !block.isStriped() &&
        BlockIdManager.isStripedBlockID(block.getBlockId())) {
      hasNonEcBlockUsingStripedID = true;
    }
    return addBlockCollection(block, bc);
  }

  public BlockCollection getBlockCollection(BlockInfo b) {
    return namesystem.getBlockCollection(b.getBlockCollectionId());
  }

  public int numCorruptReplicas(Block block) {
    return corruptReplicas.numCorruptReplicas(block);
  }

  public void removeBlockFromMap(Block block) {
    removeFromExcessReplicateMap(block);
    blocksMap.removeBlock(block);
    // If block is removed from blocksMap remove it from corruptReplicasMap
    corruptReplicas.removeFromCorruptReplicasMap(block);
  }

  /**
   * If a block is removed from blocksMap, remove it from excessReplicateMap.
   */
  private void removeFromExcessReplicateMap(Block block) {
    for (DatanodeStorageInfo info : blocksMap.getStorages(block)) {
      String uuid = info.getDatanodeDescriptor().getDatanodeUuid();
      LightWeightLinkedSet<BlockInfo> excessReplicas =
          excessReplicateMap.get(uuid);
      if (excessReplicas != null) {
        if (excessReplicas.remove(block)) {
          excessBlocksCount.decrementAndGet();
          // drop the per-datanode entry once its set becomes empty
          if (excessReplicas.isEmpty()) {
            excessReplicateMap.remove(uuid);
          }
        }
      }
    }
  }

  public int getCapacity() {
    return blocksMap.getCapacity();
  }

  /**
   * Return an iterator over the set of blocks for which there are no replicas.
   */
  public Iterator<BlockInfo> getCorruptReplicaBlockIterator() {
    return neededReplications.iterator(
        UnderReplicatedBlocks.QUEUE_WITH_CORRUPT_BLOCKS);
  }

  /**
   * Get the replicas which are corrupt for a given block.
   */
  public Collection<DatanodeDescriptor> getCorruptReplicas(Block block) {
    return corruptReplicas.getNodes(block);
  }

  /**
   * Get reason for certain corrupted replicas for a given block and a given dn.
   */
  public String getCorruptReason(Block block, DatanodeDescriptor node) {
    return corruptReplicas.getCorruptReason(block, node);
  }

  /** @return the size of UnderReplicatedBlocks */
  public int numOfUnderReplicatedBlocks() {
    return neededReplications.size();
  }

  /**
   * Periodically calls computeBlockRecoveryWork().
   */
  private class ReplicationMonitor implements Runnable {

    @Override
    public void run() {
      while (namesystem.isRunning()) {
        try {
          // Process replication work only when active NN is out of safe mode.
          if (isPopulatingReplQueues()) {
            computeDatanodeWork();
            processPendingReplications();
            rescanPostponedMisreplicatedBlocks();
          }
          // sleep between scan rounds
          Thread.sleep(replicationRecheckInterval);
        } catch (Throwable t) {
          if (!namesystem.isRunning()) {
            LOG.info("Stopping ReplicationMonitor.");
            if (!(t instanceof InterruptedException)) {
              LOG.info("ReplicationMonitor received an exception"
                  + " while shutting down.", t);
            }
            break;
          } else if (!checkNSRunning && t instanceof InterruptedException) {
            LOG.info("Stopping ReplicationMonitor for testing.");
            break;
          }
          LOG.error("ReplicationMonitor thread received Runtime exception. ",
              t);
          terminate(1, t);
        }
      }
    }
  }

  /**
   * Compute block replication and block invalidation work that can be scheduled
   * on data-nodes. The datanode will be informed of this work at the next
   * heartbeat.
   *
   * @return number of blocks scheduled for replication or removal.
   */
  int computeDatanodeWork() {
    // Blocks should not be replicated or removed if in safe mode.
// It's OK to check safe mode here w/o holding lock, in the worst // case extra replications will be scheduled, and these will get // fixed up later. if (namesystem.isInSafeMode()) { return 0; } final int numlive = heartbeatManager.getLiveDatanodeCount(); final int blocksToProcess = numlive * this.blocksReplWorkMultiplier; final int nodesToProcess = (int) Math.ceil(numlive * this.blocksInvalidateWorkPct); int workFound = this.computeBlockRecoveryWork(blocksToProcess); // Update counters namesystem.writeLock(); try { this.updateState(); this.scheduledReplicationBlocksCount = workFound; } finally { namesystem.writeUnlock(); } workFound += this.computeInvalidateWork(nodesToProcess); return workFound; } /** * Clear all queues that hold decisions previously made by * this NameNode. */ public void clearQueues() { neededReplications.clear(); pendingReplications.clear(); excessReplicateMap.clear(); invalidateBlocks.clear(); datanodeManager.clearPendingQueues(); postponedMisreplicatedBlocks.clear(); postponedMisreplicatedBlocksCount.set(0); }; public static LocatedBlock newLocatedBlock( ExtendedBlock b, DatanodeStorageInfo[] storages, long startOffset, boolean corrupt) { // startOffset is unknown return new LocatedBlock( b, DatanodeStorageInfo.toDatanodeInfos(storages), DatanodeStorageInfo.toStorageIDs(storages), DatanodeStorageInfo.toStorageTypes(storages), startOffset, corrupt, null); } public static LocatedStripedBlock newLocatedStripedBlock( ExtendedBlock b, DatanodeStorageInfo[] storages, int[] indices, long startOffset, boolean corrupt) { // startOffset is unknown return new LocatedStripedBlock( b, DatanodeStorageInfo.toDatanodeInfos(storages), DatanodeStorageInfo.toStorageIDs(storages), DatanodeStorageInfo.toStorageTypes(storages), indices, startOffset, corrupt, null); } public static LocatedBlock newLocatedBlock(ExtendedBlock eb, BlockInfo info, DatanodeStorageInfo[] locs, long offset) throws IOException { final LocatedBlock lb; if (info.isStriped()) { lb = 
newLocatedStripedBlock(eb, locs, info.getUnderConstructionFeature().getBlockIndices(), offset, false); } else { lb = newLocatedBlock(eb, locs, offset, false); } return lb; } /** * A simple result enum for the result of * {@link BlockManager#processMisReplicatedBlock(BlockInfo)}. */ enum MisReplicationResult { /** The block should be invalidated since it belongs to a deleted file. */ INVALID, /** The block is currently under-replicated. */ UNDER_REPLICATED, /** The block is currently over-replicated. */ OVER_REPLICATED, /** A decision can't currently be made about this block. */ POSTPONE, /** The block is under construction, so should be ignored. */ UNDER_CONSTRUCTION, /** The block is properly replicated. */ OK } public void shutdown() { stopReplicationInitializer(); blocksMap.close(); MBeans.unregister(mxBeanName); mxBeanName = null; } public void clear() { clearQueues(); blocksMap.clear(); } public BlockReportLeaseManager getBlockReportLeaseManager() { return blockReportLeaseManager; } @Override // BlockStatsMXBean public Map<StorageType, StorageTypeStats> getStorageTypeStats() { return datanodeManager.getDatanodeStatistics().getStorageTypeStats(); } /** * Initialize replication queues. */ public void initializeReplQueues() { LOG.info("initializing replication queues"); processMisReplicatedBlocks(); initializedReplQueues = true; } /** * Check if replication queues are to be populated * @return true when node is HAState.Active and not in the very first safemode */ public boolean isPopulatingReplQueues() { if (!shouldPopulateReplQueues()) { return false; } return initializedReplQueues; } public void setInitializedReplQueues(boolean v) { this.initializedReplQueues = v; } public boolean shouldPopulateReplQueues() { HAContext haContext = namesystem.getHAContext(); if (haContext == null || haContext.getState() == null) return false; return haContext.getState().shouldPopulateReplQueues(); } }
apache-2.0
JavaTrainingCourse/obog-manager
src/main/java/com/github/javatrainingcourse/obogmanager/ui/view/LoginView.java
5759
/* * Copyright (c) 2017-2018 mikan */ package com.github.javatrainingcourse.obogmanager.ui.view; import com.github.javatrainingcourse.obogmanager.Version; import com.github.javatrainingcourse.obogmanager.domain.model.Convocation; import com.github.javatrainingcourse.obogmanager.domain.model.Membership; import com.github.javatrainingcourse.obogmanager.domain.service.AttendanceService; import com.github.javatrainingcourse.obogmanager.domain.service.ConvocationService; import com.github.javatrainingcourse.obogmanager.domain.service.MembershipService; import com.github.javatrainingcourse.obogmanager.ui.MainUI; import com.github.javatrainingcourse.obogmanager.ui.component.HeadingLabel; import com.github.javatrainingcourse.obogmanager.ui.layout.Wrapper; import com.vaadin.event.ShortcutAction; import com.vaadin.event.ShortcutListener; import com.vaadin.icons.VaadinIcons; import com.vaadin.navigator.View; import com.vaadin.navigator.ViewChangeListener; import com.vaadin.spring.annotation.SpringView; import com.vaadin.ui.*; import com.vaadin.ui.themes.ValoTheme; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.beans.factory.annotation.Value; import org.springframework.security.core.AuthenticationException; /** * ログイン画面です。 * * @author mikan * @since 0.1 */ @SpringView(name = LoginView.VIEW_NAME) public class LoginView extends Wrapper implements View { public static final String VIEW_NAME = "login"; private static final long serialVersionUID = Version.OBOG_MANAGER_SERIAL_VERSION_UID; private transient final MembershipService membershipService; private transient final ConvocationService convocationService; private transient final AttendanceService attendanceService; private static final Logger log = LoggerFactory.getLogger(LoginView.class); @Value("${server.port}") private String serverPort; @Autowired public LoginView(MembershipService membershipService, ConvocationService 
convocationService, AttendanceService attendanceService) { this.membershipService = membershipService; this.convocationService = convocationService; this.attendanceService = attendanceService; } @Override public void enter(ViewChangeListener.ViewChangeEvent event) { addComponent(new HeadingLabel("会員ログイン", VaadinIcons.USER)); var form = new FormLayout(); form.setMargin(false); addComponent(form); var emailField = new TextField("E-mail アドレス"); emailField.setWidth(MainUI.FIELD_WIDTH_WIDE, Unit.PIXELS); emailField.focus(); form.addComponent(emailField); var passwordField = new PasswordField("パスワード"); passwordField.setWidth(MainUI.FIELD_WIDTH_WIDE, Unit.PIXELS); form.addComponent(passwordField); var buttonArea = new HorizontalLayout(); buttonArea.setSpacing(true); addComponent(buttonArea); setComponentAlignment(buttonArea, Alignment.MIDDLE_CENTER); var backButton = new Button("戻る", click -> getUI().getNavigator().navigateTo(FrontView.VIEW_NAME)); buttonArea.addComponent(backButton); var passwordResetButton = new Button("パスワードリセット", click -> getUI().getNavigator().navigateTo(ResetPasswordView.VIEW_NAME)); buttonArea.addComponent(passwordResetButton); var loginButton = new Button("ログイン", click -> { if (emailField.isEmpty() || passwordField.isEmpty()) { Notification.show("入力が完了していません"); return; } // Authentication Membership membership; try { membership = membershipService.login(emailField.getValue(), passwordField.getValue()); } catch (AuthenticationException e) { log.info("Authentication failed: " + e.getMessage()); ErrorView.show("E-mail が存在しないか、パスワードが一致していません。", null); return; } catch (RuntimeException e) { ErrorView.show("ログイン処理に失敗しました。", e); return; } // Find latest convocation Convocation latest; try { latest = convocationService.getLatestConvocation(); } catch (IllegalStateException e) { // before creating the first convocation getUI().getNavigator().navigateTo(MenuView.VIEW_NAME); return; } // Switch transition by attend or not var attendance = 
attendanceService.find(membership, latest); if (attendance == null || !attendance.isAttend()) { getUI().getNavigator().navigateTo(FrontView.VIEW_NAME); } else { getUI().getNavigator().navigateTo(MenuView.VIEW_NAME); } }); loginButton.setIcon(VaadinIcons.SIGN_IN); loginButton.setStyleName(ValoTheme.BUTTON_PRIMARY); buttonArea.addComponent(loginButton); passwordField.addShortcutListener(new ShortcutListener("Enter キーを押すとログインします", ShortcutAction.KeyCode.ENTER, null) { private static final long serialVersionUID = 1L; @Override public void handleAction(Object sender, Object target) { loginButton.click(); } }); } }
apache-2.0
BBN-E/Adept
adept-api/src/main/java/adept/common/LatticePath.java
1846
package adept.common;

/*-
 * #%L
 * adept-api
 * %%
 * Copyright (C) 2012 - 2017 Raytheon BBN Technologies
 * %%
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * #L%
 */

import static com.google.common.base.Preconditions.checkArgument;

import java.io.Serializable;
import java.util.List;

/**
 * A single weighted path through a lattice, represented as an ordered
 * sequence of token streams together with a path weight.
 */
public class LatticePath implements Serializable {

    private static final long serialVersionUID = -3112044093379521747L;

    // Path weight; 0 until explicitly assigned.
    private float weight;

    // Token streams forming this path; null until explicitly assigned.
    private List<TokenStream> tokenStreamList;

    /**
     * Creates an empty lattice path with no token streams and zero weight.
     */
    public LatticePath() {
    }

    /**
     * Returns the weight assigned to this path.
     *
     * @return the path weight
     */
    public float getWeight() {
        return weight;
    }

    /**
     * Assigns a weight to this path.
     *
     * @param weight the new path weight
     */
    public void setWeight(float weight) {
        this.weight = weight;
    }

    /**
     * Returns the token streams forming this path, or null if none were set.
     *
     * @return the token stream list
     */
    public List<TokenStream> getTokenStreamList() {
        return tokenStreamList;
    }

    /**
     * Assigns the token streams forming this path.
     *
     * @param tokenStreamList the new token stream list; must not be null
     */
    public void setTokenStreamList(List<TokenStream> tokenStreamList) {
        checkArgument(tokenStreamList != null);
        this.tokenStreamList = tokenStreamList;
    }
}
apache-2.0
Widen/metadata-extractor
Source/com/drew/metadata/mov/media/QtSoundDirectory.java
1465
package com.drew.metadata.mov.media; import com.drew.lang.annotations.NotNull; import com.drew.metadata.mov.QtDirectory; import java.util.HashMap; public class QtSoundDirectory extends QtDirectory { // Sound Sample Description Atom public static final int TAG_AUDIO_FORMAT = 0x0301; public static final int TAG_NUMBER_OF_CHANNELS = 0x0302; public static final int TAG_AUDIO_SAMPLE_SIZE = 0x0303; public static final int TAG_AUDIO_SAMPLE_RATE = 0x0304; public static final int TAG_SOUND_BALANCE = 0x0305; public QtSoundDirectory() { this.setDescriptor(new QtSoundDescriptor(this)); } @NotNull protected static final HashMap<Integer, String> _tagNameMap = new HashMap<Integer, String>(); static { QtMediaDirectory.addQtMediaTags(_tagNameMap); _tagNameMap.put(TAG_AUDIO_FORMAT, "Format"); _tagNameMap.put(TAG_NUMBER_OF_CHANNELS, "Number of Channels"); _tagNameMap.put(TAG_AUDIO_SAMPLE_SIZE, "Sample Size"); _tagNameMap.put(TAG_AUDIO_SAMPLE_RATE, "Sample Rate"); _tagNameMap.put(TAG_SOUND_BALANCE, "Balance"); } @Override @NotNull public String getName() { return "QT Sound"; } @Override @NotNull protected HashMap<Integer, String> getTagNameMap() { return _tagNameMap; } }
apache-2.0
googleads/google-ads-java
google-ads-stubs-v10/src/main/java/com/google/ads/googleads/v10/resources/CampaignAssetProto.java
4595
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: google/ads/googleads/v10/resources/campaign_asset.proto

package com.google.ads.googleads.v10.resources;

// Generated outer class for campaign_asset.proto. Holds the file descriptor
// and the accessor table for the CampaignAsset message. Do not hand-edit:
// changes are lost when the proto is regenerated.
public final class CampaignAssetProto {
  // Non-instantiable holder class.
  private CampaignAssetProto() {}
  // No file-level extensions are declared in this proto, so registration is a no-op.
  public static void registerAllExtensions(
      com.google.protobuf.ExtensionRegistryLite registry) {
  }
  public static void registerAllExtensions(
      com.google.protobuf.ExtensionRegistry registry) {
    registerAllExtensions(
        (com.google.protobuf.ExtensionRegistryLite) registry);
  }
  // Descriptor and accessor table for the CampaignAsset message; assigned in
  // the static initializer below.
  static final com.google.protobuf.Descriptors.Descriptor
    internal_static_google_ads_googleads_v10_resources_CampaignAsset_descriptor;
  static final
    com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_google_ads_googleads_v10_resources_CampaignAsset_fieldAccessorTable;

  public static com.google.protobuf.Descriptors.FileDescriptor
      getDescriptor() {
    return descriptor;
  }
  private static com.google.protobuf.Descriptors.FileDescriptor
      descriptor;
  static {
    // Serialized FileDescriptorProto for campaign_asset.proto (binary data
    // encoded as escaped string literals — do not modify).
    java.lang.String[] descriptorData = {
      "\n7google/ads/googleads/v10/resources/cam" +
      "paign_asset.proto\022\"google.ads.googleads." +
      "v10.resources\0325google/ads/googleads/v10/" +
      "enums/asset_field_type.proto\0326google/ads" +
      "/googleads/v10/enums/asset_link_status.p" +
      "roto\032\034google/api/annotations.proto\032\037goog" +
      "le/api/field_behavior.proto\032\031google/api/" +
      "resource.proto\"\227\004\n\rCampaignAsset\022E\n\rreso" +
      "urce_name\030\001 \001(\tB.\340A\005\372A(\n&googleads.googl" +
      "eapis.com/CampaignAsset\022@\n\010campaign\030\006 \001(" +
      "\tB)\340A\005\372A#\n!googleads.googleapis.com/Camp" +
      "aignH\000\210\001\001\022:\n\005asset\030\007 \001(\tB&\340A\005\372A \n\036google" +
      "ads.googleapis.com/AssetH\001\210\001\001\022Z\n\nfield_t" +
      "ype\030\004 \001(\0162A.google.ads.googleads.v10.enu" +
      "ms.AssetFieldTypeEnum.AssetFieldTypeB\003\340A" +
      "\005\022S\n\006status\030\005 \001(\0162C.google.ads.googleads" +
      ".v10.enums.AssetLinkStatusEnum.AssetLink" +
      "Status:y\352Av\n&googleads.googleapis.com/Ca" +
      "mpaignAsset\022Lcustomers/{customer_id}/cam" +
      "paignAssets/{campaign_id}~{asset_id}~{fi" +
      "eld_type}B\013\n\t_campaignB\010\n\006_assetB\204\002\n&com" +
      ".google.ads.googleads.v10.resourcesB\022Cam" +
      "paignAssetProtoP\001ZKgoogle.golang.org/gen" +
      "proto/googleapis/ads/googleads/v10/resou" +
      "rces;resources\242\002\003GAA\252\002\"Google.Ads.Google" +
      "Ads.V10.Resources\312\002\"Google\\Ads\\GoogleAds" +
      "\\V10\\Resources\352\002&Google::Ads::GoogleAds:" +
      ":V10::Resourcesb\006proto3"
    };
    // Build the file descriptor against its imported dependency descriptors.
    descriptor = com.google.protobuf.Descriptors.FileDescriptor
      .internalBuildGeneratedFileFrom(descriptorData,
        new com.google.protobuf.Descriptors.FileDescriptor[] {
          com.google.ads.googleads.v10.enums.AssetFieldTypeProto.getDescriptor(),
          com.google.ads.googleads.v10.enums.AssetLinkStatusProto.getDescriptor(),
          com.google.api.AnnotationsProto.getDescriptor(),
          com.google.api.FieldBehaviorProto.getDescriptor(),
          com.google.api.ResourceProto.getDescriptor(),
        });
    internal_static_google_ads_googleads_v10_resources_CampaignAsset_descriptor =
      getDescriptor().getMessageTypes().get(0);
    // Field names are listed first, followed by synthetic oneof names for the
    // proto3 optional fields — hence "Campaign" and "Asset" appear twice.
    internal_static_google_ads_googleads_v10_resources_CampaignAsset_fieldAccessorTable = new
      com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_google_ads_googleads_v10_resources_CampaignAsset_descriptor,
        new java.lang.String[] { "ResourceName", "Campaign", "Asset", "FieldType", "Status", "Campaign", "Asset", });
    // Re-interpret the descriptor with custom option extensions registered so
    // that resource/field-behavior annotations are readable at runtime.
    com.google.protobuf.ExtensionRegistry registry =
        com.google.protobuf.ExtensionRegistry.newInstance();
    registry.add(com.google.api.FieldBehaviorProto.fieldBehavior);
    registry.add(com.google.api.ResourceProto.resource);
    registry.add(com.google.api.ResourceProto.resourceReference);
    com.google.protobuf.Descriptors.FileDescriptor
        .internalUpdateFileDescriptor(descriptor, registry);
    com.google.ads.googleads.v10.enums.AssetFieldTypeProto.getDescriptor();
    com.google.ads.googleads.v10.enums.AssetLinkStatusProto.getDescriptor();
    com.google.api.AnnotationsProto.getDescriptor();
    com.google.api.FieldBehaviorProto.getDescriptor();
    com.google.api.ResourceProto.getDescriptor();
  }

  // @@protoc_insertion_point(outer_class_scope)
}
apache-2.0
oplinkoms/onos
apps/segmentrouting/app/src/main/java/org/onosproject/segmentrouting/SegmentRoutingManager.java
108504
/* * Copyright 2015-present Open Networking Foundation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.onosproject.segmentrouting; import com.google.common.collect.HashMultimap; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Maps; import com.google.common.collect.Multimap; import com.google.common.collect.Sets; import org.onlab.packet.Ethernet; import org.onlab.packet.ICMP6; import org.onlab.packet.IPv4; import org.onlab.packet.IPv6; import org.onlab.packet.IpAddress; import org.onlab.packet.IpPrefix; import org.onlab.packet.MacAddress; import org.onlab.packet.VlanId; import org.onlab.util.KryoNamespace; import org.onlab.util.Tools; import org.onosproject.cfg.ComponentConfigService; import org.onosproject.cluster.ClusterEvent; import org.onosproject.cluster.ClusterEventListener; import org.onosproject.cluster.ClusterService; import org.onosproject.cluster.LeadershipService; import org.onosproject.cluster.NodeId; import org.onosproject.core.ApplicationId; import org.onosproject.core.CoreService; import org.onosproject.event.Event; import org.onosproject.mastership.MastershipEvent; import org.onosproject.mastership.MastershipListener; import org.onosproject.mastership.MastershipService; import org.onosproject.mcast.api.McastEvent; import org.onosproject.mcast.api.McastListener; import org.onosproject.mcast.api.MulticastRouteService; import org.onosproject.net.ConnectPoint; import org.onosproject.net.Device; import 
org.onosproject.net.DeviceId; import org.onosproject.net.Host; import org.onosproject.net.HostId; import org.onosproject.net.Link; import org.onosproject.net.Port; import org.onosproject.net.PortNumber; import org.onosproject.net.config.ConfigException; import org.onosproject.net.config.ConfigFactory; import org.onosproject.net.config.NetworkConfigEvent; import org.onosproject.net.config.NetworkConfigListener; import org.onosproject.net.config.NetworkConfigRegistry; import org.onosproject.net.config.basics.InterfaceConfig; import org.onosproject.net.config.basics.McastConfig; import org.onosproject.net.config.basics.SubjectFactories; import org.onosproject.net.device.DeviceAdminService; import org.onosproject.net.device.DeviceEvent; import org.onosproject.net.device.DeviceListener; import org.onosproject.net.device.DeviceService; import org.onosproject.net.flow.TrafficSelector; import org.onosproject.net.flow.TrafficTreatment; import org.onosproject.net.flowobjective.FlowObjectiveService; import org.onosproject.net.flowobjective.NextObjective; import org.onosproject.net.host.HostEvent; import org.onosproject.net.host.HostListener; import org.onosproject.net.host.HostProbingService; import org.onosproject.net.host.HostService; import org.onosproject.net.host.InterfaceIpAddress; import org.onosproject.net.intent.WorkPartitionService; import org.onosproject.net.intf.Interface; import org.onosproject.net.intf.InterfaceService; import org.onosproject.net.link.LinkEvent; import org.onosproject.net.link.LinkListener; import org.onosproject.net.link.LinkService; import org.onosproject.net.neighbour.NeighbourResolutionService; import org.onosproject.net.packet.InboundPacket; import org.onosproject.net.packet.PacketContext; import org.onosproject.net.packet.PacketProcessor; import org.onosproject.net.packet.PacketService; import org.onosproject.net.topology.TopologyEvent; import org.onosproject.net.topology.TopologyListener; import 
org.onosproject.net.topology.TopologyService; import org.onosproject.routeservice.ResolvedRoute; import org.onosproject.routeservice.RouteEvent; import org.onosproject.routeservice.RouteListener; import org.onosproject.routeservice.RouteService; import org.onosproject.segmentrouting.config.DeviceConfigNotFoundException; import org.onosproject.segmentrouting.config.DeviceConfiguration; import org.onosproject.segmentrouting.config.SegmentRoutingAppConfig; import org.onosproject.segmentrouting.config.SegmentRoutingDeviceConfig; import org.onosproject.segmentrouting.grouphandler.DefaultGroupHandler; import org.onosproject.segmentrouting.grouphandler.DestinationSet; import org.onosproject.segmentrouting.grouphandler.NextNeighbors; import org.onosproject.segmentrouting.mcast.McastFilteringObjStoreKey; import org.onosproject.segmentrouting.mcast.McastHandler; import org.onosproject.segmentrouting.mcast.McastRole; import org.onosproject.segmentrouting.mcast.McastRoleStoreKey; import org.onosproject.segmentrouting.mcast.McastStoreKey; import org.onosproject.segmentrouting.phasedrecovery.api.PhasedRecoveryService; import org.onosproject.segmentrouting.pwaas.DefaultL2Tunnel; import org.onosproject.segmentrouting.pwaas.DefaultL2TunnelDescription; import org.onosproject.segmentrouting.pwaas.DefaultL2TunnelHandler; import org.onosproject.segmentrouting.pwaas.DefaultL2TunnelPolicy; import org.onosproject.segmentrouting.pwaas.L2Tunnel; import org.onosproject.segmentrouting.pwaas.L2TunnelDescription; import org.onosproject.segmentrouting.pwaas.L2TunnelHandler; import org.onosproject.segmentrouting.pwaas.L2TunnelPolicy; import org.onosproject.segmentrouting.storekey.DestinationSetNextObjectiveStoreKey; import org.onosproject.segmentrouting.storekey.MacVlanNextObjectiveStoreKey; import org.onosproject.segmentrouting.storekey.PortNextObjectiveStoreKey; import org.onosproject.segmentrouting.storekey.VlanNextObjectiveStoreKey; import 
org.onosproject.segmentrouting.storekey.XConnectStoreKey; import org.onosproject.segmentrouting.xconnect.api.XconnectService; import org.onosproject.store.serializers.KryoNamespaces; import org.onosproject.store.service.EventuallyConsistentMap; import org.onosproject.store.service.EventuallyConsistentMapBuilder; import org.onosproject.store.service.StorageService; import org.onosproject.store.service.WallClockTimestamp; import org.osgi.service.component.ComponentContext; import org.osgi.service.component.annotations.Activate; import org.osgi.service.component.annotations.Component; import org.osgi.service.component.annotations.Deactivate; import org.osgi.service.component.annotations.Modified; import org.osgi.service.component.annotations.Reference; import org.osgi.service.component.annotations.ReferenceCardinality; import org.osgi.service.component.annotations.ReferencePolicy; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.time.Instant; import java.util.ArrayList; import java.util.Collections; import java.util.Dictionary; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Collectors; import static com.google.common.base.Preconditions.checkState; import static org.onlab.packet.Ethernet.TYPE_ARP; import static org.onlab.util.Tools.groupedThreads; import static org.onosproject.net.config.NetworkConfigEvent.Type.CONFIG_REGISTERED; import static org.onosproject.net.config.NetworkConfigEvent.Type.CONFIG_UNREGISTERED; import static 
org.onosproject.segmentrouting.OsgiPropertyConstants.ACTIVE_PROBING_DEFAULT; import static org.onosproject.segmentrouting.OsgiPropertyConstants.DEFAULT_INTERNAL_VLAN_DEFAULT; import static org.onosproject.segmentrouting.OsgiPropertyConstants.PROP_ACTIVE_PROBING; import static org.onosproject.segmentrouting.OsgiPropertyConstants.PROP_DEFAULT_INTERNAL_VLAN; import static org.onosproject.segmentrouting.OsgiPropertyConstants.PROP_PW_TRANSPORT_VLAN; import static org.onosproject.segmentrouting.OsgiPropertyConstants.PROP_RESPOND_TO_UNKNOWN_HOSTS; import static org.onosproject.segmentrouting.OsgiPropertyConstants.PROP_ROUTE_DOUBLE_TAGGED_HOSTS; import static org.onosproject.segmentrouting.OsgiPropertyConstants.PROP_ROUTE_SIMPLIFICATION; import static org.onosproject.segmentrouting.OsgiPropertyConstants.PROP_SINGLE_HOMED_DOWN; import static org.onosproject.segmentrouting.OsgiPropertyConstants.PROP_SYMMETRIC_PROBING; import static org.onosproject.segmentrouting.OsgiPropertyConstants.PW_TRANSPORT_VLAN_DEFAULT; import static org.onosproject.segmentrouting.OsgiPropertyConstants.RESPOND_TO_UNKNOWN_HOSTS_DEFAULT; import static org.onosproject.segmentrouting.OsgiPropertyConstants.ROUTE_DOUBLE_TAGGED_HOSTS_DEFAULT; import static org.onosproject.segmentrouting.OsgiPropertyConstants.ROUTE_SIMPLIFICATION_DEFAULT; import static org.onosproject.segmentrouting.OsgiPropertyConstants.SINGLE_HOMED_DOWN_DEFAULT; import static org.onosproject.segmentrouting.OsgiPropertyConstants.SYMMETRIC_PROBING_DEFAULT; /** * Segment routing manager. 
*/ @Component( immediate = true, service = SegmentRoutingService.class, property = { PROP_ACTIVE_PROBING + ":Boolean=" + ACTIVE_PROBING_DEFAULT, PROP_SINGLE_HOMED_DOWN + ":Boolean=" + SINGLE_HOMED_DOWN_DEFAULT, PROP_RESPOND_TO_UNKNOWN_HOSTS + ":Boolean=" + RESPOND_TO_UNKNOWN_HOSTS_DEFAULT, PROP_ROUTE_DOUBLE_TAGGED_HOSTS + ":Boolean=" + ROUTE_DOUBLE_TAGGED_HOSTS_DEFAULT, PROP_DEFAULT_INTERNAL_VLAN + ":Integer=" + DEFAULT_INTERNAL_VLAN_DEFAULT, PROP_PW_TRANSPORT_VLAN + ":Integer=" + PW_TRANSPORT_VLAN_DEFAULT, PROP_SYMMETRIC_PROBING + ":Boolean=" + SYMMETRIC_PROBING_DEFAULT, PROP_ROUTE_SIMPLIFICATION + ":Boolean=" + ROUTE_SIMPLIFICATION_DEFAULT } ) public class SegmentRoutingManager implements SegmentRoutingService { private static Logger log = LoggerFactory.getLogger(SegmentRoutingManager.class); private static final String NOT_MASTER = "Current instance is not the master of {}. Ignore."; @Reference(cardinality = ReferenceCardinality.MANDATORY) private ComponentConfigService compCfgService; @Reference(cardinality = ReferenceCardinality.MANDATORY) public NeighbourResolutionService neighbourResolutionService; @Reference(cardinality = ReferenceCardinality.MANDATORY) public CoreService coreService; @Reference(cardinality = ReferenceCardinality.MANDATORY) PacketService packetService; @Reference(cardinality = ReferenceCardinality.MANDATORY) HostService hostService; @Reference(cardinality = ReferenceCardinality.MANDATORY) HostProbingService probingService; @Reference(cardinality = ReferenceCardinality.MANDATORY) public DeviceService deviceService; @Reference(cardinality = ReferenceCardinality.MANDATORY) DeviceAdminService deviceAdminService; @Reference(cardinality = ReferenceCardinality.MANDATORY) public FlowObjectiveService flowObjectiveService; @Reference(cardinality = ReferenceCardinality.MANDATORY) public LinkService linkService; @Reference(cardinality = ReferenceCardinality.MANDATORY) public MastershipService mastershipService; @Reference(cardinality = 
ReferenceCardinality.MANDATORY) public StorageService storageService; @Reference(cardinality = ReferenceCardinality.MANDATORY) public MulticastRouteService multicastRouteService; @Reference(cardinality = ReferenceCardinality.MANDATORY) public TopologyService topologyService; @Reference(cardinality = ReferenceCardinality.MANDATORY) public RouteService routeService; @Reference(cardinality = ReferenceCardinality.MANDATORY) public NetworkConfigRegistry cfgService; @Reference(cardinality = ReferenceCardinality.MANDATORY) public InterfaceService interfaceService; @Reference(cardinality = ReferenceCardinality.MANDATORY) public ClusterService clusterService; @Reference(cardinality = ReferenceCardinality.MANDATORY) public WorkPartitionService workPartitionService; @Reference(cardinality = ReferenceCardinality.MANDATORY) public LeadershipService leadershipService; @Reference(cardinality = ReferenceCardinality.OPTIONAL, policy = ReferencePolicy.DYNAMIC) public volatile XconnectService xconnectService; @Reference(cardinality = ReferenceCardinality.OPTIONAL, policy = ReferencePolicy.DYNAMIC) volatile PhasedRecoveryService phasedRecoveryService; /** Enable active probing to discover dual-homed hosts. */ boolean activeProbing = ACTIVE_PROBING_DEFAULT; /** Enable only send probe on the same port number of the pair device. */ boolean symmetricProbing = SYMMETRIC_PROBING_DEFAULT; /** Enable administratively taking down single-homed hosts. */ boolean singleHomedDown = SINGLE_HOMED_DOWN_DEFAULT; /** Enable this to respond to ARP/NDP requests from unknown hosts. */ boolean respondToUnknownHosts = RESPOND_TO_UNKNOWN_HOSTS_DEFAULT; /** Program flows and groups to pop and route double tagged hosts. */ boolean routeDoubleTaggedHosts = ROUTE_DOUBLE_TAGGED_HOSTS_DEFAULT; /** internal vlan assigned by default to unconfigured ports. */ private int defaultInternalVlan = DEFAULT_INTERNAL_VLAN_DEFAULT; /** vlan used for transport of pseudowires between switches. 
 */
    private int pwTransportVlan = PW_TRANSPORT_VLAN_DEFAULT;

    /** Enabling route simplification. */
    boolean routeSimplification = ROUTE_SIMPLIFICATION_DEFAULT;

    // Protocol/routing handlers; package-private fields are shared with
    // collaborating classes in this package, private ones are internal only.
    ArpHandler arpHandler = null;
    IcmpHandler icmpHandler = null;
    IpHandler ipHandler = null;
    RoutingRulePopulator routingRulePopulator = null;
    ApplicationId appId;
    DeviceConfiguration deviceConfiguration = null;
    DefaultRoutingHandler defaultRoutingHandler = null;
    private TunnelHandler tunnelHandler = null;
    private PolicyHandler policyHandler = null;
    private InternalPacketProcessor processor = null;
    private InternalLinkListener linkListener = null;
    private InternalDeviceListener deviceListener = null;
    private AppConfigHandler appCfgHandler = null;
    McastHandler mcastHandler = null;
    HostHandler hostHandler = null;
    private RouteHandler routeHandler = null;
    LinkHandler linkHandler = null;
    private SegmentRoutingNeighbourDispatcher neighbourHandler = null;
    private DefaultL2TunnelHandler l2TunnelHandler = null;
    private TopologyHandler topologyHandler = null;

    // Internal event listeners; registered with the corresponding services in
    // activate() and removed in deactivate().
    private final InternalHostListener hostListener = new InternalHostListener();
    private final InternalConfigListener cfgListener = new InternalConfigListener(this);
    private final InternalMcastListener mcastListener = new InternalMcastListener();
    private final InternalRouteEventListener routeListener = new InternalRouteEventListener();
    private final InternalTopologyListener topologyListener = new InternalTopologyListener();
    private final InternalMastershipListener mastershipListener = new InternalMastershipListener();
    final InternalClusterListener clusterListener = new InternalClusterListener();

    // Completable future for network configuration process to buffer config
    // events handling during activation
    private CompletableFuture<Boolean> networkConfigCompletion = null;
    private final Object networkConfigCompletionLock = new Object();
    private List<Event> queuedEvents = new CopyOnWriteArrayList<>();

    // Handles device, link, topology and network config events
    private ScheduledExecutorService mainEventExecutor;

    // Handles host, route and mcast events respectively
    private ScheduledExecutorService hostEventExecutor;
    private ScheduledExecutorService routeEventExecutor;
    private ScheduledExecutorService mcastEventExecutor;
    private ExecutorService packetExecutor;
    ExecutorService neighborExecutor;

    // Per-device group handlers, keyed by device id.
    Map<DeviceId, DefaultGroupHandler> groupHandlerMap = new ConcurrentHashMap<>();

    /**
     * Per device next objective ID store with (device id + destination set) as key.
     * Used to keep track on MPLS group information.
     */
    private EventuallyConsistentMap<DestinationSetNextObjectiveStoreKey, NextNeighbors>
            dsNextObjStore = null;

    /**
     * Per device next objective ID store with (device id + vlanid) as key.
     * Used to keep track on L2 flood group information.
     */
    private EventuallyConsistentMap<VlanNextObjectiveStoreKey, Integer>
            vlanNextObjStore = null;

    /**
     * Per device next objective ID store with (device id + port + treatment + meta) as key.
     * Used to keep track on L2 interface group and L3 unicast group information for direct hosts.
     */
    private EventuallyConsistentMap<PortNextObjectiveStoreKey, Integer>
            portNextObjStore = null;

    /**
     * Per device next objective ID store with (device id + MAC address + vlan) as key.
     * Used to keep track of L3 unicast group for indirect hosts.
     */
    private EventuallyConsistentMap<MacVlanNextObjectiveStoreKey, Integer>
            macVlanNextObjStore = null;

    private EventuallyConsistentMap<String, Tunnel> tunnelStore = null;
    private EventuallyConsistentMap<String, Policy> policyStore = null;

    private AtomicBoolean programmingScheduled = new AtomicBoolean();

    // Config factories registering the "segmentrouting" device/app configs and
    // the "multicast" app config with the network config subsystem.
    private final ConfigFactory<DeviceId, SegmentRoutingDeviceConfig> deviceConfigFactory =
            new ConfigFactory<DeviceId, SegmentRoutingDeviceConfig>(
                    SubjectFactories.DEVICE_SUBJECT_FACTORY,
                    SegmentRoutingDeviceConfig.class, "segmentrouting") {
                @Override
                public SegmentRoutingDeviceConfig createConfig() {
                    return new SegmentRoutingDeviceConfig();
                }
            };

    private final ConfigFactory<ApplicationId, SegmentRoutingAppConfig> appConfigFactory =
            new ConfigFactory<ApplicationId, SegmentRoutingAppConfig>(
                    SubjectFactories.APP_SUBJECT_FACTORY,
                    SegmentRoutingAppConfig.class, "segmentrouting") {
                @Override
                public SegmentRoutingAppConfig createConfig() {
                    return new SegmentRoutingAppConfig();
                }
            };

    private ConfigFactory<ApplicationId, McastConfig> mcastConfigFactory =
            new ConfigFactory<ApplicationId, McastConfig>(
                    SubjectFactories.APP_SUBJECT_FACTORY,
                    McastConfig.class, "multicast") {
                @Override
                public McastConfig createConfig() {
                    return new McastConfig();
                }
            };

    /**
     * Segment Routing App ID.
     */
    public static final String APP_NAME = "org.onosproject.segmentrouting";

    /**
     * Minimum and maximum value of dummy VLAN ID to be allocated.
 */
    public static final int MIN_DUMMY_VLAN_ID = 2;
    public static final int MAX_DUMMY_VLAN_ID = 4093;

    // Size of the fixed thread pool used for neighbour (ARP/NDP) processing.
    private static final int DEFAULT_POOL_SIZE = 32;

    // Timestamp of the most recent edge-port event.
    Instant lastEdgePortEvent = Instant.EPOCH;

    /**
     * Component activation: registers the application, creates the event
     * executors and eventually-consistent stores, instantiates the handlers,
     * pre-sets peer component properties, registers config factories and
     * listeners, and finally schedules processing of events queued while the
     * initial network configuration was being applied.
     */
    @Activate
    protected void activate(ComponentContext context) {
        appId = coreService.registerApplication(APP_NAME);

        // Single-threaded executors serialize each event family.
        mainEventExecutor = Executors.newSingleThreadScheduledExecutor(
                groupedThreads("onos/sr", "event-main-%d", log));
        hostEventExecutor = Executors.newSingleThreadScheduledExecutor(
                groupedThreads("onos/sr", "event-host-%d", log));
        routeEventExecutor = Executors.newSingleThreadScheduledExecutor(
                groupedThreads("onos/sr", "event-route-%d", log));
        mcastEventExecutor = Executors.newSingleThreadScheduledExecutor(
                groupedThreads("onos/sr", "event-mcast-%d", log));
        packetExecutor = Executors.newSingleThreadExecutor(groupedThreads("onos/sr", "packet-%d", log));
        neighborExecutor = Executors.newFixedThreadPool(DEFAULT_POOL_SIZE,
                groupedThreads("onos/sr", "neighbor-%d", log));

        log.debug("Creating EC map nsnextobjectivestore");
        EventuallyConsistentMapBuilder<DestinationSetNextObjectiveStoreKey, NextNeighbors>
                nsNextObjMapBuilder = storageService.eventuallyConsistentMapBuilder();
        dsNextObjStore = nsNextObjMapBuilder
                .withName("nsnextobjectivestore")
                .withSerializer(createSerializer())
                .withTimestampProvider((k, v) -> new WallClockTimestamp())
                .build();
        log.trace("Current size {}", dsNextObjStore.size());

        log.debug("Creating EC map vlannextobjectivestore");
        EventuallyConsistentMapBuilder<VlanNextObjectiveStoreKey, Integer>
                vlanNextObjMapBuilder = storageService.eventuallyConsistentMapBuilder();
        vlanNextObjStore = vlanNextObjMapBuilder
                .withName("vlannextobjectivestore")
                .withSerializer(createSerializer())
                .withTimestampProvider((k, v) -> new WallClockTimestamp())
                .build();

        log.debug("Creating EC map macvlannextobjectivestore");
        EventuallyConsistentMapBuilder<MacVlanNextObjectiveStoreKey, Integer>
                macVlanNextObjMapBuilder = storageService.eventuallyConsistentMapBuilder();
        macVlanNextObjStore = macVlanNextObjMapBuilder
                .withName("macvlannextobjectivestore")
                .withSerializer(createSerializer())
                .withTimestampProvider((k, v) -> new WallClockTimestamp())
                .build();

        log.debug("Creating EC map subnetnextobjectivestore");
        EventuallyConsistentMapBuilder<PortNextObjectiveStoreKey, Integer>
                portNextObjMapBuilder = storageService.eventuallyConsistentMapBuilder();
        portNextObjStore = portNextObjMapBuilder
                .withName("portnextobjectivestore")
                .withSerializer(createSerializer())
                .withTimestampProvider((k, v) -> new WallClockTimestamp())
                .build();

        EventuallyConsistentMapBuilder<String, Tunnel> tunnelMapBuilder =
                storageService.eventuallyConsistentMapBuilder();
        tunnelStore = tunnelMapBuilder
                .withName("tunnelstore")
                .withSerializer(createSerializer())
                .withTimestampProvider((k, v) -> new WallClockTimestamp())
                .build();

        EventuallyConsistentMapBuilder<String, Policy> policyMapBuilder =
                storageService.eventuallyConsistentMapBuilder();
        policyStore = policyMapBuilder
                .withName("policystore")
                .withSerializer(createSerializer())
                .withTimestampProvider((k, v) -> new WallClockTimestamp())
                .build();

        processor = new InternalPacketProcessor();
        linkListener = new InternalLinkListener();
        deviceListener = new InternalDeviceListener();
        appCfgHandler = new AppConfigHandler(this);
        mcastHandler = new McastHandler(this);
        hostHandler = new HostHandler(this);
        linkHandler = new LinkHandler(this);
        routeHandler = new RouteHandler(this);
        neighbourHandler = new SegmentRoutingNeighbourDispatcher(this);
        l2TunnelHandler = new DefaultL2TunnelHandler(this);
        topologyHandler = new TopologyHandler(this);

        // Pre-set properties of other components so their behavior is
        // compatible with segment routing before they process any event.
        compCfgService.preSetProperty("org.onosproject.provider.host.impl.HostLocationProvider",
                "requestInterceptsEnabled", "false", false);
        compCfgService.preSetProperty("org.onosproject.net.neighbour.impl.NeighbourResolutionManager",
                "requestInterceptsEnabled", "false", false);
        compCfgService.preSetProperty("org.onosproject.dhcprelay.DhcpRelayManager",
                "arpEnabled", "false", false);
        compCfgService.preSetProperty("org.onosproject.net.host.impl.HostManager",
                "greedyLearningIpv6", "true", false);
        compCfgService.preSetProperty("org.onosproject.routing.cpr.ControlPlaneRedirectManager",
                "forceUnprovision", "true", false);
        compCfgService.preSetProperty("org.onosproject.routeservice.store.RouteStoreImpl",
                "distributed", "true", false);
        compCfgService.preSetProperty("org.onosproject.provider.host.impl.HostLocationProvider",
                "multihomingEnabled", "true", false);
        compCfgService.preSetProperty("org.onosproject.provider.lldp.impl.LldpLinkProvider",
                "staleLinkAge", "15000", false);
        compCfgService.preSetProperty("org.onosproject.net.host.impl.HostManager",
                "allowDuplicateIps", "false", false);
        // For P4 switches
        compCfgService.preSetProperty("org.onosproject.net.flow.impl.FlowRuleManager",
                "fallbackFlowPollFrequency", "4", false);
        compCfgService.preSetProperty("org.onosproject.net.group.impl.GroupManager",
                "fallbackGroupPollFrequency", "3", false);
        compCfgService.registerProperties(getClass());
        // Apply any component properties supplied at activation time.
        modified(context);

        cfgService.addListener(cfgListener);
        cfgService.registerConfigFactory(deviceConfigFactory);
        cfgService.registerConfigFactory(appConfigFactory);
        cfgService.registerConfigFactory(mcastConfigFactory);
        log.info("Configuring network before adding listeners");
        cfgListener.configureNetwork();

        hostService.addListener(hostListener);
        packetService.addProcessor(processor, PacketProcessor.director(2));
        linkService.addListener(linkListener);
        deviceService.addListener(deviceListener);
        multicastRouteService.addListener(mcastListener);
        routeService.addListener(routeListener);
        topologyService.addListener(topologyListener);
        mastershipService.addListener(mastershipListener);
        clusterService.addListener(clusterListener);

        linkHandler.init();
        l2TunnelHandler.init();

        // Once initial network configuration completes, drain the events that
        // were queued while configuration was in progress.
        // NOTE(review): networkConfigCompletion is presumably initialized by
        // cfgListener.configureNetwork() above — confirm it cannot be null here.
        synchronized (networkConfigCompletionLock) {
            networkConfigCompletion.whenComplete((value, ex) -> {
                //setting to null for easier fall through
                networkConfigCompletion = null;
                //process all queued events
                queuedEvents.forEach(event -> {
                    mainEventExecutor.execute(new InternalEventHandler(event));
                });
            });
        }

        log.info("Started");
    }

    /**
     * Builds the Kryo serializer namespace used by all eventually-consistent
     * stores of this component.
     */
    KryoNamespace.Builder createSerializer() {
        return new KryoNamespace.Builder()
                .register(KryoNamespaces.API)
                .register(DestinationSetNextObjectiveStoreKey.class,
                        VlanNextObjectiveStoreKey.class,
                        DestinationSet.class,
                        DestinationSet.DestinationSetType.class,
                        NextNeighbors.class,
                        Tunnel.class,
                        DefaultTunnel.class,
                        Policy.class,
                        TunnelPolicy.class,
                        Policy.Type.class,
                        PortNextObjectiveStoreKey.class,
                        XConnectStoreKey.class,
                        L2Tunnel.class,
                        L2TunnelPolicy.class,
                        DefaultL2Tunnel.class,
                        DefaultL2TunnelPolicy.class,
                        MacVlanNextObjectiveStoreKey.class
                );
    }

    /**
     * Component deactivation: shuts down executors, unregisters config
     * factories, properties and listeners, tears down handlers and destroys
     * the eventually-consistent stores.
     */
    @Deactivate
    protected void deactivate() {
        mainEventExecutor.shutdown();
        hostEventExecutor.shutdown();
        routeEventExecutor.shutdown();
        mcastEventExecutor.shutdown();
        packetExecutor.shutdown();
        neighborExecutor.shutdown();
        mainEventExecutor = null;
        hostEventExecutor = null;
        routeEventExecutor = null;
        mcastEventExecutor = null;
        packetExecutor = null;
        neighborExecutor = null;

        cfgService.removeListener(cfgListener);
        cfgService.unregisterConfigFactory(deviceConfigFactory);
        cfgService.unregisterConfigFactory(appConfigFactory);
        cfgService.unregisterConfigFactory(mcastConfigFactory);
        compCfgService.unregisterProperties(getClass(), false);

        hostService.removeListener(hostListener);
        packetService.removeProcessor(processor);
        linkService.removeListener(linkListener);
        deviceService.removeListener(deviceListener);
        multicastRouteService.removeListener(mcastListener);
        routeService.removeListener(routeListener);
        topologyService.removeListener(topologyListener);
        mastershipService.removeListener(mastershipListener);
        clusterService.removeListener(clusterListener);

        neighbourResolutionService.unregisterNeighbourHandlers(appId);
        processor = null;
        linkListener = null;
        deviceListener = null;
        groupHandlerMap.forEach((k, v) -> v.shutdown());
        groupHandlerMap.clear();
        defaultRoutingHandler.shutdown();

        dsNextObjStore.destroy();
        vlanNextObjStore.destroy();
        macVlanNextObjStore.destroy();
        portNextObjStore.destroy();
        tunnelStore.destroy();
        policyStore.destroy();
        mcastHandler.terminate();
        hostHandler.terminate();
        log.info("Stopped");
    }

    /**
     * Applies component property changes: probing flags, single-homed host
     * handling, double-tagged host routing, the default internal VLAN, the
     * pseudowire transport VLAN and route simplification. Each property is
     * applied only when its value actually changed.
     */
    @Modified
    private void modified(ComponentContext context) {
        Dictionary<?, ?> properties = context.getProperties();
        if (properties == null) {
            return;
        }

        String strActiveProbing = Tools.get(properties, PROP_ACTIVE_PROBING);
        boolean expectActiveProbing = Boolean.parseBoolean(strActiveProbing);
        if (expectActiveProbing != activeProbing) {
            activeProbing = expectActiveProbing;
            log.info("{} active probing", activeProbing ? "Enabling" : "Disabling");
        }

        String strSymmetricProbing = Tools.get(properties, PROP_SYMMETRIC_PROBING);
        boolean expectSymmetricProbing = Boolean.parseBoolean(strSymmetricProbing);
        if (expectSymmetricProbing != symmetricProbing) {
            symmetricProbing = expectSymmetricProbing;
            log.info("{} symmetric probing", symmetricProbing ? "Enabling" : "Disabling");
        }

        String strSingleHomedDown = Tools.get(properties, PROP_SINGLE_HOMED_DOWN);
        boolean expectSingleHomedDown = Boolean.parseBoolean(strSingleHomedDown);
        if (expectSingleHomedDown != singleHomedDown) {
            singleHomedDown = expectSingleHomedDown;
            log.info("{} downing of single homed hosts for lost uplinks",
                    singleHomedDown ? "Enabling" : "Disabling");
            // When enabling, re-check uplinks for all configured host locations.
            if (singleHomedDown && linkHandler != null) {
                hostService.getHosts().forEach(host -> host.locations()
                        .forEach(loc -> {
                            if (interfaceService.isConfigured(loc)) {
                                linkHandler.checkUplinksForHost(loc);
                            }
                        }));
            } else {
                log.warn("Disabling singleHomedDown does not re-enable already "
                        + "downed ports for single-homed hosts");
            }
        }

        String strRespondToUnknownHosts = Tools.get(properties, PROP_RESPOND_TO_UNKNOWN_HOSTS);
        boolean expectRespondToUnknownHosts = Boolean.parseBoolean(strRespondToUnknownHosts);
        if (expectRespondToUnknownHosts != respondToUnknownHosts) {
            respondToUnknownHosts = expectRespondToUnknownHosts;
            log.info("{} responding to ARPs/NDPs from unknown hosts",
                    respondToUnknownHosts ? "Enabling" : "Disabling");
        }

        String strRouteDoubleTaggedHosts = Tools.get(properties, PROP_ROUTE_DOUBLE_TAGGED_HOSTS);
        boolean expectRouteDoubleTaggedHosts = Boolean.parseBoolean(strRouteDoubleTaggedHosts);
        if (expectRouteDoubleTaggedHosts != routeDoubleTaggedHosts) {
            routeDoubleTaggedHosts = expectRouteDoubleTaggedHosts;
            log.info("{} routing for double tagged hosts",
                    routeDoubleTaggedHosts ? "Enabling" : "Disabling");
            if (routeDoubleTaggedHosts) {
                hostHandler.populateAllDoubleTaggedHost();
            } else {
                hostHandler.revokeAllDoubleTaggedHost();
            }
        }

        String strDefaultInternalVlan = Tools.get(properties, PROP_DEFAULT_INTERNAL_VLAN);
        int defIntVlan = Integer.parseInt(strDefaultInternalVlan);
        if (defIntVlan != defaultInternalVlan) {
            if (canUseVlanId(defIntVlan)) {
                log.warn("Default internal vlan value changed from {} to {}.. "
                        + "re-programming filtering rules, but NOT any groups already "
                        + "created with the former value", defaultInternalVlan, defIntVlan);
                VlanId oldDefIntVlan = VlanId.vlanId((short) defaultInternalVlan);
                defaultInternalVlan = defIntVlan;
                routingRulePopulator
                        .updateSpecialVlanFilteringRules(true, oldDefIntVlan,
                                VlanId.vlanId((short) defIntVlan));
            } else {
                log.warn("Cannot change default internal vlan to unusable "
                        + "value {}", defIntVlan);
            }
        }

        String strPwTxpVlan = Tools.get(properties, PROP_PW_TRANSPORT_VLAN);
        int pwTxpVlan = Integer.parseInt(strPwTxpVlan);
        if (pwTxpVlan != pwTransportVlan) {
            if (canUseVlanId(pwTxpVlan)) {
                log.warn("Pseudowire transport vlan value changed from {} to {}.. "
                        + "re-programming filtering rules, but NOT any groups already "
                        + "created with the former value", pwTransportVlan, pwTxpVlan);
                VlanId oldPwTxpVlan = VlanId.vlanId((short) pwTransportVlan);
                pwTransportVlan = pwTxpVlan;
                routingRulePopulator
                        .updateSpecialVlanFilteringRules(false, oldPwTxpVlan,
                                VlanId.vlanId((short) pwTxpVlan));
            } else {
                log.warn("Cannot change pseudowire transport vlan to unusable "
                        + "value {}", pwTxpVlan);
            }
        }

        String strRouteSimplification = Tools.get(properties, PROP_ROUTE_SIMPLIFICATION);
        boolean expectRouteSimplification = Boolean.parseBoolean(strRouteSimplification);
        if (expectRouteSimplification != routeSimplification) {
            routeSimplification = expectRouteSimplification;
            log.info("{} route simplification",
                    routeSimplification ? "Enabling" : "Disabling");
        }
    }

    /**
     * Returns true if given vlan id is not being used in the system currently,
     * either as one of the default system wide vlans or as one of the
     * configured interface vlans.
* * @param vlanId given vlan id * @return true if vlan is not currently in use */ public boolean canUseVlanId(int vlanId) { if (vlanId >= 4095 || vlanId <= 1) { log.error("Vlan id {} value is not in valid range 2 <--> 4094", vlanId); return false; } VlanId vid = VlanId.vlanId((short) vlanId); if (getDefaultInternalVlan().equals(vid) || getPwTransportVlan().equals(vid)) { log.warn("Vlan id {} value is already in use system-wide. " + "DefaultInternalVlan:{} PwTransportVlan:{} ", vlanId, getDefaultInternalVlan(), getPwTransportVlan()); return false; } if (interfaceService.inUse(vid)) { log.warn("Vlan id {} value is already in use on a configured " + "interface in the system", vlanId); return false; } return true; } /** * Returns the VlanId assigned internally by default to unconfigured ports. * * @return the default internal vlan id */ public VlanId getDefaultInternalVlan() { return VlanId.vlanId((short) defaultInternalVlan); } /** * Returns the Vlan id used to transport pseudowire traffic across the * network. 
* * @return the pseudowire transport vlan id */ public VlanId getPwTransportVlan() { return VlanId.vlanId((short) pwTransportVlan); } @Override public List<Tunnel> getTunnels() { return tunnelHandler.getTunnels(); } @Override public TunnelHandler.Result createTunnel(Tunnel tunnel) { return tunnelHandler.createTunnel(tunnel); } @Override public TunnelHandler.Result removeTunnel(Tunnel tunnel) { for (Policy policy: policyHandler.getPolicies()) { if (policy.type() == Policy.Type.TUNNEL_FLOW) { TunnelPolicy tunnelPolicy = (TunnelPolicy) policy; if (tunnelPolicy.tunnelId().equals(tunnel.id())) { log.warn("Cannot remove the tunnel used by a policy"); return TunnelHandler.Result.TUNNEL_IN_USE; } } } return tunnelHandler.removeTunnel(tunnel); } @Override public PolicyHandler.Result removePolicy(Policy policy) { return policyHandler.removePolicy(policy); } @Override public PolicyHandler.Result createPolicy(Policy policy) { return policyHandler.createPolicy(policy); } @Override public List<Policy> getPolicies() { return policyHandler.getPolicies(); } @Override public Set<L2TunnelDescription> getL2TunnelDescriptions(boolean pending) { return l2TunnelHandler.getL2Descriptions(pending); } @Override public List<L2Tunnel> getL2Tunnels() { return l2TunnelHandler.getL2Tunnels(); } @Override public List<L2TunnelPolicy> getL2Policies() { return l2TunnelHandler.getL2Policies(); } @Override @Deprecated public L2TunnelHandler.Result addPseudowiresBulk(List<DefaultL2TunnelDescription> bulkPseudowires) { // get both added and pending pseudowires List<L2TunnelDescription> pseudowires = new ArrayList<>(); pseudowires.addAll(l2TunnelHandler.getL2Descriptions(false)); pseudowires.addAll(l2TunnelHandler.getL2Descriptions(true)); pseudowires.addAll(bulkPseudowires); Set<L2TunnelDescription> newPseudowires = new HashSet(bulkPseudowires); L2TunnelHandler.Result retRes = L2TunnelHandler.Result.SUCCESS; L2TunnelHandler.Result res; for (DefaultL2TunnelDescription pw : bulkPseudowires) { res = 
addPseudowire(pw); if (res != L2TunnelHandler.Result.SUCCESS) { log.error("Pseudowire with id {} can not be instantiated !", res); retRes = res; } } return retRes; } @Override public L2TunnelHandler.Result addPseudowire(L2TunnelDescription l2TunnelDescription) { return l2TunnelHandler.deployPseudowire(l2TunnelDescription); } @Override public L2TunnelHandler.Result removePseudowire(Integer pwId) { return l2TunnelHandler.tearDownPseudowire(pwId); } @Override public void rerouteNetwork() { cfgListener.configureNetwork(); } @Override public Map<DeviceId, Set<IpPrefix>> getDeviceSubnetMap() { Map<DeviceId, Set<IpPrefix>> deviceSubnetMap = Maps.newHashMap(); deviceConfiguration.getRouters().forEach(device -> deviceSubnetMap.put(device, deviceConfiguration.getSubnets(device))); return deviceSubnetMap; } @Override public ImmutableMap<DeviceId, EcmpShortestPathGraph> getCurrentEcmpSpg() { if (defaultRoutingHandler != null) { return defaultRoutingHandler.getCurrentEmcpSpgMap(); } else { return null; } } @Override public ImmutableMap<DestinationSetNextObjectiveStoreKey, NextNeighbors> getDstNextObjStore() { if (dsNextObjStore != null) { return ImmutableMap.copyOf(dsNextObjStore.entrySet()); } else { return ImmutableMap.of(); } } @Override public ImmutableMap<VlanNextObjectiveStoreKey, Integer> getVlanNextObjStore() { if (vlanNextObjStore != null) { return ImmutableMap.copyOf(vlanNextObjStore.entrySet()); } else { return ImmutableMap.of(); } } @Override public ImmutableMap<MacVlanNextObjectiveStoreKey, Integer> getMacVlanNextObjStore() { if (macVlanNextObjStore != null) { return ImmutableMap.copyOf(macVlanNextObjStore.entrySet()); } else { return ImmutableMap.of(); } } @Override public ImmutableMap<PortNextObjectiveStoreKey, Integer> getPortNextObjStore() { if (portNextObjStore != null) { return ImmutableMap.copyOf(portNextObjStore.entrySet()); } else { return ImmutableMap.of(); } } @Override public ImmutableMap<String, NextObjective> getPwInitNext() { if (l2TunnelHandler != 
null) { return l2TunnelHandler.getInitNext(); } else { return ImmutableMap.of(); } } @Override public ImmutableMap<String, NextObjective> getPwTermNext() { if (l2TunnelHandler != null) { return l2TunnelHandler.getTermNext(); } else { return ImmutableMap.of(); } } @Override public void invalidateNextObj(int nextId) { if (dsNextObjStore != null) { dsNextObjStore.entrySet().forEach(e -> { if (e.getValue().nextId() == nextId) { dsNextObjStore.remove(e.getKey()); } }); } if (vlanNextObjStore != null) { vlanNextObjStore.entrySet().forEach(e -> { if (e.getValue() == nextId) { vlanNextObjStore.remove(e.getKey()); } }); } if (macVlanNextObjStore != null) { macVlanNextObjStore.entrySet().forEach(e -> { if (e.getValue() == nextId) { macVlanNextObjStore.remove(e.getKey()); } }); } if (portNextObjStore != null) { portNextObjStore.entrySet().forEach(e -> { if (e.getValue() == nextId) { portNextObjStore.remove(e.getKey()); } }); } if (mcastHandler != null) { mcastHandler.removeNextId(nextId); } if (l2TunnelHandler != null) { l2TunnelHandler.removeNextId(nextId); } if (xconnectService != null) { xconnectService.removeNextId(nextId); } } @Override public void verifyGroups(DeviceId id) { DefaultGroupHandler gh = groupHandlerMap.get(id); if (gh != null) { gh.triggerBucketCorrector(); } } @Override public ImmutableMap<Link, Boolean> getSeenLinks() { return linkHandler.getSeenLinks(); } @Override public ImmutableMap<DeviceId, Set<PortNumber>> getDownedPortState() { return linkHandler.getDownedPorts(); } @Override public Map<McastStoreKey, Integer> getMcastNextIds(IpAddress mcastIp) { return mcastHandler.getNextIds(mcastIp); } @Override public Map<McastRoleStoreKey, McastRole> getMcastRoles(IpAddress mcastIp, ConnectPoint sourcecp) { return mcastHandler.getMcastRoles(mcastIp, sourcecp); } @Override public Multimap<ConnectPoint, List<ConnectPoint>> getMcastTrees(IpAddress mcastIp, ConnectPoint sourcecp) { return mcastHandler.getMcastTrees(mcastIp, sourcecp); } @Override public 
Map<IpAddress, NodeId> getMcastLeaders(IpAddress mcastIp) { return mcastHandler.getMcastLeaders(mcastIp); } @Override public Map<DeviceId, List<McastFilteringObjStoreKey>> getMcastFilters() { return mcastHandler.getMcastFilters(); } @Override public Map<Set<DeviceId>, NodeId> getShouldProgram() { return defaultRoutingHandler == null ? ImmutableMap.of() : ImmutableMap.copyOf(defaultRoutingHandler.shouldProgram); } @Override public Map<DeviceId, Boolean> getShouldProgramCache() { return defaultRoutingHandler == null ? ImmutableMap.of() : ImmutableMap.copyOf(defaultRoutingHandler.shouldProgramCache); } @Override public boolean shouldProgram(DeviceId deviceId) { return defaultRoutingHandler.shouldProgram(deviceId); } @Override public boolean isRoutingStable() { return defaultRoutingHandler.isRoutingStable(); } @Override public void initHost(DeviceId deviceId) { hostEventExecutor.execute(() -> hostHandler.init(deviceId)); } @Override public void initRoute(DeviceId deviceId) { routeEventExecutor.execute(() -> routeHandler.init(deviceId)); } @Override public ApplicationId appId() { return appId; } /** * Returns the device configuration. * * @return device configuration */ public DeviceConfiguration deviceConfiguration() { return deviceConfiguration; } /** * Per device next objective ID store with (device id + destination set) as key. * Used to keep track on MPLS group information. * * @return next objective ID store */ public EventuallyConsistentMap<DestinationSetNextObjectiveStoreKey, NextNeighbors> dsNextObjStore() { return dsNextObjStore; } /** * Per device next objective ID store with (device id + vlanid) as key. * Used to keep track on L2 flood group information. * * @return vlan next object store */ public EventuallyConsistentMap<VlanNextObjectiveStoreKey, Integer> vlanNextObjStore() { return vlanNextObjStore; } /** * Per device next objective ID store with (device id + MAC address + vlan) as key. 
     * Used to keep track on L3 Unicast group information for indirect hosts.
     *
     * @return mac vlan next object store
     */
    public EventuallyConsistentMap<MacVlanNextObjectiveStoreKey, Integer> macVlanNextObjStore() {
        return macVlanNextObjStore;
    }

    /**
     * Per device next objective ID store with (device id + port + treatment + meta) as key.
     * Used to keep track on L2 interface group and L3 unicast group information for direct hosts.
     *
     * @return port next object store.
     */
    public EventuallyConsistentMap<PortNextObjectiveStoreKey, Integer> portNextObjStore() {
        return portNextObjStore;
    }

    /**
     * Returns the MPLS-ECMP configuration which indicates whether ECMP on
     * labeled packets should be programmed or not.
     *
     * @return MPLS-ECMP value
     */
    public boolean getMplsEcmp() {
        SegmentRoutingAppConfig segmentRoutingAppConfig = cfgService
                .getConfig(this.appId, SegmentRoutingAppConfig.class);
        return segmentRoutingAppConfig != null && segmentRoutingAppConfig.mplsEcmp();
    }

    /**
     * Returns the tunnel object with the tunnel ID.
     *
     * @param tunnelId Tunnel ID
     * @return Tunnel reference
     */
    public Tunnel getTunnel(String tunnelId) {
        return tunnelHandler.getTunnel(tunnelId);
    }

    @Override
    public VlanId getInternalVlanId(ConnectPoint connectPoint) {
        VlanId untaggedVlanId = interfaceService.getUntaggedVlanId(connectPoint);
        VlanId nativeVlanId = interfaceService.getNativeVlanId(connectPoint);
        // Prefer the untagged vlan; fall back to the native vlan (which may be null).
        return untaggedVlanId != null ? untaggedVlanId : nativeVlanId;
    }

    @Override
    public Optional<DeviceId> getPairDeviceId(DeviceId deviceId) {
        SegmentRoutingDeviceConfig deviceConfig =
                cfgService.getConfig(deviceId, SegmentRoutingDeviceConfig.class);
        return Optional.ofNullable(deviceConfig).map(SegmentRoutingDeviceConfig::pairDeviceId);
    }

    @Override
    public Optional<PortNumber> getPairLocalPort(DeviceId deviceId) {
        SegmentRoutingDeviceConfig deviceConfig =
                cfgService.getConfig(deviceId, SegmentRoutingDeviceConfig.class);
        return Optional.ofNullable(deviceConfig).map(SegmentRoutingDeviceConfig::pairLocalPort);
    }

    @Override
    public Set<PortNumber> getInfraPorts(DeviceId deviceId) {
        // Infrastructure ports: ports without any interface configuration.
        return deviceService.getPorts(deviceId).stream()
                .map(port -> new ConnectPoint(port.element().id(), port.number()))
                .filter(cp -> interfaceService.getInterfacesByPort(cp).isEmpty())
                .map(ConnectPoint::port)
                .collect(Collectors.toSet());
    }

    @Override
    public Set<PortNumber> getEdgePorts(DeviceId deviceId) {
        // Edge ports: ports with an interface configuration, excluding the pair-link port.
        return deviceService.getPorts(deviceId).stream()
                .map(port -> new ConnectPoint(port.element().id(), port.number()))
                .filter(cp -> !interfaceService.getInterfacesByPort(cp).isEmpty() &&
                        !cp.port().equals(getPairLocalPort(deviceId).orElse(null)))
                .map(ConnectPoint::port)
                .collect(Collectors.toSet());
    }

    /**
     * Returns locations of given resolved route.
     *
     * @param resolvedRoute resolved route
     * @return locations of nexthop. Might be empty if next hop is not found
     */
    public Set<ConnectPoint> nextHopLocations(ResolvedRoute resolvedRoute) {
        HostId hostId = HostId.hostId(resolvedRoute.nextHopMac(), resolvedRoute.nextHopVlan());
        return Optional.ofNullable(hostService.getHost(hostId))
                .map(Host::locations).orElse(Sets.newHashSet())
                .stream().map(l -> (ConnectPoint) l).collect(Collectors.toSet());
    }

    /**
     * Returns vlan port map of given device.
     *
     * @param deviceId device id
     * @return vlan-port multimap
     */
    public Multimap<VlanId, PortNumber> getVlanPortMap(DeviceId deviceId) {
        HashMultimap<VlanId, PortNumber> vlanPortMap = HashMultimap.create();

        interfaceService.getInterfaces().stream()
                .filter(intf -> intf.connectPoint().deviceId().equals(deviceId))
                .forEach(intf -> {
                    vlanPortMap.put(intf.vlanUntagged(), intf.connectPoint().port());
                    intf.vlanTagged().forEach(vlanTagged ->
                            vlanPortMap.put(vlanTagged, intf.connectPoint().port())
                    );
                    vlanPortMap.put(intf.vlanNative(), intf.connectPoint().port());
                });
        // Drop the placeholder entries added for interfaces without an
        // untagged/native vlan.
        vlanPortMap.removeAll(VlanId.NONE);

        return vlanPortMap;
    }

    /**
     * Returns the next objective ID for the given vlan id. It is expected
     * that the next-objective has been pre-created from configuration.
     *
     * @param deviceId Device ID
     * @param vlanId VLAN ID
     * @return next objective ID or -1 if it was not found
     */
    int getVlanNextObjectiveId(DeviceId deviceId, VlanId vlanId) {
        if (groupHandlerMap.get(deviceId) != null) {
            log.trace("getVlanNextObjectiveId query in device {}", deviceId);
            return groupHandlerMap.get(deviceId).getVlanNextObjectiveId(vlanId);
        } else {
            log.warn("getVlanNextObjectiveId query - groupHandler for "
                    + "device {} not found", deviceId);
            return -1;
        }
    }

    /**
     * Returns the next objective ID for the given portNumber, given the treatment.
     * There could be multiple different treatments to the same outport, which
     * would result in different objectives. If the next object does not exist,
     * and should be created, a new one is created and its id is returned.
     *
     * @param deviceId Device ID
     * @param portNum port number on device for which NextObjective is queried
     * @param treatment the actions to apply on the packets (should include outport)
     * @param meta metadata passed into the creation of a Next Objective if necessary
     * @param createIfMissing true if a next object should be created if not found
     * @return next objective ID or -1 if an error occurred during retrieval or creation
     */
    public int getPortNextObjectiveId(DeviceId deviceId, PortNumber portNum,
                                      TrafficTreatment treatment, TrafficSelector meta,
                                      boolean createIfMissing) {
        DefaultGroupHandler ghdlr = groupHandlerMap.get(deviceId);
        if (ghdlr != null) {
            return ghdlr.getPortNextObjectiveId(portNum, treatment, meta, createIfMissing);
        } else {
            log.warn("getPortNextObjectiveId query - groupHandler for device {}"
                    + " not found", deviceId);
            return -1;
        }
    }

    /**
     * Returns the next Objective ID for the given mac and vlan, given the treatment.
     * There could be multiple different treatments to the same outport, which
     * would result in different objectives. If the next object does not exist,
     * and should be created, a new one is created and its id is returned.
     *
     * @param deviceId Device ID
     * @param macAddr mac of host for which Next ID is required.
     * @param vlanId vlan of host for which Next ID is required.
     * @param port port with which to create the Next Obj.
     * @param createIfMissing true if a next object should be created if not found
     * @return next objective ID or -1 if an error occurred during retrieval or creation
     */
    public int getMacVlanNextObjectiveId(DeviceId deviceId, MacAddress macAddr, VlanId vlanId,
                                         PortNumber port, boolean createIfMissing) {
        DefaultGroupHandler ghdlr = groupHandlerMap.get(deviceId);
        if (ghdlr != null) {
            return ghdlr.getMacVlanNextObjectiveId(macAddr, vlanId, port, createIfMissing);
        } else {
            log.warn("getMacVlanNextObjectiveId query - groupHandler for device {}"
                    + " not found", deviceId);
            return -1;
        }
    }

    /**
     * Updates the next objective for the given nextId.
     *
     * @param deviceId Device ID
     * @param hostMac mac of host for which Next obj is to be updated.
     * @param hostVlanId vlan of host for which Next obj is to be updated.
     * @param port port with which to update the Next Obj.
     * @param nextId of Next Obj which needs to be updated.
     */
    public void updateMacVlanTreatment(DeviceId deviceId, MacAddress hostMac,
                                       VlanId hostVlanId, PortNumber port, int nextId) {
        // Check if we are the king of this device
        // just one instance should perform this update
        if (!defaultRoutingHandler.shouldProgram(deviceId)) {
            log.debug("This instance is not handling the routing towards the "
                    + "device {}", deviceId);
            return;
        }
        // Get the handler and perform the update
        DefaultGroupHandler ghdlr = groupHandlerMap.get(deviceId);
        if (ghdlr != null) {
            ghdlr.updateL3UcastGroupBucket(hostMac, hostVlanId, port, nextId);
        } else {
            log.warn("updateL3UcastGroupBucket query - groupHandler for device {}"
                    + " not found", deviceId);
        }
    }

    /**
     * Returns the group handler object for the specified device id.
     *
     * @param devId the device identifier
     * @return the groupHandler object for the device id, or null if not found
     */
    DefaultGroupHandler getGroupHandler(DeviceId devId) {
        return groupHandlerMap.get(devId);
    }

    /**
     * Returns the default routing handler object.
* * @return the default routing handler object */ public DefaultRoutingHandler getRoutingHandler() { return defaultRoutingHandler; } private class InternalPacketProcessor implements PacketProcessor { @Override public void process(PacketContext context) { packetExecutor.execute(() -> processPacketInternal(context)); } private void processPacketInternal(PacketContext context) { if (context.isHandled()) { return; } InboundPacket pkt = context.inPacket(); Ethernet ethernet = pkt.parsed(); if (ethernet == null) { return; } log.trace("Rcvd pktin from {}: {}", context.inPacket().receivedFrom(), ethernet); if (ethernet.getEtherType() == TYPE_ARP) { log.warn("Received unexpected ARP packet on {}", context.inPacket().receivedFrom()); log.trace("{}", ethernet); return; } else if (ethernet.getEtherType() == Ethernet.TYPE_IPV4) { IPv4 ipv4Packet = (IPv4) ethernet.getPayload(); //ipHandler.addToPacketBuffer(ipv4Packet); if (ipv4Packet.getProtocol() == IPv4.PROTOCOL_ICMP) { icmpHandler.processIcmp(ethernet, pkt.receivedFrom()); } else { // NOTE: We don't support IP learning at this moment so this // is not necessary. Also it causes duplication of DHCP packets. // ipHandler.processPacketIn(ipv4Packet, pkt.receivedFrom()); } } else if (ethernet.getEtherType() == Ethernet.TYPE_IPV6) { IPv6 ipv6Packet = (IPv6) ethernet.getPayload(); //ipHandler.addToPacketBuffer(ipv6Packet); // We deal with the packet only if the packet is a ICMP6 ECHO/REPLY if (ipv6Packet.getNextHeader() == IPv6.PROTOCOL_ICMP6) { ICMP6 icmp6Packet = (ICMP6) ipv6Packet.getPayload(); if (icmp6Packet.getIcmpType() == ICMP6.ECHO_REQUEST || icmp6Packet.getIcmpType() == ICMP6.ECHO_REPLY) { icmpHandler.processIcmpv6(ethernet, pkt.receivedFrom()); } else { log.trace("Received ICMPv6 0x{} - not handled", Integer.toHexString(icmp6Packet.getIcmpType() & 0xff)); } } else { // NOTE: We don't support IP learning at this moment so this // is not necessary. Also it causes duplication of DHCPv6 packets. 
// ipHandler.processPacketIn(ipv6Packet, pkt.receivedFrom());
            }
        }
    }
}

// Runnable wrapper that dispatches a single captured event to the appropriate
// handler (link, device, topology, host, route, mcast, netcfg, mastership).
// Executed on one of the event executors; any exception is caught in run().
private class InternalEventHandler implements Runnable {
    private Event event;

    InternalEventHandler(Event event) {
        this.event = event;
    }

    @Override
    public void run() {
        try {
            // TODO We should also change SR routing and PW to listen to TopologyEvents
            if (event.type() == LinkEvent.Type.LINK_ADDED ||
                    event.type() == LinkEvent.Type.LINK_UPDATED) {
                linkHandler.processLinkAdded((Link) event.subject());
            } else if (event.type() == LinkEvent.Type.LINK_REMOVED) {
                linkHandler.processLinkRemoved((Link) event.subject());
            } else if (event.type() == DeviceEvent.Type.DEVICE_ADDED ||
                    event.type() == DeviceEvent.Type.DEVICE_AVAILABILITY_CHANGED ||
                    event.type() == DeviceEvent.Type.DEVICE_UPDATED) {
                DeviceId deviceId = ((Device) event.subject()).id();
                // Availability decides whether this is treated as device-up or device-down.
                if (deviceService.isAvailable(deviceId)) {
                    log.info("** DEVICE UP Processing device event {} "
                            + "for available device {}",
                            event.type(), ((Device) event.subject()).id());
                    processDeviceAdded((Device) event.subject());
                } else {
                    if (event.type() == DeviceEvent.Type.DEVICE_ADDED) {
                        // Note: For p4 devices, the device will be added but unavailable at the beginning.
                        // The device will later on being marked as available once the pipeline is pushed
                        // to the device.
                        log.info("** DEVICE ADDED but unavailable. Ignore");
                        return;
                    }
                    log.info(" ** DEVICE DOWN Processing device event {}"
                            + " for unavailable device {}",
                            event.type(), ((Device) event.subject()).id());
                    processDeviceRemoved((Device) event.subject());
                }
            } else if (event.type() == DeviceEvent.Type.PORT_ADDED) {
                // typically these calls come when device is added first time
                // so port filtering rules are handled at the device_added event.
                // port added calls represent all ports on the device,
                // enabled or not.
                log.trace("** PORT ADDED {}/{} -> {}",
                        ((DeviceEvent) event).subject().id(),
                        ((DeviceEvent) event).port().number(),
                        event.type());
            } else if (event.type() == DeviceEvent.Type.PORT_UPDATED) {
                // these calls happen for every subsequent event
                // ports enabled, disabled, switch goes away, comes back
                log.info("** PORT UPDATED {}/{} -> {}", event.subject(),
                        ((DeviceEvent) event).port(), event.type());
                processPortUpdatedInternal(((Device) event.subject()),
                        ((DeviceEvent) event).port());
                mcastHandler.processPortUpdate(((Device) event.subject()),
                        ((DeviceEvent) event).port());
            } else if (event.type() == TopologyEvent.Type.TOPOLOGY_CHANGED) {
                // Process topology event, needed for all modules relying on
                // topology service for path computation
                TopologyEvent topologyEvent = (TopologyEvent) event;
                log.info("Processing topology event {}, topology age {}, reasons {}",
                        event.type(), topologyEvent.subject().time(),
                        topologyEvent.reasons().size());
                topologyHandler.processTopologyChange(topologyEvent.reasons());
            } else if (event.type() == HostEvent.Type.HOST_ADDED) {
                hostHandler.processHostAddedEvent((HostEvent) event);
            } else if (event.type() == HostEvent.Type.HOST_MOVED) {
                hostHandler.processHostMovedEvent((HostEvent) event);
                routeHandler.processHostMovedEvent((HostEvent) event);
            } else if (event.type() == HostEvent.Type.HOST_AUX_MOVED) {
                hostHandler.processHostMovedEvent((HostEvent) event);
                // TODO RouteHandler also needs to process this event in order to
                // support nexthops that has auxLocations
            } else if (event.type() == HostEvent.Type.HOST_REMOVED) {
                hostHandler.processHostRemovedEvent((HostEvent) event);
            } else if (event.type() == HostEvent.Type.HOST_UPDATED) {
                hostHandler.processHostUpdatedEvent((HostEvent) event);
            } else if (event.type() == RouteEvent.Type.ROUTE_ADDED) {
                routeHandler.processRouteAdded((RouteEvent) event);
            } else if (event.type() == RouteEvent.Type.ROUTE_UPDATED) {
                routeHandler.processRouteUpdated((RouteEvent) event);
            } else if (event.type() ==
                    RouteEvent.Type.ROUTE_REMOVED) {
                routeHandler.processRouteRemoved((RouteEvent) event);
            } else if (event.type() == RouteEvent.Type.ALTERNATIVE_ROUTES_CHANGED) {
                routeHandler.processAlternativeRoutesChanged((RouteEvent) event);
            } else if (event.type() == McastEvent.Type.SOURCES_ADDED ||
                    event.type() == McastEvent.Type.SOURCES_REMOVED ||
                    event.type() == McastEvent.Type.SINKS_ADDED ||
                    event.type() == McastEvent.Type.SINKS_REMOVED ||
                    event.type() == McastEvent.Type.ROUTE_ADDED ||
                    event.type() == McastEvent.Type.ROUTE_REMOVED) {
                mcastHandler.processMcastEvent((McastEvent) event);
            } else if (event.type() == NetworkConfigEvent.Type.CONFIG_ADDED) {
                // Only the three config classes accepted by InternalConfigListener.isRelevant()
                // are expected here.
                NetworkConfigEvent netcfgEvent = (NetworkConfigEvent) event;
                Class configClass = netcfgEvent.configClass();
                if (configClass.equals(SegmentRoutingAppConfig.class)) {
                    appCfgHandler.processAppConfigAdded(netcfgEvent);
                    log.info("App config event .. configuring network");
                    cfgListener.configureNetwork();
                } else if (configClass.equals(SegmentRoutingDeviceConfig.class)) {
                    log.info("Segment Routing Device Config added for {}", event.subject());
                    cfgListener.configureNetwork();
                } else if (configClass.equals(InterfaceConfig.class)) {
                    log.info("Interface Config added for {}", event.subject());
                    cfgListener.configureNetwork();
                } else {
                    log.error("Unhandled config class: {}", configClass);
                }
            } else if (event.type() == NetworkConfigEvent.Type.CONFIG_UPDATED) {
                NetworkConfigEvent netcfgEvent = (NetworkConfigEvent) event;
                Class configClass = netcfgEvent.configClass();
                if (configClass.equals(SegmentRoutingAppConfig.class)) {
                    appCfgHandler.processAppConfigUpdated(netcfgEvent);
                    log.info("App config event .. configuring network");
                    cfgListener.configureNetwork();
                } else if (configClass.equals(SegmentRoutingDeviceConfig.class)) {
                    log.info("Segment Routing Device Config updated for {}", event.subject());
                    createOrUpdateDeviceConfiguration();
                } else if (configClass.equals(InterfaceConfig.class)) {
                    log.info("Interface Config updated for {}", event.subject());
                    createOrUpdateDeviceConfiguration();
                    // Apply the delta between previous and current interface config.
                    updateInterface((InterfaceConfig) netcfgEvent.config().get(),
                            (InterfaceConfig) netcfgEvent.prevConfig().get());
                } else {
                    log.error("Unhandled config class: {}", configClass);
                }
            } else if (event.type() == NetworkConfigEvent.Type.CONFIG_REMOVED) {
                NetworkConfigEvent netcfgEvent = (NetworkConfigEvent) event;
                Class configClass = netcfgEvent.configClass();
                if (configClass.equals(SegmentRoutingAppConfig.class)) {
                    appCfgHandler.processAppConfigRemoved(netcfgEvent);
                    log.info("App config event .. configuring network");
                    cfgListener.configureNetwork();
                } else if (configClass.equals(SegmentRoutingDeviceConfig.class)) {
                    // TODO Handle sr device config removal
                    log.info("SegmentRoutingDeviceConfig removal is not handled in current implementation");
                } else if (configClass.equals(InterfaceConfig.class)) {
                    // TODO Handle interface removal
                    log.info("InterfaceConfig removal is not handled in current implementation");
                } else {
                    log.error("Unhandled config class: {}", configClass);
                }
            } else if (event.type() == MastershipEvent.Type.MASTER_CHANGED) {
                // Mastership moved: drop cached shouldProgram decisions for the device
                // and its pair, then check whether a full reroute is required.
                MastershipEvent me = (MastershipEvent) event;
                DeviceId deviceId = me.subject();
                Optional<DeviceId> pairDeviceId = getPairDeviceId(deviceId);
                log.info(" ** MASTERSHIP CHANGED Invalidating shouldProgram cache"
                        + " for {}/pair={} due to change", deviceId, pairDeviceId);
                defaultRoutingHandler.invalidateShouldProgramCache(deviceId);
                pairDeviceId.ifPresent(defaultRoutingHandler::invalidateShouldProgramCache);
                defaultRoutingHandler.checkFullRerouteForMasterChange(deviceId, me);
            } else {
                log.warn("Unhandled event type: {}", event.type());
            }
        } catch (Exception e) {
            log.error("SegmentRouting event handler thread thrown an exception: {}", e.getMessage(), e);
        }
    }
}

// Installs IP/ARP/NDP punt rules for the device immediately, and performs the
// full per-device initialization once device configuration is available.
void processDeviceAdded(Device device) {
    log.info("** DEVICE ADDED with ID {}", device.id());
    // NOTE: Punt ARP/NDP even when the device is not configured.
    // Host learning without network config is required for CORD config generator.
    routingRulePopulator.populateIpPunts(device.id());
    routingRulePopulator.populateArpNdpPunts(device.id());
    if (deviceConfiguration == null || !deviceConfiguration.isConfigured(device.id())) {
        log.warn("Device configuration unavailable. Device {} will be "
                + "processed after configuration.", device.id());
        return;
    }
    processDeviceAddedInternal(device.id());
}

// Creates the per-device group handler (on every instance) and, when this node
// is master, populates port addressing and broadcast rules for the device.
private void processDeviceAddedInternal(DeviceId deviceId) {
    // Irrespective of whether the local is a MASTER or not for this device,
    // we need to create a SR-group-handler instance. This is because in a
    // multi-instance setup, any instance can initiate forwarding/next-objectives
    // for any switch (even if this instance is a SLAVE or not even connected
    // to the switch). To handle this, a default-group-handler instance is necessary
    // per switch.
    log.debug("Current groupHandlerMap devs: {}", groupHandlerMap.keySet());
    if (groupHandlerMap.get(deviceId) == null) {
        DefaultGroupHandler groupHandler;
        try {
            groupHandler = DefaultGroupHandler.
                    createGroupHandler(deviceId,
                                       appId,
                                       deviceConfiguration,
                                       linkService,
                                       flowObjectiveService,
                                       this);
        } catch (DeviceConfigNotFoundException e) {
            log.warn(e.getMessage() + " Aborting processDeviceAdded.");
            return;
        }
        log.debug("updating groupHandlerMap with new grpHdlr for device: {}", deviceId);
        groupHandlerMap.put(deviceId, groupHandler);
    }

    if (mastershipService.isLocalMaster(deviceId)) {
        // Master-only programming: addressing rules, VLAN groups, subnet broadcast,
        // and phased-recovery bookkeeping.
        defaultRoutingHandler.populatePortAddressingRules(deviceId);
        defaultRoutingHandler.purgeSeenBeforeRoutes(deviceId);
        DefaultGroupHandler groupHandler = groupHandlerMap.get(deviceId);
        groupHandler.createGroupsFromVlanConfig();
        routingRulePopulator.populateSubnetBroadcastRule(deviceId);
        phasedRecoveryService.init(deviceId);
    }

    appCfgHandler.init(deviceId);
}

// Clears all per-device cached state (next-objective stores, link state, group
// handler) and triggers route-path recomputation for the downed switch.
private void processDeviceRemoved(Device device) {
    // Drop every cached next-objective entry keyed by this device.
    dsNextObjStore.entrySet().stream()
            .filter(entry -> entry.getKey().deviceId().equals(device.id()))
            .forEach(entry -> dsNextObjStore.remove(entry.getKey()));
    vlanNextObjStore.entrySet().stream()
            .filter(entry -> entry.getKey().deviceId().equals(device.id()))
            .forEach(entry -> vlanNextObjStore.remove(entry.getKey()));
    macVlanNextObjStore.entrySet().stream()
            .filter(entry -> entry.getKey().deviceId().equals(device.id()))
            .forEach(entry -> macVlanNextObjStore.remove(entry.getKey()));
    portNextObjStore.entrySet().stream()
            .filter(entry -> entry.getKey().deviceId().equals(device.id()))
            .forEach(entry -> portNextObjStore.remove(entry.getKey()));
    linkHandler.processDeviceRemoved(device);

    DefaultGroupHandler gh = groupHandlerMap.remove(device.id());
    if (gh != null) {
        gh.shutdown();
    }
    // Note that a switch going down is associated with all of its links
    // going down as well, but it is treated as a single switch down event
    // while the link-downs are ignored. We cannot rely on the ordering of
    // events - i.e we cannot expect all link-downs to come before the
    // switch down - so we purge all seen-links for the switch before
    // handling route-path changes for the switch-down
    defaultRoutingHandler
        .populateRoutingRulesForLinkStatusChange(null, null, device.id(), true);
    defaultRoutingHandler.purgeEcmpGraph(device.id());

    // Cleanup all internal groupHandler stores for this device. Should be
    // done after all rerouting or rehashing has been completed
    groupHandlerMap.entrySet()
        .forEach(entry -> entry.getValue().cleanUpForNeighborDown(device.id()));

    phasedRecoveryService.reset(device.id());
}

/**
 * Purge the destinationSet nextObjective store of entries with this device
 * as key. Erases app-level knowledge of hashed groups in this device.
 *
 * @param devId the device identifier
 */
void purgeHashedNextObjectiveStore(DeviceId devId) {
    log.debug("Purging hashed next-obj store for dev:{}", devId);
    dsNextObjStore.entrySet().stream()
            .filter(entry -> entry.getKey().deviceId().equals(devId))
            .forEach(entry -> dsNextObjStore.remove(entry.getKey()));
}

// Guards processPortUpdated(): requires device configuration and local mastership;
// also records the timestamp of the last configured-edge-port event.
private void processPortUpdatedInternal(Device device, Port port) {
    if (deviceConfiguration == null || !deviceConfiguration.isConfigured(device.id())) {
        log.warn("Device configuration uploading. Not handling port event for"
                + "dev: {} port: {}", device.id(), port.number());
        return;
    }

    if (interfaceService.isConfigured(new ConnectPoint(device.id(), port.number()))) {
        lastEdgePortEvent = Instant.now();
    }

    if (!mastershipService.isLocalMaster(device.id())) {
        log.debug("Not master for dev:{} .. not handling port updated event"
                + "for port {}", device.id(), port.number());
        return;
    }
    processPortUpdated(device.id(), port);
}

/**
 * Adds or remove filtering rules for the given switchport. If switchport is
 * an edge facing port, additionally handles host probing and broadcast
 * rules. Must be called by local master of device.
 *
 * @param deviceId the device identifier
 * @param port the port to update
 */
void processPortUpdated(DeviceId deviceId, Port port) {
    // first we handle filtering rules associated with the port
    if (port.isEnabled()) {
        log.info("Switchport {}/{} enabled..programming filters",
                deviceId, port.number());
        routingRulePopulator.processSinglePortFilters(deviceId, port.number(), true);
    } else {
        log.info("Switchport {}/{} disabled..removing filters",
                deviceId, port.number());
        routingRulePopulator.processSinglePortFilters(deviceId, port.number(), false);
    }

    // portUpdated calls are for ports that have gone down or up. For switch
    // to switch ports, link-events should take care of any re-routing or
    // group editing necessary for port up/down. Here we only process edge ports
    // that are already configured.
    ConnectPoint cp = new ConnectPoint(deviceId, port.number());
    VlanId untaggedVlan = interfaceService.getUntaggedVlanId(cp);
    VlanId nativeVlan = interfaceService.getNativeVlanId(cp);
    Set<VlanId> taggedVlans = interfaceService.getTaggedVlanId(cp);

    if (untaggedVlan == null && nativeVlan == null && taggedVlans.isEmpty()) {
        // No interface configuration on this connect point -> not an edge port.
        log.debug("Not handling port updated event for non-edge port (unconfigured) "
                + "dev/port: {}/{}", deviceId, port.number());
        return;
    }
    // Untagged and native VLANs pop the tag on egress; tagged VLANs keep it.
    if (untaggedVlan != null) {
        processEdgePort(deviceId, port, untaggedVlan, true);
    }
    if (nativeVlan != null) {
        processEdgePort(deviceId, port, nativeVlan, true);
    }
    if (!taggedVlans.isEmpty()) {
        taggedVlans.forEach(tag -> processEdgePort(deviceId, port, tag, false));
    }
}

// Handles an up/down transition of a configured edge port on a given VLAN:
// schedules host reprogramming on port-up and updates the port's groups.
private void processEdgePort(DeviceId deviceId, Port port,
                             VlanId vlanId, boolean popVlan) {
    boolean portUp = port.isEnabled();
    if (portUp) {
        log.info("Device:EdgePort {}:{} is enabled in vlan: {}", deviceId,
                 port.number(), vlanId);
        // Host-related reprogramming runs on the host event executor.
        hostEventExecutor.execute(() -> hostHandler.processPortUp(new ConnectPoint(deviceId, port.number())));
    } else {
        log.info("Device:EdgePort {}:{} is disabled in vlan: {}", deviceId,
                 port.number(), vlanId);
    }

    DefaultGroupHandler groupHandler
            = groupHandlerMap.get(deviceId);
    if (groupHandler != null) {
        groupHandler.processEdgePort(port.number(), vlanId, popVlan, portUp);
    } else {
        log.warn("Group handler not found for dev:{}. Not handling edge port"
                + " {} event for port:{}", deviceId,
                (portUp) ? "UP" : "DOWN", port.number());
    }
}

// Lazily creates the DeviceConfiguration on first use; afterwards refreshes it in place.
private void createOrUpdateDeviceConfiguration() {
    if (deviceConfiguration == null) {
        log.info("Creating new DeviceConfiguration");
        deviceConfiguration = new DeviceConfiguration(this);
    } else {
        log.info("Updating DeviceConfiguration");
        deviceConfiguration.updateConfig();
    }
}

// Lazily creates the DefaultRoutingHandler on first use; afterwards rebinds it
// to this manager via update().
private void createOrUpdateDefaultRoutingHandler() {
    if (defaultRoutingHandler == null) {
        log.info("Creating new DefaultRoutingHandler");
        defaultRoutingHandler = new DefaultRoutingHandler(this);
    } else {
        log.info("Updating DefaultRoutingHandler");
        defaultRoutingHandler.update(this);
    }
}

/**
 * Registers the given connect point with the NRS, this is necessary
 * to receive the NDP and ARP packets from the NRS.
 *
 * @param portToRegister connect point to register
 */
public void registerConnectPoint(ConnectPoint portToRegister) {
    neighbourResolutionService.registerNeighbourHandler(
            portToRegister,
            neighbourHandler,
            appId
    );
}

// Listens for SR app/device and interface config changes; (re)builds the
// handlers that depend on network configuration.
private class InternalConfigListener implements NetworkConfigListener {
    // Delay (seconds) used to coalesce bursts of config-added notifications.
    private static final long PROGRAM_DELAY = 2;
    SegmentRoutingManager srManager;

    /**
     * Constructs the internal network config listener.
     *
     * @param srManager segment routing manager
     */
    InternalConfigListener(SegmentRoutingManager srManager) {
        this.srManager = srManager;
    }

    /**
     * Reads network config and initializes related data structure accordingly.
     */
    void configureNetwork() {
        log.info("Configuring network ...");

        // Setting handling of network configuration events completable future
        // The completable future is needed because of the async behaviour of the configureNetwork,
        // listener registration and event arrival
        // Enables us to buffer the events and execute them when the configure network is done.
        synchronized (networkConfigCompletionLock) {
            networkConfigCompletion = new CompletableFuture<>();

            // add a small delay to absorb multiple network config added notifications
            if (!programmingScheduled.get()) {
                log.info("Buffering config calls for {} secs", PROGRAM_DELAY);
                programmingScheduled.set(true);
                mainEventExecutor.schedule(new ConfigChange(), PROGRAM_DELAY, TimeUnit.SECONDS);
            }
            createOrUpdateDeviceConfiguration();

            // Rebuild the config-dependent handlers against the fresh configuration.
            arpHandler = new ArpHandler(srManager);
            icmpHandler = new IcmpHandler(srManager);
            ipHandler = new IpHandler(srManager);
            routingRulePopulator = new RoutingRulePopulator(srManager);
            createOrUpdateDefaultRoutingHandler();

            tunnelHandler = new TunnelHandler(linkService, deviceConfiguration,
                                              groupHandlerMap, tunnelStore);
            policyHandler = new PolicyHandler(appId, deviceConfiguration,
                                              flowObjectiveService,
                                              tunnelHandler, policyStore);
            // Unblock event handling: buffered events can now be drained.
            networkConfigCompletion.complete(true);
        }

        mcastHandler.init();
    }

    @Override
    public void event(NetworkConfigEvent event) {
        if (mainEventExecutor == null) {
            return;
        }
        checkState(appCfgHandler != null, "NetworkConfigEventHandler is not initialized");
        switch (event.type()) {
            case CONFIG_ADDED:
            case CONFIG_UPDATED:
            case CONFIG_REMOVED:
                log.trace("Schedule Network Config event {}", event);
                // Queue events that arrive while configureNetwork() is still running.
                if (networkConfigCompletion == null || networkConfigCompletion.isDone()) {
                    mainEventExecutor.execute(new InternalEventHandler(event));
                } else {
                    queuedEvents.add(event);
                }
                break;
            default:
                break;
        }
    }

    @Override
    public boolean isRelevant(NetworkConfigEvent event) {
        if (event.type() == CONFIG_REGISTERED ||
                event.type() == CONFIG_UNREGISTERED) {
            log.debug("Ignore event {} due to type mismatch", event);
            return false;
        }

        // Only the three config classes this app consumes are relevant.
        if (!event.configClass().equals(SegmentRoutingDeviceConfig.class) &&
                !event.configClass().equals(SegmentRoutingAppConfig.class) &&
                !event.configClass().equals(InterfaceConfig.class)) {
            log.debug("Ignore event {} due to class mismatch", event);
            return false;
        }

        return true;
    }

    // Delayed task scheduled by configureNetwork() to reprocess all devices
    // after the config-notification burst has settled.
    private final class ConfigChange implements Runnable {
        @Override
        public void run() {
            programmingScheduled.set(false);
            log.info("Reacting to config changes after buffer delay");
            for (Device device : deviceService.getDevices()) {
                processDeviceAdded(device);
            }
            defaultRoutingHandler.startPopulationProcess();
        }
    }
}

// Forwards link events to the main event executor; events arriving while the
// initial network configuration is still in flight are queued instead.
private class InternalLinkListener implements LinkListener {
    @Override
    public void event(LinkEvent event) {
        if (mainEventExecutor == null) {
            return;
        }
        if (event.type() == LinkEvent.Type.LINK_ADDED ||
                event.type() == LinkEvent.Type.LINK_UPDATED ||
                event.type() == LinkEvent.Type.LINK_REMOVED) {
            log.trace("Schedule Link event {}", event);
            if (networkConfigCompletion == null || networkConfigCompletion.isDone()) {
                mainEventExecutor.execute(new InternalEventHandler(event));
            } else {
                queuedEvents.add(event);
            }
        }
    }
}

// Forwards relevant device events to the main event executor (queued while
// configuration is in flight).
private class InternalDeviceListener implements DeviceListener {
    @Override
    public void event(DeviceEvent event) {
        if (mainEventExecutor == null) {
            return;
        }
        switch (event.type()) {
            case DEVICE_ADDED:
            case PORT_UPDATED:
            case PORT_ADDED:
            case DEVICE_UPDATED:
            case DEVICE_AVAILABILITY_CHANGED:
                log.trace("Schedule Device event {}", event);
                if (networkConfigCompletion == null || networkConfigCompletion.isDone()) {
                    mainEventExecutor.execute(new InternalEventHandler(event));
                } else {
                    queuedEvents.add(event);
                }
                break;
            default:
        }
    }
}

// Forwards topology-changed events to the main event executor (queued while
// configuration is in flight).
private class InternalTopologyListener implements TopologyListener {
    @Override
    public void event(TopologyEvent event) {
        if (mainEventExecutor == null) {
            return;
        }
        switch (event.type()) {
            case TOPOLOGY_CHANGED:
                log.trace("Schedule Topology event {}", event);
                if (networkConfigCompletion == null || networkConfigCompletion.isDone()) {
                    mainEventExecutor.execute(new InternalEventHandler(event));
                } else {
                    queuedEvents.add(event);
                }
                break;
            default:
        }
    }
}

// Dispatches host events onto the dedicated host event executor.
private class InternalHostListener implements HostListener {
    @Override
    public void event(HostEvent event) {
        if (hostEventExecutor == null) {
            return;
        }
        switch (event.type()) {
            case HOST_ADDED:
            case HOST_MOVED:
            case HOST_REMOVED:
            case HOST_UPDATED:
                log.trace("Schedule Host event {}", event);
                hostEventExecutor.execute(new InternalEventHandler(event));
                break;
            default:
                log.warn("Unsupported host event type: {}", event.type());
                break;
        }
    }
}

// Dispatches multicast events onto the dedicated mcast event executor.
private class InternalMcastListener implements McastListener {
    @Override
    public void event(McastEvent event) {
        if (mcastEventExecutor == null) {
            return;
        }
        switch (event.type()) {
            case SOURCES_ADDED:
            case SOURCES_REMOVED:
            case SINKS_ADDED:
            case SINKS_REMOVED:
            case ROUTE_REMOVED:
            case ROUTE_ADDED:
                log.trace("Schedule Mcast event {}", event);
                mcastEventExecutor.execute(new InternalEventHandler(event));
                break;
            default:
                log.warn("Unsupported mcast event type: {}", event.type());
                break;
        }
    }
}

// Dispatches route events onto the dedicated route event executor.
private class InternalRouteEventListener implements RouteListener {
    @Override
    public void event(RouteEvent event) {
        if (routeEventExecutor == null) {
            return;
        }
        switch (event.type()) {
            case ROUTE_ADDED:
            case ROUTE_UPDATED:
            case ROUTE_REMOVED:
            case ALTERNATIVE_ROUTES_CHANGED:
                log.trace("Schedule Route event {}", event);
                routeEventExecutor.execute(new InternalEventHandler(event));
                break;
            default:
                log.warn("Unsupported route event type: {}", event.type());
                break;
        }
    }
}

// Handles only MASTER_CHANGED mastership events on the main event executor.
private class InternalMastershipListener implements MastershipListener {
    @Override
    public void event(MastershipEvent event) {
        if (mainEventExecutor == null) {
            return;
        }
        switch (event.type()) {
            case MASTER_CHANGED:
                log.debug("Mastership event: {}/{}", event.subject(), event.roleInfo());
                mainEventExecutor.execute(new InternalEventHandler(event));
                break;
            case BACKUPS_CHANGED:
            case SUSPENDED:
            default:
                log.debug("Mastership event type {} not handled", event.type());
                break;
        }
    }
}

// Records the time of the last instance-down cluster event so other components
// can ask how long ago the cluster last shrank.
class InternalClusterListener implements ClusterEventListener {
    private Instant lastClusterEvent = Instant.EPOCH;

    // Milliseconds elapsed since the last recorded instance-down cluster event.
    long timeSinceLastClusterEvent() {
        return Instant.now().toEpochMilli() - lastClusterEvent.toEpochMilli();
    }

    @Override
    public void event(ClusterEvent event) {
        switch (event.type()) {
            case INSTANCE_ACTIVATED:
            case INSTANCE_ADDED:
            case INSTANCE_READY:
                log.debug("Cluster event {} ignored", event.type());
                break;
            case INSTANCE_DEACTIVATED:
            case INSTANCE_REMOVED:
                log.info("** Cluster event {}", event.type());
                lastClusterEvent = Instant.now();
                break;
            default:
                break;
        }
    }
}

// Applies the delta between a port's previous and current interface config:
// removed subnets/VLANs are revoked first, then added ones are programmed.
// Only a single interface per port is supported.
private void updateInterface(InterfaceConfig conf, InterfaceConfig prevConf) {
    try {
        Set<Interface> intfs = conf.getInterfaces();
        Set<Interface> prevIntfs = prevConf.getInterfaces();

        // Now we only handle one interface config at each port.
        if (intfs.size() != 1 || prevIntfs.size() != 1) {
            log.warn("Interface update aborted - one at a time is allowed, " +
                    "but {} / {}(prev) received.", intfs.size(), prevIntfs.size());
            return;
        }

        //The system is in an incoherent state, abort
        if (defaultRoutingHandler == null) {
            log.warn("Interface update aborted, defaultRoutingHandler is null");
            return;
        }

        Interface intf = intfs.stream().findFirst().get();
        Interface prevIntf = prevIntfs.stream().findFirst().get();

        DeviceId deviceId = intf.connectPoint().deviceId();
        PortNumber portNum = intf.connectPoint().port();

        // Revoke subnets that were configured before but are gone now.
        removeSubnetConfig(prevIntf.connectPoint(),
                Sets.difference(new HashSet<>(prevIntf.ipAddressesList()),
                        new HashSet<>(intf.ipAddressesList())));

        if (!prevIntf.vlanNative().equals(VlanId.NONE)
                && !prevIntf.vlanNative().equals(intf.vlanUntagged())
                && !prevIntf.vlanNative().equals(intf.vlanNative())) {
            if (intf.vlanTagged().contains(prevIntf.vlanNative())) {
                // Update filtering objective and L2IG group bucket
                updatePortVlanTreatment(deviceId, portNum, prevIntf.vlanNative(), false);
            } else {
                // RemoveVlanNative
                updateVlanConfigInternal(deviceId, portNum, prevIntf.vlanNative(), true, false);
            }
        }

        if (!prevIntf.vlanUntagged().equals(VlanId.NONE)
                && !prevIntf.vlanUntagged().equals(intf.vlanUntagged())
                && !prevIntf.vlanUntagged().equals(intf.vlanNative())) {
            if (intf.vlanTagged().contains(prevIntf.vlanUntagged())) {
                // Update filtering objective and L2IG group bucket
                updatePortVlanTreatment(deviceId, portNum, prevIntf.vlanUntagged(), false);
            } else {
                // RemoveVlanUntagged
                updateVlanConfigInternal(deviceId, portNum, prevIntf.vlanUntagged(), true, false);
            }
        }

        if (!prevIntf.vlanTagged().isEmpty() && !intf.vlanTagged().equals(prevIntf.vlanTagged())) {
            // RemoveVlanTagged
            Sets.difference(prevIntf.vlanTagged(), intf.vlanTagged()).stream()
                    .filter(i -> !intf.vlanUntagged().equals(i))
                    .filter(i -> !intf.vlanNative().equals(i))
                    .forEach(vlanId -> updateVlanConfigInternal(
                            deviceId, portNum, vlanId, false, false));
        }

        if (!intf.vlanNative().equals(VlanId.NONE)
                && !prevIntf.vlanNative().equals(intf.vlanNative())
                && !prevIntf.vlanUntagged().equals(intf.vlanNative())) {
            if (prevIntf.vlanTagged().contains(intf.vlanNative())) {
                // Update filtering objective and L2IG group bucket
                updatePortVlanTreatment(deviceId, portNum, intf.vlanNative(), true);
            } else {
                // AddVlanNative
                updateVlanConfigInternal(deviceId, portNum, intf.vlanNative(), true, true);
            }
        }

        if (!intf.vlanTagged().isEmpty() && !intf.vlanTagged().equals(prevIntf.vlanTagged())) {
            // AddVlanTagged
            Sets.difference(intf.vlanTagged(), prevIntf.vlanTagged()).stream()
                    .filter(i -> !prevIntf.vlanUntagged().equals(i))
                    .filter(i -> !prevIntf.vlanNative().equals(i))
                    .forEach(vlanId -> updateVlanConfigInternal(
                            deviceId, portNum, vlanId, false, true)
                    );
        }

        if (!intf.vlanUntagged().equals(VlanId.NONE)
                && !prevIntf.vlanUntagged().equals(intf.vlanUntagged())
                && !prevIntf.vlanNative().equals(intf.vlanUntagged())) {
            if (prevIntf.vlanTagged().contains(intf.vlanUntagged())) {
                // Update filtering objective and L2IG group bucket
                updatePortVlanTreatment(deviceId, portNum, intf.vlanUntagged(), true);
            } else {
                // AddVlanUntagged
                updateVlanConfigInternal(deviceId, portNum, intf.vlanUntagged(), true, true);
            }
        }

        // Program subnets that are new in the current configuration.
        addSubnetConfig(prevIntf.connectPoint(),
                Sets.difference(new HashSet<>(intf.ipAddressesList()),
                        new HashSet<>(prevIntf.ipAddressesList())));
    } catch (ConfigException e) {
        log.error("Error in configuration");
    }
}

// Switches a port's VLAN treatment between tagged and untagged (pushVlan)
// by rewriting its filtering objectives, L2IG bucket and host rules.
private void updatePortVlanTreatment(DeviceId deviceId, PortNumber portNum,
                                     VlanId vlanId, boolean pushVlan) {
    DefaultGroupHandler grpHandler = getGroupHandler(deviceId);
    if (grpHandler == null) {
        log.warn("Failed to retrieve group handler for device {}", deviceId);
        return;
    }
    // Update filtering objective for a single port
    // (remove the old tagging mode, install the new one).
    routingRulePopulator.updateSinglePortFilters(deviceId, portNum, !pushVlan, vlanId, false);
    routingRulePopulator.updateSinglePortFilters(deviceId, portNum, pushVlan, vlanId, true);

    if (getVlanNextObjectiveId(deviceId, vlanId) != -1) {
        // Update L2IG bucket of the port
        grpHandler.updateL2InterfaceGroupBucket(portNum, vlanId, pushVlan);
        // Update bridging and unicast routing rule for each host
        hostEventExecutor.execute(() -> hostHandler.processIntfVlanUpdatedEvent(deviceId, portNum,
                vlanId, !pushVlan, false));
        hostEventExecutor.execute(() -> hostHandler.processIntfVlanUpdatedEvent(deviceId, portNum,
                vlanId, pushVlan, true));
    } else {
        log.warn("Failed to retrieve next objective for vlan {} in device {}:{}",
                 vlanId, deviceId, portNum);
    }
}

// Installs (install=true) or removes (install=false) all VLAN-related state
// for a single port: filtering objectives, mcast filters, host rules, and the
// L2 interface / flood groups.
private void updateVlanConfigInternal(DeviceId deviceId, PortNumber portNum,
                                      VlanId vlanId, boolean pushVlan, boolean install) {
    DefaultGroupHandler grpHandler = getGroupHandler(deviceId);
    if (grpHandler == null) {
        log.warn("Failed to retrieve group handler for device {}", deviceId);
        return;
    }
    // Update filtering objective for a single port
    routingRulePopulator.updateSinglePortFilters(deviceId, portNum, pushVlan, vlanId, install);

    // Update filtering objective for multicast ingress port
    mcastHandler.updateFilterToDevice(deviceId, portNum, vlanId, install);

    int nextId = getVlanNextObjectiveId(deviceId, vlanId);

    if (nextId != -1 && !install) {
        // Remove L2 Bridging rule and L3 Unicast rule to the host
        hostEventExecutor.execute(() -> hostHandler.processIntfVlanUpdatedEvent(deviceId, portNum,
                vlanId, pushVlan, install));

        // Remove broadcast forwarding rule and corresponding L2FG for VLAN
        // only if there is no port configured on that VLAN ID
        if (!getVlanPortMap(deviceId).containsKey(vlanId)) {
// Remove broadcast forwarding rule for the VLAN routingRulePopulator.updateSubnetBroadcastRule(deviceId, vlanId, install); // Remove L2FG for VLAN grpHandler.removeBcastGroupFromVlan(deviceId, portNum, vlanId, pushVlan); } else { // Remove a single port from L2FG grpHandler.updateGroupFromVlanConfiguration(vlanId, portNum, nextId, install); } // Remove L2IG of the port grpHandler.removePortNextObjective(deviceId, portNum, vlanId, pushVlan); } else if (install) { // Create L2IG of the port grpHandler.createPortNextObjective(deviceId, portNum, vlanId, pushVlan); // Create L2 Bridging rule and L3 Unicast rule to the host hostEventExecutor.execute(() -> hostHandler.processIntfVlanUpdatedEvent(deviceId, portNum, vlanId, pushVlan, install)); if (nextId != -1) { // Add a single port to L2FG grpHandler.updateGroupFromVlanConfiguration(vlanId, portNum, nextId, install); } else { // Create L2FG for VLAN grpHandler.createBcastGroupFromVlan(vlanId, Collections.singleton(portNum)); routingRulePopulator.updateSubnetBroadcastRule(deviceId, vlanId, install); } } else { log.warn("Failed to retrieve next objective for vlan {} in device {}:{}", vlanId, deviceId, portNum); } } private void removeSubnetConfig(ConnectPoint cp, Set<InterfaceIpAddress> ipAddressSet) { Set<IpPrefix> ipPrefixSet = ipAddressSet.stream(). map(InterfaceIpAddress::subnetAddress).collect(Collectors.toSet()); Set<InterfaceIpAddress> deviceIntfIpAddrs = interfaceService.getInterfaces().stream() .filter(intf -> intf.connectPoint().deviceId().equals(cp.deviceId())) .filter(intf -> !intf.connectPoint().equals(cp)) .flatMap(intf -> intf.ipAddressesList().stream()) .collect(Collectors.toSet()); // 1. 
Partial subnet population // Remove routing rules for removed subnet from previous configuration, // which does not also exist in other interfaces in the same device Set<IpPrefix> deviceIpPrefixSet = deviceIntfIpAddrs.stream() .map(InterfaceIpAddress::subnetAddress) .collect(Collectors.toSet()); Set<IpPrefix> subnetsToBeRevoked = ipPrefixSet.stream() .filter(ipPrefix -> !deviceIpPrefixSet.contains(ipPrefix)) .collect(Collectors.toSet()); // Check if any of the subnets to be revoked is configured in the pairDevice. // If any, repopulate the subnet with pairDevice connectPoint instead of revoking. Optional<DeviceId> pairDevice = getPairDeviceId(cp.deviceId()); if (pairDevice.isPresent()) { Set<IpPrefix> pairDeviceIpPrefix = getDeviceSubnetMap().get(pairDevice.get()); Set<IpPrefix> subnetsExistingInPairDevice = subnetsToBeRevoked.stream() .filter(ipPrefix -> pairDeviceIpPrefix.contains(ipPrefix)) .collect(Collectors.toSet()); // Update the subnets existing in pair device with pair device connect point. if (!subnetsExistingInPairDevice.isEmpty()) { // PortNumber of connect point is not relevant in populate subnet and hence providing as ANY. ConnectPoint pairDeviceCp = new ConnectPoint(pairDevice.get(), PortNumber.ANY); log.debug("Updating the subnets: {} with pairDevice connectPoint as it exists in the Pair device: {}", subnetsExistingInPairDevice, pairDeviceCp); defaultRoutingHandler.populateSubnet(Collections.singleton(pairDeviceCp), subnetsExistingInPairDevice); } // Remove only the subnets that are not configured in the pairDevice. subnetsToBeRevoked = Sets.difference(subnetsToBeRevoked, subnetsExistingInPairDevice); } if (!subnetsToBeRevoked.isEmpty()) { log.debug("Removing subnets for connectPoint: {}, subnets: {}", cp, subnetsToBeRevoked); defaultRoutingHandler.revokeSubnet(subnetsToBeRevoked); } // 2. 
Interface IP punts // Remove IP punts for old Intf address Set<IpAddress> deviceIpAddrs = deviceIntfIpAddrs.stream() .map(InterfaceIpAddress::ipAddress) .collect(Collectors.toSet()); ipAddressSet.stream() .map(InterfaceIpAddress::ipAddress) .filter(interfaceIpAddress -> !deviceIpAddrs.contains(interfaceIpAddress)) .forEach(interfaceIpAddress -> routingRulePopulator.revokeSingleIpPunts( cp.deviceId(), interfaceIpAddress)); // 3. Host unicast routing rule // Remove unicast routing rule hostEventExecutor.execute(() -> hostHandler.processIntfIpUpdatedEvent(cp, ipPrefixSet, false)); } private void addSubnetConfig(ConnectPoint cp, Set<InterfaceIpAddress> ipAddressSet) { Set<IpPrefix> ipPrefixSet = ipAddressSet.stream(). map(InterfaceIpAddress::subnetAddress).collect(Collectors.toSet()); Set<InterfaceIpAddress> deviceIntfIpAddrs = interfaceService.getInterfaces().stream() .filter(intf -> intf.connectPoint().deviceId().equals(cp.deviceId())) .filter(intf -> !intf.connectPoint().equals(cp)) .flatMap(intf -> intf.ipAddressesList().stream()) .collect(Collectors.toSet()); // 1. Partial subnet population // Add routing rules for newly added subnet, which does not also exist in // other interfaces in the same device Set<IpPrefix> deviceIpPrefixSet = deviceIntfIpAddrs.stream() .map(InterfaceIpAddress::subnetAddress) .collect(Collectors.toSet()); Set<IpPrefix> subnetsToBePopulated = ipPrefixSet.stream() .filter(ipPrefix -> !deviceIpPrefixSet.contains(ipPrefix)) .collect(Collectors.toSet()); if (!subnetsToBePopulated.isEmpty()) { log.debug("Adding subnets for connectPoint: {}, subnets: {}", cp, subnetsToBePopulated); // check if pair-device has the same subnet configured? 
Optional<DeviceId> pairDevice = getPairDeviceId(cp.deviceId()); if (pairDevice.isPresent()) { Set<IpPrefix> pairDeviceIpPrefix = getDeviceSubnetMap().get(pairDevice.get()); Set<IpPrefix> subnetsToBePopulatedAsDualHomed = subnetsToBePopulated.stream() .filter(ipPrefix -> pairDeviceIpPrefix.contains(ipPrefix)) .collect(Collectors.toSet()); Set<IpPrefix> subnetsToBePopulatedAsSingleHomed = Sets.difference(subnetsToBePopulated, subnetsToBePopulatedAsDualHomed); if (!subnetsToBePopulatedAsSingleHomed.isEmpty()) { defaultRoutingHandler.populateSubnet( Collections.singleton(cp), subnetsToBePopulatedAsSingleHomed); } if (!subnetsToBePopulatedAsDualHomed.isEmpty()) { Set<ConnectPoint> cpts = new HashSet<>(); cpts.add(cp); // As Subnets is DualHomed adding the pairDevice also as ConnectPoint. // PortNumber of connect point is not relevant in populate subnet and hence providing as ANY. ConnectPoint pairCp = new ConnectPoint(pairDevice.get(), PortNumber.ANY); cpts.add(pairCp); log.debug("Adding DualHomed subnets for connectPoint: {} and its pair device: {}, subnets: {}", cp, pairCp, subnetsToBePopulatedAsDualHomed); // populating the subnets as DualHomed defaultRoutingHandler.populateSubnet( cpts, subnetsToBePopulated); // revoking the subnets populated in the device as it is now Dualhomed. defaultRoutingHandler.revokeSubnet(Collections.singleton(cp.deviceId()), subnetsToBePopulatedAsDualHomed); } } else { defaultRoutingHandler.populateSubnet( Collections.singleton(cp), subnetsToBePopulated); } } // 2. Interface IP punts // Add IP punts for new Intf address Set<IpAddress> deviceIpAddrs = deviceIntfIpAddrs.stream() .map(InterfaceIpAddress::ipAddress) .collect(Collectors.toSet()); ipAddressSet.stream() .map(InterfaceIpAddress::ipAddress) .filter(interfaceIpAddress -> !deviceIpAddrs.contains(interfaceIpAddress)) .forEach(interfaceIpAddress -> routingRulePopulator.populateSingleIpPunts( cp.deviceId(), interfaceIpAddress)); // 3. 
Host unicast routing rule // Add unicast routing rule hostEventExecutor.execute(() -> hostHandler.processIntfIpUpdatedEvent(cp, ipPrefixSet, true)); } }
apache-2.0
dagnir/aws-sdk-java
aws-java-sdk-directory/src/main/java/com/amazonaws/services/directory/model/transform/CreateTrustRequestProtocolMarshaller.java
2619
/* * Copyright 2012-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with * the License. A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions * and limitations under the License. */ package com.amazonaws.services.directory.model.transform; import javax.annotation.Generated; import com.amazonaws.SdkClientException; import com.amazonaws.Request; import com.amazonaws.http.HttpMethodName; import com.amazonaws.services.directory.model.*; import com.amazonaws.transform.Marshaller; import com.amazonaws.protocol.*; import com.amazonaws.annotation.SdkInternalApi; /** * CreateTrustRequest Marshaller */ @Generated("com.amazonaws:aws-java-sdk-code-generator") @SdkInternalApi public class CreateTrustRequestProtocolMarshaller implements Marshaller<Request<CreateTrustRequest>, CreateTrustRequest> { private static final OperationInfo SDK_OPERATION_BINDING = OperationInfo.builder().protocol(Protocol.AWS_JSON).requestUri("/") .httpMethodName(HttpMethodName.POST).hasExplicitPayloadMember(false).hasPayloadMembers(true) .operationIdentifier("DirectoryService_20150416.CreateTrust").serviceName("AWSDirectoryService").build(); private final com.amazonaws.protocol.json.SdkJsonProtocolFactory protocolFactory; public CreateTrustRequestProtocolMarshaller(com.amazonaws.protocol.json.SdkJsonProtocolFactory protocolFactory) { this.protocolFactory = protocolFactory; } public Request<CreateTrustRequest> marshall(CreateTrustRequest createTrustRequest) { if (createTrustRequest == null) { throw new SdkClientException("Invalid argument passed to marshall(...)"); } try { final ProtocolRequestMarshaller<CreateTrustRequest> 
protocolMarshaller = protocolFactory.createProtocolMarshaller(SDK_OPERATION_BINDING, createTrustRequest); protocolMarshaller.startMarshalling(); CreateTrustRequestMarshaller.getInstance().marshall(createTrustRequest, protocolMarshaller); return protocolMarshaller.finishMarshalling(); } catch (Exception e) { throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e); } } }
apache-2.0
dagnir/aws-sdk-java
aws-java-sdk-inspector/src/main/java/com/amazonaws/services/inspector/model/transform/DescribeRulesPackagesResultJsonUnmarshaller.java
3368
/* * Copyright 2012-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with * the License. A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions * and limitations under the License. */ package com.amazonaws.services.inspector.model.transform; import java.math.*; import javax.annotation.Generated; import com.amazonaws.services.inspector.model.*; import com.amazonaws.transform.SimpleTypeJsonUnmarshallers.*; import com.amazonaws.transform.*; import com.fasterxml.jackson.core.JsonToken; import static com.fasterxml.jackson.core.JsonToken.*; /** * DescribeRulesPackagesResult JSON Unmarshaller */ @Generated("com.amazonaws:aws-java-sdk-code-generator") public class DescribeRulesPackagesResultJsonUnmarshaller implements Unmarshaller<DescribeRulesPackagesResult, JsonUnmarshallerContext> { public DescribeRulesPackagesResult unmarshall(JsonUnmarshallerContext context) throws Exception { DescribeRulesPackagesResult describeRulesPackagesResult = new DescribeRulesPackagesResult(); int originalDepth = context.getCurrentDepth(); String currentParentElement = context.getCurrentParentElement(); int targetDepth = originalDepth + 1; JsonToken token = context.getCurrentToken(); if (token == null) token = context.nextToken(); if (token == VALUE_NULL) { return describeRulesPackagesResult; } while (true) { if (token == null) break; if (token == FIELD_NAME || token == START_OBJECT) { if (context.testExpression("rulesPackages", targetDepth)) { context.nextToken(); describeRulesPackagesResult.setRulesPackages(new ListUnmarshaller<RulesPackage>(RulesPackageJsonUnmarshaller.getInstance()) .unmarshall(context)); } if 
(context.testExpression("failedItems", targetDepth)) { context.nextToken(); describeRulesPackagesResult.setFailedItems(new MapUnmarshaller<String, FailedItemDetails>(context.getUnmarshaller(String.class), FailedItemDetailsJsonUnmarshaller.getInstance()).unmarshall(context)); } } else if (token == END_ARRAY || token == END_OBJECT) { if (context.getLastParsedParentElement() == null || context.getLastParsedParentElement().equals(currentParentElement)) { if (context.getCurrentDepth() <= originalDepth) break; } } token = context.nextToken(); } return describeRulesPackagesResult; } private static DescribeRulesPackagesResultJsonUnmarshaller instance; public static DescribeRulesPackagesResultJsonUnmarshaller getInstance() { if (instance == null) instance = new DescribeRulesPackagesResultJsonUnmarshaller(); return instance; } }
apache-2.0
katjahahn/PortEx
src/main/java/com/github/katjahahn/parser/HeaderKey.java
1008
/*******************************************************************************
 * Copyright 2014 Katja Hahn
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 ******************************************************************************/
package com.github.katjahahn.parser;

import com.github.katjahahn.tools.anomalies.FieldOrStructureKey;

/**
 * Represents a key for a {@link StandardField}.
 * <p>
 * Marker interface: it declares no members of its own and exists so header-field
 * keys can be distinguished (and used as {@link FieldOrStructureKey}) by type.
 *
 * @author Katja Hahn
 *
 */
public interface HeaderKey extends FieldOrStructureKey {

}
apache-2.0
aws/aws-sdk-java
aws-java-sdk-codebuild/src/main/java/com/amazonaws/services/codebuild/model/ReportGroupTrendFieldType.java
2099
/*
 * Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
 * the License. A copy of the License is located at
 *
 * http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */
package com.amazonaws.services.codebuild.model;

import javax.annotation.Generated;

/**
 * Trend fields that can be requested for a report group.
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public enum ReportGroupTrendFieldType {

    PASS_RATE("PASS_RATE"),
    DURATION("DURATION"),
    TOTAL("TOTAL"),
    LINE_COVERAGE("LINE_COVERAGE"),
    LINES_COVERED("LINES_COVERED"),
    LINES_MISSED("LINES_MISSED"),
    BRANCH_COVERAGE("BRANCH_COVERAGE"),
    BRANCHES_COVERED("BRANCHES_COVERED"),
    BRANCHES_MISSED("BRANCHES_MISSED");

    // Fix: enum fields should be immutable; made final (value never changes after construction).
    private final String value;

    private ReportGroupTrendFieldType(String value) {
        this.value = value;
    }

    @Override
    public String toString() {
        return this.value;
    }

    /**
     * Use this in place of valueOf.
     *
     * @param value
     *        real value
     * @return ReportGroupTrendFieldType corresponding to the value
     *
     * @throws IllegalArgumentException
     *         If the specified value does not map to one of the known values in this enum.
     */
    public static ReportGroupTrendFieldType fromValue(String value) {
        if (value == null || "".equals(value)) {
            throw new IllegalArgumentException("Value cannot be null or empty!");
        }

        for (ReportGroupTrendFieldType enumEntry : ReportGroupTrendFieldType.values()) {
            if (enumEntry.toString().equals(value)) {
                return enumEntry;
            }
        }

        throw new IllegalArgumentException("Cannot create enum from " + value + " value!");
    }
}
apache-2.0
aws/aws-sdk-java
aws-java-sdk-opsworks/src/main/java/com/amazonaws/services/opsworks/model/RegisterElasticIpResult.java
3751
/*
 * Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
 * the License. A copy of the License is located at
 *
 * http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */
package com.amazonaws.services.opsworks.model;

import java.io.Serializable;
import javax.annotation.Generated;

/**
 * <p>
 * Contains the response to a <code>RegisterElasticIp</code> request.
 * </p>
 *
 * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/opsworks-2013-02-18/RegisterElasticIp" target="_top">AWS API
 *      Documentation</a>
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class RegisterElasticIpResult extends com.amazonaws.AmazonWebServiceResult<com.amazonaws.ResponseMetadata> implements Serializable, Cloneable {

    /**
     * <p>
     * The Elastic IP address.
     * </p>
     */
    private String elasticIp;

    /**
     * Sets the Elastic IP address.
     *
     * @param elasticIp
     *        The Elastic IP address.
     */
    public void setElasticIp(String elasticIp) {
        this.elasticIp = elasticIp;
    }

    /**
     * Returns the Elastic IP address.
     *
     * @return The Elastic IP address.
     */
    public String getElasticIp() {
        return this.elasticIp;
    }

    /**
     * Fluent setter for the Elastic IP address.
     *
     * @param elasticIp
     *        The Elastic IP address.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public RegisterElasticIpResult withElasticIp(String elasticIp) {
        setElasticIp(elasticIp);
        return this;
    }

    /**
     * Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
     * redacted from this string using a placeholder value.
     *
     * @return A string representation of this object.
     *
     * @see java.lang.Object#toString()
     */
    @Override
    public String toString() {
        StringBuilder text = new StringBuilder("{");
        if (getElasticIp() != null) {
            text.append("ElasticIp: ").append(getElasticIp());
        }
        return text.append("}").toString();
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        // instanceof is false for null, so no separate null check is required.
        if (!(obj instanceof RegisterElasticIpResult)) {
            return false;
        }
        RegisterElasticIpResult other = (RegisterElasticIpResult) obj;
        if (other.getElasticIp() == null ^ this.getElasticIp() == null) {
            return false;
        }
        // Both null, or both non-null and equal.
        return other.getElasticIp() == null || other.getElasticIp().equals(this.getElasticIp());
    }

    @Override
    public int hashCode() {
        // Equivalent to 31 * 1 + hash(elasticIp), matching the conventional prime accumulation.
        return 31 + ((getElasticIp() == null) ? 0 : getElasticIp().hashCode());
    }

    @Override
    public RegisterElasticIpResult clone() {
        try {
            return (RegisterElasticIpResult) super.clone();
        } catch (CloneNotSupportedException e) {
            throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e);
        }
    }

}
apache-2.0
404neko/jandan_plus
app/src/main/java/com/socks/jandan/utils/NetWorkUtil.java
1042
package com.socks.jandan.utils;

import android.content.Context;
import android.net.ConnectivityManager;
import android.net.NetworkInfo;

/**
 * Created by zhaokaiqiang on 15/4/22.
 *
 * Helpers for querying the device's network connectivity state.
 */
public class NetWorkUtil {

    /**
     * Returns whether any network is currently connected.
     *
     * @param context context used to look up the ConnectivityManager
     * @return true if an active network exists and is connected
     */
    public static boolean isNetWorkConnected(Context context) {
        ConnectivityManager cm = (ConnectivityManager) context
                .getSystemService(Context.CONNECTIVITY_SERVICE);
        // getActiveNetworkInfo() returns null when there is no active network.
        NetworkInfo netInfo = cm.getActiveNetworkInfo();
        return netInfo != null && netInfo.isConnected();
    }

    /**
     * Returns whether the current connection is Wi-Fi.
     *
     * @param context context used to look up the ConnectivityManager
     * @return true if the Wi-Fi network exists and is connected
     */
    public static boolean isWifiConnected(Context context) {
        ConnectivityManager connectivityManager = (ConnectivityManager) context
                .getSystemService(Context.CONNECTIVITY_SERVICE);
        NetworkInfo wifiNetworkInfo = connectivityManager
                .getNetworkInfo(ConnectivityManager.TYPE_WIFI);
        // Fix: getNetworkInfo(TYPE_WIFI) is documented to return null when the
        // network type is not supported; the original dereferenced it
        // unconditionally and could throw NullPointerException.
        return wifiNetworkInfo != null && wifiNetworkInfo.isConnected();
    }

}
apache-2.0
kjots/gwt-lib
uri-emul/src/test/java/org/kjots/lib/gwt/test/UriEmulGwtTestSuite.java
1922
/* * Copyright © 2011 Karl J. Ots <kjots@kjots.org> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.kjots.lib.gwt.test; import com.google.gwt.junit.tools.GWTTestSuite; import junit.framework.Test; import junit.framework.TestSuite; import org.kjots.lib.gwt.client.org.apache.harmony.luni.tests.java.net.URIGwtTest; import org.kjots.lib.gwt.client.org.apache.harmony.luni.tests.java.net.URISyntaxExceptionGwtTest; /** * java.net.URI Emulation GWT Test Suite. * <p> * Created: 23rd March 2011. * * @author <a href="mailto:kjots@kjots.org">Karl J. Ots &lt;kjots@kjots.org&gt;</a> * @since 1.0 */ public class UriEmulGwtTestSuite extends GWTTestSuite { /** * Create the test suite. * * @return The test suite. */ public static Test suite() { UriEmulGwtTestSuite suite = new UriEmulGwtTestSuite(); addTestSuites(suite); return suite; } /** * Add the test suites. * * @param suite The test suite. */ public static void addTestSuites(TestSuite suite) { suite.addTestSuite(URIGwtTest.class); suite.addTestSuite(URISyntaxExceptionGwtTest.class); } /** * Construct a new java.net.URI Emulation GWT Test Suite. * <p> * This constructor is declared <code>private</code> to prevent external * instantiation. */ private UriEmulGwtTestSuite() { super("java.net.URI Emulation GWT Test Suite."); } }
apache-2.0
Aksyumov/saksyumov
chapter_001/src/main/java/ru/job4j/array/ArrayToArray.java
815
package ru.job4j.array; public class ArrayToArray { public int[] arrayPlusArray(int[] first, int[] second) { int [] result = new int[first.length + second.length]; int firstIndex = 0; int secondIndex = 0; for(int i=0; i < result.length; i++) { if(firstIndex >= first.length){ result[i] = second[secondIndex]; secondIndex ++; }else if(secondIndex >= second.length){ result[i] = first[firstIndex]; firstIndex ++; }else if( first[firstIndex]<second[secondIndex]){ result[i] = first[firstIndex]; firstIndex ++; }else{ result[i] = second[secondIndex]; secondIndex ++; } } return result; } }
apache-2.0
datastax/java-driver
core/src/main/java/com/datastax/oss/driver/internal/core/protocol/package-info.java
728
/* * Copyright DataStax, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** Specialization of the native protocol layer for the driver, based on Netty. */ package com.datastax.oss.driver.internal.core.protocol;
apache-2.0
sjaco002/incubator-asterixdb
asterix-om/src/main/java/edu/uci/ics/asterix/om/base/AMutableLine.java
897
/*
 * Copyright 2009-2013 by The Regents of the University of California
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * you may obtain a copy of the License from
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package edu.uci.ics.asterix.om.base;

/**
 * Mutable variant of {@link ALine}: allows the two endpoints to be replaced
 * in place via {@link #setValue(APoint, APoint)} (e.g. for object reuse).
 */
public class AMutableLine extends ALine {

    public AMutableLine(APoint p1, APoint p2) {
        super(p1, p2);
    }

    /**
     * Replaces both endpoints of this line.
     *
     * @param p1 new first endpoint
     * @param p2 new second endpoint
     */
    public void setValue(APoint p1, APoint p2) {
        this.p1 = p1;
        this.p2 = p2;
    }

}
apache-2.0
VigorousLiang/RxjavaRetrofitNetwork
networklib/src/main/java/com/vigorous/asynchronized/network/request/DecryptResponsebodyConverter.java
926
package com.vigorous.asynchronized.network.request; import com.google.gson.Gson; import com.google.gson.TypeAdapter; import java.io.IOException; import okhttp3.ResponseBody; import retrofit2.Converter; /** * Created by Vigorous.Liang on 2017/9/22. */ public class DecryptResponsebodyConverter<T> implements Converter<ResponseBody, T> { private final Gson mGson;// gson对象 private final TypeAdapter<T> adapter; /** * 构造器 */ public DecryptResponsebodyConverter(Gson gson, TypeAdapter<T> adapter) { this.mGson = gson; this.adapter = adapter; } /** * 转换 * * @param responseBody * @return * @throws IOException */ @Override public T convert(ResponseBody responseBody) throws IOException { String response = responseBody.string(); // TODO Decrypt return adapter.fromJson(response); } }
apache-2.0
eddhkim/postmaster-runner
src/test/java/co/poynt/postmaster/PostmasterCollectionRunnerTest.java
584
package co.poynt.postmaster; import org.testng.Assert; import org.testng.annotations.Test; import co.poynt.postman.PostmanRunResult; public class PostmasterCollectionRunnerTest { // example to run the postmaster @Test(enabled = false) public void runCollection() throws Exception { PostmasterCollectionRunner br = new PostmasterCollectionRunner(); PostmanRunResult runResult = br.runCollection( "classpath:sample_postmaster_collection.json", "classpath:sample_postmaster_environment.json", null, false); Assert.assertTrue(runResult.isSuccessful()); } }
apache-2.0
aws/aws-sdk-java
aws-java-sdk-waf/src/main/java/com/amazonaws/services/waf/model/waf/transform/SizeConstraintSetSummaryMarshaller.java
2370
/* * Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with * the License. A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions * and limitations under the License. */ package com.amazonaws.services.waf.model.waf.transform; import javax.annotation.Generated; import com.amazonaws.SdkClientException; import com.amazonaws.services.waf.model.*; import com.amazonaws.protocol.*; import com.amazonaws.annotation.SdkInternalApi; /** * SizeConstraintSetSummaryMarshaller */ @Generated("com.amazonaws:aws-java-sdk-code-generator") @SdkInternalApi public class SizeConstraintSetSummaryMarshaller { private static final MarshallingInfo<String> SIZECONSTRAINTSETID_BINDING = MarshallingInfo.builder(MarshallingType.STRING) .marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("SizeConstraintSetId").build(); private static final MarshallingInfo<String> NAME_BINDING = MarshallingInfo.builder(MarshallingType.STRING).marshallLocation(MarshallLocation.PAYLOAD) .marshallLocationName("Name").build(); private static final SizeConstraintSetSummaryMarshaller instance = new SizeConstraintSetSummaryMarshaller(); public static SizeConstraintSetSummaryMarshaller getInstance() { return instance; } /** * Marshall the given parameter object. 
*/ public void marshall(SizeConstraintSetSummary sizeConstraintSetSummary, ProtocolMarshaller protocolMarshaller) { if (sizeConstraintSetSummary == null) { throw new SdkClientException("Invalid argument passed to marshall(...)"); } try { protocolMarshaller.marshall(sizeConstraintSetSummary.getSizeConstraintSetId(), SIZECONSTRAINTSETID_BINDING); protocolMarshaller.marshall(sizeConstraintSetSummary.getName(), NAME_BINDING); } catch (Exception e) { throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e); } } }
apache-2.0
wareninja/ip2location-jsondata
src/main/java/com/wareninja/opensource/ip2location/services/Provider.java
1025
package com.wareninja.opensource.ip2location.services;

import java.util.List;

import com.google.gson.JsonObject;

import com.wareninja.opensource.ip2location.config.YamlConfiguration;

/**
 * A source of IP2Location records that can be transferred in batches to a
 * {@link BulkService} sink.
 */
public interface Provider {

    /**
     * Streams all records to the bulk service in batches of
     * {@code config.getElastic().batchSize}, optionally dropping the existing
     * dataset first, then runs {@code closeConnections}.
     *
     * @param bulkService      sink receiving each batch
     * @param config           supplies batch size and drop-dataset flag
     * @param closeConnections cleanup hook run after the transfer completes
     */
    default void transfer(final BulkService bulkService, YamlConfiguration config, final Runnable closeConnections) {
        long count = this.getCount();
        final int limit = config.getElastic().batchSize;
        int skip = 0;

        if (count != 0 && config.getElastic().dropDataset) {
            bulkService.dropDataSet();
        }

        // Full batches of `limit` records.
        while (count >= limit) {
            // Fix: original used raw `List`; buildJSONContent returns List<JsonObject>.
            List<JsonObject> content = this.buildJSONContent(skip, limit);
            bulkService.proceed(content);
            count -= limit;
            skip += limit;
        }

        // Final partial batch, if any (count < limit here, so the int cast is safe).
        if (count > 0) {
            List<JsonObject> content = this.buildJSONContent(skip, (int) count);
            bulkService.proceed(content);
        }

        closeConnections.run();
    }

    /** @return total number of records available from this provider */
    long getCount();

    /**
     * Builds one batch of records.
     *
     * @param skip  number of records to skip from the start
     * @param limit maximum number of records to return
     * @return the batch as JSON objects
     */
    List<JsonObject> buildJSONContent(int skip, int limit);
}
apache-2.0
lllsssxxx/coolweather
app/src/main/java/com/coolweather/android/gson/AQI.java
219
package com.coolweather.android.gson; /** * Created by lllsssxxx on 2017/9/18. */ public class AQI { public AQICity city; public class AQICity{ public String api; public String pm25; } }
apache-2.0
ibrahimshbat/JGroups
src/org/jgroups/protocols/jzookeeper/z2pinfinspan/ViewManager.java
2095
package org.jgroups.protocols.jzookeeper.z2pinfinspan; import org.jgroups.Address; import org.jgroups.View; import java.util.*; /** * A class to provide a means of reducing a views size in bytes by storing each view in a byte array, and storing past view * data at each node in the cluster. * * @author Ryan Emerson * @since 4.0 */ public class ViewManager { private volatile View currentView; public void setCurrentView(View view) { this.currentView = view; } public boolean containsAddress(MessageOrderInfo messageOrderInfo, Address address) { return getDestinations(messageOrderInfo).contains(address); } public List<Address> getDestinations(MessageOrderInfo messageOrderInfo) { return getAddresses(currentView, messageOrderInfo.getDestinations()); } public byte[] getDestinationsAsByteArray(Collection<Address> addresses) { if (addresses.size() > Byte.MAX_VALUE) throw new IllegalArgumentException("Number of addresses cannot be greater than " + Byte.MAX_VALUE); byte[] destinations = new byte[addresses.size()]; int index = 0; for (Address address : addresses) destinations[index++] = (byte) currentView.getMembers().indexOf(address); return destinations; } public long getClientLastOrdering(MessageOrderInfo messageOrderInfo, Address address) { List<Address> destinations = getDestinations(messageOrderInfo); int addressIndex = destinations.indexOf(address); if (addressIndex >= 0) return messageOrderInfo.getclientsLastOrder()[addressIndex]; else return -1; } private List<Address> getAddresses(View view, byte[] indexes) { if (view == null) throw new IllegalArgumentException("View cannot be null"); List<Address> addresses = new ArrayList<Address>(); for (byte index : indexes) { addresses.add(view.getMembers().get(index)); } return addresses; } }
apache-2.0
benjchristensen/RxJava
src/test/java/io/reactivex/internal/operators/observable/ObservableSubscribeOnTest.java
6761
/**
 * Copyright 2016 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in
 * compliance with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the License is
 * distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See
 * the License for the specific language governing permissions and limitations under the License.
 */

package io.reactivex.internal.operators.observable;

import static org.junit.Assert.assertEquals;

import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicInteger;

import org.junit.*;

import io.reactivex.*;
import io.reactivex.disposables.*;
import io.reactivex.observers.TestObserver;
import io.reactivex.schedulers.*;

/**
 * Tests for the {@code subscribeOn} operator on {@code Observable}: correct scheduling of the
 * actual subscription, disposal behavior before and after the upstream starts emitting, and
 * error propagation across the scheduler boundary.
 */
public class ObservableSubscribeOnTest {

    /**
     * Regression test: an upstream that ignores interrupts must still be able to complete
     * normally after the downstream has disposed.
     */
    @Test(timeout = 2000)
    public void testIssue813() throws InterruptedException {
        // https://github.com/ReactiveX/RxJava/issues/813
        // scheduled  — upstream has started running on the scheduler
        // latch      — released by the test to let the upstream proceed to onComplete
        // doneLatch  — upstream body has fully finished (including the finally block)
        final CountDownLatch scheduled = new CountDownLatch(1);
        final CountDownLatch latch = new CountDownLatch(1);
        final CountDownLatch doneLatch = new CountDownLatch(1);

        TestObserver<Integer> to = new TestObserver<Integer>();

        Observable
        .unsafeCreate(new ObservableSource<Integer>() {
            @Override
            public void subscribe(
                    final Observer<? super Integer> observer) {
                observer.onSubscribe(Disposables.empty());
                scheduled.countDown();
                try {
                    try {
                        latch.await();
                    } catch (InterruptedException e) {
                        // this means we were unsubscribed (Scheduler shut down and interrupts)
                        // ... but we'll pretend we are like many Observables that ignore interrupts
                    }

                    observer.onComplete();
                } catch (Throwable e) {
                    observer.onError(e);
                } finally {
                    doneLatch.countDown();
                }
            }
        }).subscribeOn(Schedulers.computation()).subscribe(to);

        // wait for scheduling
        scheduled.await();
        // trigger unsubscribe
        to.dispose();
        latch.countDown();
        doneLatch.await();
        // Even though the observer was disposed, the upstream completed without errors.
        to.assertNoErrors();
        to.assertComplete();
    }

    // Ignored: the ObservableSource.subscribe contract forbids throwing, so there is
    // nothing for subscribeOn to propagate here.
    @Test
    @Ignore("ObservableSource.subscribe can't throw")
    public void testThrownErrorHandling() {
        TestObserver<String> ts = new TestObserver<String>();
        Observable.unsafeCreate(new ObservableSource<String>() {

            @Override
            public void subscribe(Observer<? super String> s) {
                throw new RuntimeException("fail");
            }

        }).subscribeOn(Schedulers.computation()).subscribe(ts);
        ts.awaitTerminalEvent(1000, TimeUnit.MILLISECONDS);
        ts.assertTerminated();
    }

    /** onError signalled on the scheduler thread must reach the downstream observer. */
    @Test
    public void testOnError() {
        TestObserver<String> ts = new TestObserver<String>();
        Observable.unsafeCreate(new ObservableSource<String>() {

            @Override
            public void subscribe(Observer<? super String> s) {
                s.onSubscribe(Disposables.empty());
                s.onError(new RuntimeException("fail"));
            }

        }).subscribeOn(Schedulers.computation()).subscribe(ts);
        ts.awaitTerminalEvent(1000, TimeUnit.MILLISECONDS);
        ts.assertTerminated();
    }

    /**
     * Scheduler decorator that delays every scheduled action by a fixed amount,
     * delegating the actual execution to a wrapped scheduler.
     */
    public static class SlowScheduler extends Scheduler {
        final Scheduler actual;
        final long delay;
        final TimeUnit unit;

        public SlowScheduler() {
            this(Schedulers.computation(), 2, TimeUnit.SECONDS);
        }

        public SlowScheduler(Scheduler actual, long delay, TimeUnit unit) {
            this.actual = actual;
            this.delay = delay;
            this.unit = unit;
        }

        @Override
        public Worker createWorker() {
            return new SlowInner(actual.createWorker());
        }

        private final class SlowInner extends Worker {

            private final Scheduler.Worker actualInner;

            private SlowInner(Worker actual) {
                this.actualInner = actual;
            }

            @Override
            public void dispose() {
                actualInner.dispose();
            }

            @Override
            public boolean isDisposed() {
                return actualInner.isDisposed();
            }

            @Override
            public Disposable schedule(final Runnable action) {
                // An immediate schedule becomes a delayed one by this scheduler's fixed delay.
                return actualInner.schedule(action, delay, unit);
            }

            @Override
            public Disposable schedule(final Runnable action, final long delayTime, final TimeUnit delayUnit) {
                // Convert both delays into the finer-grained of the two units before adding,
                // so precision is not lost when combining them.
                TimeUnit common = delayUnit.compareTo(unit) < 0 ? delayUnit : unit;
                long t = common.convert(delayTime, delayUnit) + common.convert(delay, unit);
                return actualInner.schedule(action, t, common);
            }
        }
    }

    /**
     * Disposing the downstream of an infinite synchronous source must stop the emission loop
     * (the source checks {@code isDisposed()} each iteration).
     */
    @Test(timeout = 5000)
    public void testUnsubscribeInfiniteStream() throws InterruptedException {
        TestObserver<Integer> ts = new TestObserver<Integer>();
        final AtomicInteger count = new AtomicInteger();
        Observable.unsafeCreate(new ObservableSource<Integer>() {

            @Override
            public void subscribe(Observer<? super Integer> sub) {
                Disposable d = Disposables.empty();
                sub.onSubscribe(d);
                for (int i = 1; !d.isDisposed(); i++) {
                    count.incrementAndGet();
                    sub.onNext(i);
                }
            }

        }).subscribeOn(Schedulers.newThread()).take(10).subscribe(ts);

        ts.awaitTerminalEvent(1000, TimeUnit.MILLISECONDS);
        ts.dispose();
        Thread.sleep(200); // give time for the loop to continue

        ts.assertValues(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
        // Exactly 10 emissions: take(10) disposed the upstream, which exits its loop.
        assertEquals(10, count.get());
    }

    /**
     * Disposing before the scheduler runs the subscription task means the actual subscribe
     * never happens — no values, no terminal event.
     */
    @Test
    public void cancelBeforeActualSubscribe() {
        TestScheduler test = new TestScheduler();

        TestObserver<Integer> to = new TestObserver<Integer>();

        Observable.just(1).hide()
        .subscribeOn(test).subscribe(to);

        to.dispose();

        // Advancing time after disposal must not deliver anything.
        test.advanceTimeBy(1, TimeUnit.SECONDS);

        to
        .assertSubscribed()
        .assertNoValues()
        .assertNotTerminated();
    }

    @Test
    public void dispose() {
        TestHelper.checkDisposed(Observable.just(1).subscribeOn(Schedulers.single()));
    }
}
apache-2.0
adligo/tests4j_tests.adligo.org
src/org/adligo/tests4j_tests/models/shared/results/BaseTrialResultMutantTrial.java
1225
package org.adligo.tests4j_tests.models.shared.results; import org.adligo.tests4j.models.shared.results.BaseTrialResultMutant; import org.adligo.tests4j.shared.asserts.reference.AllowedReferences; import org.adligo.tests4j.system.shared.trials.SourceFileScope; import org.adligo.tests4j_tests.base_trials.I_CountType; import org.adligo.tests4j_tests.base_trials.SourceFileCountingTrial; import org.adligo.tests4j_tests.references_groups.Tests4J_Results_GwtReferenceGroup; @SourceFileScope (sourceClass=BaseTrialResultMutant.class,minCoverage=0.0) @AllowedReferences (groups=Tests4J_Results_GwtReferenceGroup.class) public class BaseTrialResultMutantTrial extends SourceFileCountingTrial { @Override public int getTests(I_CountType type) { return super.getTests(type, 0, true); } @Override public int getAsserts(I_CountType type) { if (type.isFromMetaWithCoverage()) { //code coverage and circular dependencies return super.getAsserts(type,3); } else { return super.getAsserts(type, 0); } } @Override public int getUniqueAsserts(I_CountType type) { if (type.isFromMetaWithCoverage()) { return super.getUniqueAsserts(type, 3); } else { return super.getUniqueAsserts(type, 0); } } }
apache-2.0
dawutao/MyCoolWeather
app/src/main/java/com/mycoolweather/android/gson/AQI.java
220
package com.mycoolweather.android.gson; /** * Created by wutao on 2017/05/24. */ public class AQI { public AQICity city; public class AQICity { public String aqi; public String pm25; } }
apache-2.0
Nokorbis/unodp
src/main/java/be/un/odp/services/implementations/PortalTypeServiceImpl.java
1532
package be.un.odp.services.implementations; import be.un.odp.dao.PortalTypeDao; import be.un.odp.models.PortalType; import be.un.odp.services.PortalTypeService; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Service; import java.util.List; /** * Created on 13-03-17. * * @author Nokorbis * @version 1.0.0 * @since 1.0.0 */ @Service("portalTypeService") public class PortalTypeServiceImpl implements PortalTypeService { @Autowired private PortalTypeDao portalTypeDao; @Override public List<PortalType> findAllPortalTypes () { return portalTypeDao.findAllPortalTypes(); } @Override public PortalType findPortalType (int id) { return portalTypeDao.findPortalType(id); } @Override public PortalType findPortalTypeById (int id) { return portalTypeDao.findPortalTypeById(id); } @Override public PortalType findPortalTypeByName (String name) { return portalTypeDao.findPortalTypeByName(name); } @Override public void updatePortalType (PortalType type) { portalTypeDao.updatePortalType(type); } @Override public void createPortalType (PortalType type) { portalTypeDao.createPortalType(type); } @Override public void deletePortalType (PortalType type) { portalTypeDao.deletePortalType(type); } @Override public void deletePortalTypeById (int id) { portalTypeDao.deletePortalTypeById(id); } }
apache-2.0
akira-baruah/bazel
src/main/java/com/google/devtools/build/lib/skylarkbuildapi/java/JavaCcLinkParamsProviderApi.java
3557
// Copyright 2018 The Bazel Authors. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package com.google.devtools.build.lib.skylarkbuildapi.java; import com.google.devtools.build.lib.skylarkbuildapi.FileApi; import com.google.devtools.build.lib.skylarkbuildapi.core.ProviderApi; import com.google.devtools.build.lib.skylarkbuildapi.cpp.CcInfoApi; import com.google.devtools.build.lib.skylarkinterface.Param; import com.google.devtools.build.lib.skylarkinterface.SkylarkCallable; import com.google.devtools.build.lib.skylarkinterface.SkylarkConstructor; import com.google.devtools.build.lib.skylarkinterface.StarlarkBuiltin; import com.google.devtools.build.lib.skylarkinterface.StarlarkDocumentationCategory; import com.google.devtools.build.lib.syntax.EvalException; import com.google.devtools.build.lib.syntax.StarlarkSemantics.FlagIdentifier; import com.google.devtools.build.lib.syntax.StarlarkValue; /** A target that provides C++ libraries to be linked into Java targets. */ @StarlarkBuiltin( name = "JavaCcLinkParamsInfo", doc = "Do not use this module. It is intended for migration purposes only. If you depend on it, " + "you will be broken when it is removed." + "Information about the c++ libraries to be linked into Java targets.", documented = true, category = StarlarkDocumentationCategory.PROVIDER) public interface JavaCcLinkParamsProviderApi< FileT extends FileApi, CcInfoApiT extends CcInfoApi<FileT>> extends StarlarkValue { /** Name of this info object. 
*/ String NAME = "JavaCcLinkParamsInfo"; /** Returns the cc linking info */ @SkylarkCallable( name = "cc_info", structField = true, doc = "Returns the CcLinkingInfo provider.", documented = true, enableOnlyWithFlag = FlagIdentifier.EXPERIMENTAL_ENABLE_ANDROID_MIGRATION_APIS) CcInfoApiT getCcInfo(); /** The provider implementing this can construct the JavaCcLinkParamsInfo provider. */ @StarlarkBuiltin( name = "Provider", doc = "Do not use this module. It is intended for migration purposes only. If you depend on " + "it, you will be broken when it is removed.", documented = false) public interface Provider<FileT extends FileApi, CcInfoApiT extends CcInfoApi<FileT>> extends ProviderApi { @SkylarkCallable( name = NAME, doc = "The <code>JavaCcLinkParamsInfo</code> constructor.", documented = true, enableOnlyWithFlag = FlagIdentifier.EXPERIMENTAL_ENABLE_ANDROID_MIGRATION_APIS, parameters = { @Param( name = "store", doc = "The CcInfo provider.", positional = true, named = false, type = CcInfoApi.class), }, selfCall = true) @SkylarkConstructor(objectType = JavaCcLinkParamsProviderApi.class, receiverNameForDoc = NAME) public JavaCcLinkParamsProviderApi<FileT, CcInfoApiT> createInfo(CcInfoApiT store) throws EvalException; } }
apache-2.0
AJ-72/testng
src/test/java/test/thread/parallelization/sample/TestClassAFiveMethodsWithDataProviderOnAllMethodsAndNoDepsSample.java
6668
package test.thread.parallelization.sample;

import org.testng.ITestContext;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;

import test.thread.parallelization.TestNgRunStateTracker;

import java.util.Map;
import java.util.concurrent.TimeUnit;

import static test.thread.parallelization.TestNgRunStateTracker.EventInfo.CLASS_INSTANCE;
import static test.thread.parallelization.TestNgRunStateTracker.EventInfo.CLASS_NAME;
import static test.thread.parallelization.TestNgRunStateTracker.EventInfo.DATA_PROVIDER_PARAM;
import static test.thread.parallelization.TestNgRunStateTracker.EventInfo.METHOD_NAME;
import static test.thread.parallelization.TestNgRunStateTracker.EventInfo.SUITE_NAME;
import static test.thread.parallelization.TestNgRunStateTracker.EventInfo.TEST_NAME;
import static test.thread.parallelization.TestNgRunStateTracker.TestNgRunEvent.TEST_METHOD_EXECUTION;

/**
 * Sample test class used by the parallelization test suite: five identical data-provider-driven
 * test methods with no inter-method dependencies. Each method records a TEST_METHOD_EXECUTION
 * event in {@link TestNgRunStateTracker} and then sleeps for the configured duration, so the
 * harness can verify which thread executed what and when.
 *
 * The five bodies were byte-identical except for the method-name string, so the shared logic
 * is factored into {@link #logExecutionAndSleep}.
 */
public class TestClassAFiveMethodsWithDataProviderOnAllMethodsAndNoDepsSample {

    @Test(dataProvider = "data-provider")
    public void testMethodA(String suiteName, String testName, String sleepFor, String dpVal)
            throws InterruptedException {
        logExecutionAndSleep("testMethodA", suiteName, testName, sleepFor, dpVal);
    }

    @Test(dataProvider = "data-provider")
    public void testMethodB(String suiteName, String testName, String sleepFor, String dpVal)
            throws InterruptedException {
        logExecutionAndSleep("testMethodB", suiteName, testName, sleepFor, dpVal);
    }

    @Test(dataProvider = "data-provider")
    public void testMethodC(String suiteName, String testName, String sleepFor, String dpVal)
            throws InterruptedException {
        logExecutionAndSleep("testMethodC", suiteName, testName, sleepFor, dpVal);
    }

    @Test(dataProvider = "data-provider")
    public void testMethodD(String suiteName, String testName, String sleepFor, String dpVal)
            throws InterruptedException {
        logExecutionAndSleep("testMethodD", suiteName, testName, sleepFor, dpVal);
    }

    @Test(dataProvider = "data-provider")
    public void testMethodE(String suiteName, String testName, String sleepFor, String dpVal)
            throws InterruptedException {
        logExecutionAndSleep("testMethodE", suiteName, testName, sleepFor, dpVal);
    }

    /**
     * Records a TEST_METHOD_EXECUTION event for {@code methodName} with the current thread and
     * timestamp, then sleeps for {@code sleepFor} milliseconds to simulate test work.
     */
    private void logExecutionAndSleep(String methodName, String suiteName, String testName,
            String sleepFor, String dpVal) throws InterruptedException {
        long time = System.currentTimeMillis();

        TestNgRunStateTracker.logEvent(
                TestNgRunStateTracker.EventLog.builder()
                        .setEvent(TEST_METHOD_EXECUTION)
                        .setTimeOfEvent(time)
                        .setThread(Thread.currentThread())
                        .addData(METHOD_NAME, methodName)
                        .addData(CLASS_NAME, getClass().getCanonicalName())
                        .addData(CLASS_INSTANCE, this)
                        .addData(TEST_NAME, testName)
                        .addData(SUITE_NAME, suiteName)
                        .addData(DATA_PROVIDER_PARAM, dpVal)
                        .build()
        );

        TimeUnit.MILLISECONDS.sleep(Integer.parseInt(sleepFor));
    }

    /**
     * Builds one row per comma-separated value of the "dataProviderParam" suite parameter;
     * every row carries the suite name, test name and sleep duration alongside the value.
     */
    @DataProvider(name = "data-provider")
    public Object[][] dataProvider(ITestContext context) {
        Map<String,String> params = context.getCurrentXmlTest().getAllParameters();

        String suiteName = params.get("suiteName");
        String testName = params.get("testName");
        String sleepFor = params.get("sleepFor");
        String dataProviderParam = params.get("dataProviderParam");

        String[] dataProviderVals = dataProviderParam.split(",");

        Object[][] dataToProvide = new Object[dataProviderVals.length][4];

        for (int i = 0; i < dataProviderVals.length; i++) {
            dataToProvide[i][0] = suiteName;
            dataToProvide[i][1] = testName;
            dataToProvide[i][2] = sleepFor;
            dataToProvide[i][3] = dataProviderVals[i];
        }

        return dataToProvide;
    }
}
apache-2.0
anjalshireesh/gluster-ovirt-poc
frontend/webadmin/modules/webadmin/src/main/java/org/ovirt/engine/ui/webadmin/section/main/presenter/popup/cluster/ClusterManageNetworkPopupPresenterWidget.java
708
package org.ovirt.engine.ui.webadmin.section.main.presenter.popup.cluster; import org.ovirt.engine.ui.uicommonweb.models.ListModel; import org.ovirt.engine.ui.webadmin.section.main.presenter.popup.AbstractModelBoundPopupPresenterWidget; import com.google.gwt.event.shared.EventBus; import com.google.inject.Inject; public class ClusterManageNetworkPopupPresenterWidget extends AbstractModelBoundPopupPresenterWidget<ListModel, ClusterManageNetworkPopupPresenterWidget.ViewDef> { public interface ViewDef extends AbstractModelBoundPopupPresenterWidget.ViewDef<ListModel> { } @Inject public ClusterManageNetworkPopupPresenterWidget(EventBus eventBus, ViewDef view) { super(eventBus, view); } }
apache-2.0
IHTSDO/snow-owl
snomed/com.b2international.snowowl.snomed.reasoner/src/com/b2international/snowowl/snomed/reasoner/converter/ConcreteDomainChangeConverter.java
14609
/*
 * Copyright 2018-2019 B2i Healthcare Pte Ltd, http://b2i.sg
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.b2international.snowowl.snomed.reasoner.converter;

import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.stream.Collectors;

import com.b2international.commons.http.ExtendedLocale;
import com.b2international.commons.options.Options;
import com.b2international.snowowl.core.domain.BranchContext;
import com.b2international.snowowl.core.domain.CollectionResource;
import com.b2international.snowowl.core.domain.RepositoryContext;
import com.b2international.snowowl.core.events.Request;
import com.b2international.snowowl.core.request.SearchResourceRequestBuilder;
import com.b2international.snowowl.core.terminology.ComponentCategory;
import com.b2international.snowowl.datastore.converter.BaseResourceConverter;
import com.b2international.snowowl.datastore.request.BranchRequest;
import com.b2international.snowowl.datastore.request.RevisionIndexReadRequest;
import com.b2international.snowowl.snomed.common.SnomedRf2Headers;
import com.b2international.snowowl.snomed.core.domain.SnomedCoreComponent;
import com.b2international.snowowl.snomed.core.domain.refset.SnomedReferenceSetMember;
import com.b2international.snowowl.snomed.core.domain.refset.SnomedReferenceSetMembers;
import com.b2international.snowowl.snomed.datastore.id.SnomedIdentifiers;
import com.b2international.snowowl.snomed.datastore.request.SnomedRequests;
import com.b2international.snowowl.snomed.reasoner.domain.ChangeNature;
import com.b2international.snowowl.snomed.reasoner.domain.ClassificationTask;
import com.b2international.snowowl.snomed.reasoner.domain.ConcreteDomainChange;
import com.b2international.snowowl.snomed.reasoner.domain.ConcreteDomainChanges;
import com.b2international.snowowl.snomed.reasoner.domain.ReasonerConcreteDomainMember;
import com.b2international.snowowl.snomed.reasoner.index.ConcreteDomainChangeDocument;
import com.b2international.snowowl.snomed.reasoner.request.ClassificationRequests;
import com.google.common.collect.Maps;
import com.google.common.collect.Multimap;
import com.google.common.collect.Multimaps;

/**
 * Converts {@link ConcreteDomainChangeDocument} index entries (produced by the reasoner)
 * into {@link ConcreteDomainChange} API resources, optionally expanding the underlying
 * concrete-domain reference set member and its referenced component.
 *
 * @since 6.11 (originally introduced on 7.0)
 */
public final class ConcreteDomainChangeConverter
		extends BaseResourceConverter<ConcreteDomainChangeDocument, ConcreteDomainChange, ConcreteDomainChanges> {

	// TODO: this constant is moved to SnomedReferenceSetMember.Expand on 7.x
	private static final String REFERENCED_COMPONENT = "referencedComponent";

	public ConcreteDomainChangeConverter(final RepositoryContext context, final Options expand, final List<ExtendedLocale> locales) {
		super(context, expand, locales);
	}

	@Override
	protected ConcreteDomainChanges createCollectionResource(final List<ConcreteDomainChange> results,
			final String scrollId,
			final String searchAfter,
			final int limit,
			final int total) {
		return new ConcreteDomainChanges(results, scrollId, searchAfter, limit, total);
	}

	/**
	 * Maps a single index document to a change resource. Only the properties stored on the
	 * document are populated here; the rest is filled in by {@link #expand(List)} on demand.
	 */
	@Override
	protected ConcreteDomainChange toResource(final ConcreteDomainChangeDocument entry) {
		final ConcreteDomainChange resource = new ConcreteDomainChange();
		resource.setClassificationId(entry.getClassificationId());
		resource.setChangeNature(entry.getNature());

		/*
		 * New members: ID refers to the "origin" member ID
		 * Updated members: ID refers to the member that should be updated in place
		 * Redundant members: ID refers to the member that should be removed or deactivated
		 */
		final ReasonerConcreteDomainMember concreteDomainMember = new ReasonerConcreteDomainMember(entry.getMemberId());

		// Released flag is the "origin" member's released state for updated and redundant members, false for new members
		concreteDomainMember.setReleased(entry.isReleased());
		concreteDomainMember.setReferencedComponentId(entry.getReferencedComponentId());

		switch (entry.getNature()) {
			case INFERRED:
				/*
				 * New members are referring to:
				 * - a different component
				 * - in another relationship group
				 * - with a potentially different characteristic type
				 */
				concreteDomainMember.setGroup(entry.getGroup());
				concreteDomainMember.setCharacteristicTypeId(entry.getCharacteristicTypeId());
				break;

			case UPDATED:
				// Updates change the serialized value on an existing member
				concreteDomainMember.setSerializedValue(entry.getSerializedValue());
				break;

			case REDUNDANT:
				// Redundant CD members only need the UUID and released flag populated to do the delete/inactivation
				break;

			default:
				throw new IllegalStateException(String.format("Unexpected CD member change '%s' found with UUID '%s'.",
						entry.getNature(),
						entry.getMemberId()));
		}

		resource.setConcreteDomainMember(concreteDomainMember);
		return resource;
	}

	/**
	 * Expands the {@code concreteDomainMember()} option on the converted change items:
	 * fetches the "origin" reference set members (and, optionally, their referenced
	 * components) from the branch each classification ran on, then copies the member
	 * properties onto the change items without overwriting values already set in
	 * {@link #toResource(ConcreteDomainChangeDocument)}.
	 */
	@Override
	protected void expand(final List<ConcreteDomainChange> results) {
		if (!expand().containsKey(ConcreteDomainChange.Expand.CONCRETE_DOMAIN_MEMBER)) {
			return;
		}

		/*
		 * Depending on the CD member change search request, we might need to issue
		 * SNOMED CT searches against multiple branches; find out which ones we have.
		 */
		final Multimap<String, ConcreteDomainChange> itemsByBranch = getItemsByBranch(results);

		// Check if we only need to load inferred CD members in their entirety
		final Options expandOptions = expand().getOptions(ConcreteDomainChange.Expand.CONCRETE_DOMAIN_MEMBER);
		final boolean inferredOnly = expandOptions.getBoolean("inferredOnly");

		final Options cdMemberExpandOptions = expandOptions.getOptions("expand");
		final Options referencedComponentOptions = cdMemberExpandOptions.getOptions(REFERENCED_COMPONENT);

		/*
		 * Remove this option from the member expand options map, so that member search
		 * does not expand the referenced component again
		 */
		final boolean needsReferencedComponent = cdMemberExpandOptions.keySet().remove(REFERENCED_COMPONENT);

		for (final String branch : itemsByBranch.keySet()) {
			final Collection<ConcreteDomainChange> itemsForCurrentBranch = itemsByBranch.get(branch);

			/*
			 * Expand referenced component on members via a separate search request, as they
			 * can be different from the referenced component on the "origin" member
			 */
			if (needsReferencedComponent) {
				final List<ReasonerConcreteDomainMember> blankMembers = itemsForCurrentBranch.stream()
						.filter(c -> !inferredOnly || ChangeNature.INFERRED.equals(c.getChangeNature()))
						.map(ConcreteDomainChange::getConcreteDomainMember)
						.collect(Collectors.toList());

				final Multimap<String, ReasonerConcreteDomainMember> membersByReferencedComponent = Multimaps.index(blankMembers,
						ReasonerConcreteDomainMember::getReferencedComponentId);

				// Components of different categories (concept/description/relationship) need
				// different search requests, so group the IDs by category first.
				final Multimap<ComponentCategory, String> referencedComponentsByCategory = Multimaps.index(membersByReferencedComponent.keySet(),
						SnomedIdentifiers::getComponentCategory);

				for (final Entry<ComponentCategory, Collection<String>> categoryEntry : referencedComponentsByCategory.asMap().entrySet()) {
					expandComponentCategory(branch,
							categoryEntry.getKey(),
							categoryEntry.getValue(),
							referencedComponentOptions,
							membersByReferencedComponent);
				}
			}

			/*
			 * Then fetch all the required members (these will have a referenced component
			 * ID that should no longer be copied on inferred members). Note that the same "origin"
			 * member might be used for multiple eg. "new" counterparts.
			 */
			final Set<String> cdMemberUuids = itemsForCurrentBranch.stream()
					.filter(c -> !inferredOnly || ChangeNature.INFERRED.equals(c.getChangeNature()))
					.map(c -> c.getConcreteDomainMember().getOriginMemberId())
					.collect(Collectors.toSet());

			final Request<BranchContext, SnomedReferenceSetMembers> cdMemberSearchRequest = SnomedRequests.prepareSearchMember()
					.filterByIds(cdMemberUuids)
					.setLimit(cdMemberUuids.size())
					.setExpand(cdMemberExpandOptions)
					.setLocales(locales())
					.build();

			final SnomedReferenceSetMembers cdMembers = new BranchRequest<>(branch,
					new RevisionIndexReadRequest<>(cdMemberSearchRequest))
					.execute(context());

			final Map<String, SnomedReferenceSetMember> cdMembersByUuid = Maps.uniqueIndex(cdMembers, SnomedReferenceSetMember::getId);

			/*
			 * Finally, set the member on the change item, but preserve the properties that
			 * were already set in "toResource"
			 */
			for (final ConcreteDomainChange item : itemsForCurrentBranch) {
				final ReasonerConcreteDomainMember reasonerMember = item.getConcreteDomainMember();
				final String memberUuid = reasonerMember.getOriginMemberId();

				switch (item.getChangeNature()) {
					case INFERRED: {
							final SnomedReferenceSetMember expandedMember = cdMembersByUuid.get(memberUuid);
							final Map<String, Object> expandedProperties = expandedMember.getProperties();

							// reasonerMember.setCharacteristicTypeId(...) is already set
							// reasonerMember.setGroup(...) is already set
							// reasonerMember.setReferencedComponent(...) is already set (or expanded)
							reasonerMember.setReferenceSetId(expandedMember.getReferenceSetId());
							// reasonerMember.setReleased(...) is already set
							reasonerMember.setSerializedValue((String) expandedProperties.get(SnomedRf2Headers.FIELD_VALUE));
							reasonerMember.setTypeId((String) expandedProperties.get(SnomedRf2Headers.FIELD_TYPE_ID));
						}
						break;

					case UPDATED:
						if (!inferredOnly) {
							final SnomedReferenceSetMember expandedMember = cdMembersByUuid.get(memberUuid);
							final Map<String, Object> expandedProperties = expandedMember.getProperties();

							reasonerMember.setCharacteristicTypeId((String) expandedProperties.get(SnomedRf2Headers.FIELD_CHARACTERISTIC_TYPE_ID));
							reasonerMember.setGroup((Integer) expandedProperties.get(SnomedRf2Headers.FIELD_RELATIONSHIP_GROUP));
							// reasonerMember.setReferencedComponent(...) is already set (or expanded)
							reasonerMember.setReferenceSetId(expandedMember.getReferenceSetId());
							// reasonerMember.setReleased(...) is already set
							// reasonerMember.setSerializedValue(...) is already set
							reasonerMember.setTypeId((String) expandedProperties.get(SnomedRf2Headers.FIELD_TYPE_ID));
						}
						break;

					case REDUNDANT:
						if (!inferredOnly) {
							final SnomedReferenceSetMember expandedMember = cdMembersByUuid.get(memberUuid);
							final Map<String, Object> expandedProperties = expandedMember.getProperties();

							reasonerMember.setCharacteristicTypeId((String) expandedProperties.get(SnomedRf2Headers.FIELD_CHARACTERISTIC_TYPE_ID));
							reasonerMember.setGroup((Integer) expandedProperties.get(SnomedRf2Headers.FIELD_RELATIONSHIP_GROUP));
							// reasonerMember.setReferencedComponent(...) is already set (or expanded)
							reasonerMember.setReferenceSetId(expandedMember.getReferenceSetId());
							// reasonerMember.setReleased(...) is already set
							reasonerMember.setSerializedValue((String) expandedProperties.get(SnomedRf2Headers.FIELD_VALUE));
							reasonerMember.setTypeId((String) expandedProperties.get(SnomedRf2Headers.FIELD_TYPE_ID));
						}
						break;

					default:
						throw new IllegalStateException(String.format("Unexpected CD member change '%s' found with UUID '%s'.",
								item.getChangeNature(),
								item.getConcreteDomainMember().getOriginMemberId()));
				}
			}
		}
	}

	// Resolves each change item's classification ID to the branch the classification
	// ran on, and groups the items by that branch.
	private Multimap<String, ConcreteDomainChange> getItemsByBranch(final List<ConcreteDomainChange> results) {
		final Set<String> classificationTaskIds = results.stream()
				.map(ConcreteDomainChange::getClassificationId)
				.collect(Collectors.toSet());

		final Map<String, String> branchesByClassificationIdMap = ClassificationRequests.prepareSearchClassification()
				.filterByIds(classificationTaskIds)
				.setLimit(classificationTaskIds.size())
				.build()
				.execute(context())
				.stream()
				.collect(Collectors.toMap(
						ClassificationTask::getId,
						ClassificationTask::getBranch));

		final Multimap<String, ConcreteDomainChange> itemsByBranch = Multimaps.index(results,
				r -> branchesByClassificationIdMap.get(r.getClassificationId()));
		return itemsByBranch;
	}

	// Copied from SnomedReferenceSetMemberConverter
	// Fetches the referenced components of a single category and attaches each one to
	// every member that references it.
	private void expandComponentCategory(final String branch,
			final ComponentCategory category,
			final Collection<String> componentIds,
			final Options componentOptions,
			final Multimap<String, ReasonerConcreteDomainMember> membersByReferencedComponent) {

		final SearchResourceRequestBuilder<?, BranchContext, ? extends CollectionResource<? extends SnomedCoreComponent>> searchRequestBuilder = createSearchRequestBuilder(category);

		searchRequestBuilder.filterByIds(componentIds)
				.setLimit(componentIds.size())
				.setLocales(locales())
				.setExpand(componentOptions.get("expand", Options.class));

		final CollectionResource<? extends SnomedCoreComponent> referencedComponents = new BranchRequest<>(branch,
				new RevisionIndexReadRequest<>(searchRequestBuilder.build()))
				.execute(context());

		for (final SnomedCoreComponent referencedComponent : referencedComponents) {
			for (final ReasonerConcreteDomainMember member : membersByReferencedComponent.get(referencedComponent.getId())) {
				member.setReferencedComponent(referencedComponent);
			}
		}
	}

	// Picks the component search request matching the SNOMED CT component category.
	private SearchResourceRequestBuilder<?, BranchContext, ? extends CollectionResource<? extends SnomedCoreComponent>> createSearchRequestBuilder(final ComponentCategory category) {
		switch (category) {
			case CONCEPT:
				return SnomedRequests.prepareSearchConcept();
			case DESCRIPTION:
				return SnomedRequests.prepareSearchDescription();
			case RELATIONSHIP:
				return SnomedRequests.prepareSearchRelationship();
			default:
				throw new UnsupportedOperationException("Category '" + category + "' is not supported in referenced component expansion.");
		}
	}
}
apache-2.0
macvelli/RootFramework
test/root/finance/CurrencyFormatTest.java
1561
package root.finance;

import static org.junit.Assert.assertEquals;

import org.junit.Test;

/**
 * Unit tests for {@link CurrencyFormat}: verifies US-dollar rendering of
 * {@link Money} amounts expressed in cents, with thousands separators,
 * for positive values ("$1,698.25") and for negative values in
 * accounting style ("($1,698.25)").
 *
 * Bug fix: the class was declared package-private; JUnit 4 requires test
 * classes to be public, otherwise the runner rejects the class before any
 * test method executes.
 */
public class CurrencyFormatTest {

	private final CurrencyFormat formatter = new CurrencyFormat();

	@Test
	public void testPositiveFormat() {
		assertEquals("$0.09", formatter.formatMsg(new Money(9)));
		assertEquals("$0.75", formatter.formatMsg(new Money(75)));
		assertEquals("$1.30", formatter.formatMsg(new Money(130)));
		assertEquals("$452.73", formatter.formatMsg(new Money(45273)));
		assertEquals("$1,698.25", formatter.formatMsg(new Money(169825)));
		assertEquals("$27,379.50", formatter.formatMsg(new Money(2737950)));
		assertEquals("$352,415.72", formatter.formatMsg(new Money(35241572)));
		assertEquals("$2,352,415.72", formatter.formatMsg(new Money(235241572)));
		assertEquals("$21,352,415.72", formatter.formatMsg(new Money(2135241572)));
	}

	@Test
	public void testNegativeFormat() {
		assertEquals("($0.09)", formatter.formatMsg(new Money(-9)));
		assertEquals("($0.75)", formatter.formatMsg(new Money(-75)));
		assertEquals("($1.30)", formatter.formatMsg(new Money(-130)));
		assertEquals("($452.73)", formatter.formatMsg(new Money(-45273)));
		assertEquals("($1,698.25)", formatter.formatMsg(new Money(-169825)));
		assertEquals("($27,379.50)", formatter.formatMsg(new Money(-2737950)));
		assertEquals("($352,415.72)", formatter.formatMsg(new Money(-35241572)));
		assertEquals("($2,352,415.72)", formatter.formatMsg(new Money(-235241572)));
		assertEquals("($21,352,415.72)", formatter.formatMsg(new Money(-2135241572)));
	}

} // End CurrencyFormatTest
apache-2.0
formcept/gdfpop
src/main/java/org/formcept/gdfpop/GDFTokens.java
730
package org.formcept.gdfpop; /** * A collection of tokens used for GDF data * * @author Anuj (https://github.com/anujsrc) */ public final class GDFTokens { public static final String NODE_START = "nodedef>"; public static final String EDGE_START = "edgedef>"; public static final String EDGE_LABEL_PROP = "label"; public static final String DEFAULT_KEY = "default"; public static final String DEFAULT_EDGE_LABEL = "_default"; public static final String BOOLEAN = "BOOLEAN"; public static final String STRING = "VARCHAR"; public static final String INT = "INT"; public static final String LONG = "LONG"; public static final String FLOAT = "FLOAT"; public static final String DOUBLE = "DOUBLE"; }
apache-2.0
pedro-hos/caixa-eletronico
ce-crud/src/main/java/org/pedrohos/Application.java
1057
package org.pedrohos; import org.springframework.boot.SpringApplication; import org.springframework.boot.autoconfigure.SpringBootApplication; import org.springframework.boot.builder.SpringApplicationBuilder; import org.springframework.boot.context.web.SpringBootServletInitializer; import org.springframework.boot.orm.jpa.EntityScan; import org.springframework.context.annotation.ComponentScan; import org.springframework.data.jpa.repository.config.EnableJpaRepositories; import org.springframework.transaction.annotation.EnableTransactionManagement; @SpringBootApplication @EnableTransactionManagement @ComponentScan("org.pedrohos") @EntityScan("org.pedrohos.model.entities") @EnableJpaRepositories("org.pedrohos.model.repositories") public class Application extends SpringBootServletInitializer { public static void main(final String[] args) { SpringApplication.run(Application.class); } @Override protected SpringApplicationBuilder configure(final SpringApplicationBuilder application) { return application.sources(Application.class); } }
apache-2.0
artemevkif/study
mantis-tests/src/test/java/ru/stqa/pft/mantis/tests/RegistrationTests.java
1722
package ru.stqa.pft.mantis.tests;

import org.testng.annotations.AfterMethod;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import ru.lanwen.verbalregex.VerbalExpression;
import ru.stqa.pft.mantis.model.MailMessage;

import javax.mail.MessagingException;
import java.io.IOException;
import java.util.List;

import static org.testng.Assert.assertTrue;

/**
 * End-to-end registration test: creates a mail user, starts registration,
 * waits for the confirmation e-mail, follows the confirmation link, and
 * verifies the newly registered user can log in.
 *
 * Created by xxartema on 26.02.2017.
 */
public class RegistrationTests extends TestBase {

  @BeforeMethod
  public void startMailServer() {
    app.mail().start();
  }

  @Test
  public void testRegistration() throws IOException, MessagingException {
    // Timestamp-based names keep repeated runs from colliding on the server.
    long now = System.currentTimeMillis();
    String user = String.format("user%s", now);
    String password = "password";
    String email = String.format("user%s@localhost.localdomain", now);
    app.james().createUser(user, password);
    app.registration().start(user, email);
    List<MailMessage> mailMessages = app.james().waitForMail(user, password, 60000);
    String confirmationLink = findConfirmationLink(mailMessages, email);
    app.registration().finish(confirmationLink, password);
    assertTrue(app.newSession().login(user, password));
  }

  /**
   * Extracts the first {@code http://} link from the message addressed to
   * {@code email}.
   *
   * Improvement: previously used unchecked {@code Optional.get()}, which
   * fails with a bare NoSuchElementException; now fails with a message that
   * names the missing recipient.
   */
  private String findConfirmationLink(List<MailMessage> mailMessages, String email) {
    MailMessage mailMessage = mailMessages.stream()
        .filter((m) -> m.to.equals(email))
        .findFirst()
        .orElseThrow(() -> new IllegalStateException("No mail delivered to " + email));
    VerbalExpression regex = VerbalExpression.regex().find("http://").nonSpace().oneOrMore().build();
    return regex.getText(mailMessage.text);
  }

  @AfterMethod(alwaysRun = true)
  public void stopMailServer() {
    app.mail().stop();
  }

}
apache-2.0
spring-projects/spring-framework
spring-webflux/src/test/java/org/springframework/web/reactive/result/view/script/JythonScriptTemplateTests.java
3136
/*
 * Copyright 2002-2019 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.web.reactive.result.view.script;

import java.util.HashMap;
import java.util.Map;

import org.junit.jupiter.api.Test;

import org.springframework.context.annotation.AnnotationConfigApplicationContext;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.http.MediaType;
import org.springframework.web.testfixture.http.server.reactive.MockServerHttpRequest;
import org.springframework.web.testfixture.http.server.reactive.MockServerHttpResponse;
import org.springframework.web.testfixture.server.MockServerWebExchange;

import static org.assertj.core.api.Assertions.assertThat;

/**
 * Unit tests for String templates running on Jython.
 *
 * @author Sebastien Deleuze
 */
public class JythonScriptTemplateTests {

	@Test
	public void renderTemplate() throws Exception {
		// Model entries consumed by the Jython render function.
		Map<String, Object> attributes = new HashMap<>();
		attributes.put("body", "This is the body");
		attributes.put("title", "Layout example");
		String templatePath = "org/springframework/web/reactive/result/view/script/jython/template.html";
		MockServerHttpResponse response = render(templatePath, attributes);
		assertThat(response.getBodyAsString().block()).isEqualTo("<html><head><title>Layout example</title></head><body><p>This is the body</p></body></html>");
	}

	// Renders the given template with the given model and returns the mock response.
	private MockServerHttpResponse render(String viewUrl, Map<String, Object> model) throws Exception {
		ScriptTemplateView view = buildView(viewUrl);
		MockServerWebExchange exchange = MockServerWebExchange.from(MockServerHttpRequest.get("/"));
		view.renderInternal(model, MediaType.TEXT_HTML, exchange).block();
		return exchange.getResponse();
	}

	// Builds a ScriptTemplateView wired to the Jython scripting configuration below.
	private ScriptTemplateView buildView(String viewUrl) throws Exception {
		AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext();
		context.register(ScriptTemplatingConfiguration.class);
		context.refresh();
		ScriptTemplateView view = new ScriptTemplateView();
		view.setApplicationContext(context);
		view.setUrl(viewUrl);
		view.afterPropertiesSet();
		return view;
	}

	@Configuration
	static class ScriptTemplatingConfiguration {

		@Bean
		public ScriptTemplateConfigurer jythonConfigurer() {
			ScriptTemplateConfigurer configurer = new ScriptTemplateConfigurer();
			configurer.setScripts("org/springframework/web/reactive/result/view/script/jython/render.py");
			configurer.setEngineName("jython");
			configurer.setRenderFunction("render");
			return configurer;
		}
	}

}
apache-2.0
phvu/nd4j
nd4j-api/src/main/java/org/nd4j/linalg/api/ops/impl/transforms/arithmetic/RDivOp.java
2954
/*
 *
 *  * Copyright 2015 Skymind,Inc.
 *  *
 *  *    Licensed under the Apache License, Version 2.0 (the "License");
 *  *    you may not use this file except in compliance with the License.
 *  *    You may obtain a copy of the License at
 *  *
 *  *        http://www.apache.org/licenses/LICENSE-2.0
 *  *
 *  *    Unless required by applicable law or agreed to in writing, software
 *  *    distributed under the License is distributed on an "AS IS" BASIS,
 *  *    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  *    See the License for the specific language governing permissions and
 *  *    limitations under the License.
 *
 */

package org.nd4j.linalg.api.ops.impl.transforms.arithmetic;

import org.nd4j.linalg.api.complex.IComplexNumber;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.api.ops.BaseTransformOp;
import org.nd4j.linalg.api.ops.Op;

/**
 * Reverse division operation: each op computes {@code other / origin}
 * (i.e. y / x), not {@code origin / other}. The original javadoc said
 * "Division operation", which contradicted the implementations below.
 *
 * @author Adam Gibson
 */
public class RDivOp extends BaseTransformOp {
    public RDivOp() {
    }

    public RDivOp(INDArray x, INDArray y, INDArray z, int n) {
        super(x, y, z, n);
    }

    public RDivOp(INDArray x) {
        super(x);
    }

    public RDivOp(INDArray x, INDArray z) {
        super(x, z);
    }

    public RDivOp(INDArray x, INDArray z, int n) {
        super(x, z, n);
    }

    @Override
    public String name() {
        return "rdiv";
    }

    @Override
    public IComplexNumber op(IComplexNumber origin, double other) {
        return origin.rdiv(other);
    }

    @Override
    public IComplexNumber op(IComplexNumber origin, float other) {
        return origin.rdiv(other);
    }

    @Override
    public IComplexNumber op(IComplexNumber origin, IComplexNumber other) {
        // reverse division: other / origin
        return other.div(origin);
    }

    @Override
    public float op(float origin, float other) {
        return other / origin;
    }

    @Override
    public double op(double origin, double other) {
        return other / origin;
    }

    // Unary forms are the identity (no second operand to divide by).
    @Override
    public double op(double origin) {
        return origin;
    }

    @Override
    public float op(float origin) {
        return origin;
    }

    @Override
    public IComplexNumber op(IComplexNumber origin) {
        return origin;
    }

    /**
     * Builds the same op restricted to the given vector along {@code dimension}.
     * When y is absent the two-array constructor is used instead.
     */
    @Override
    public Op opForDimension(int index, int dimension) {
        INDArray xAlongDimension = x.vectorAlongDimension(index, dimension);

        if (y() != null)
            return new RDivOp(xAlongDimension, y.vectorAlongDimension(index, dimension), z.vectorAlongDimension(index, dimension), xAlongDimension.length());
        else
            return new RDivOp(xAlongDimension, z.vectorAlongDimension(index, dimension), xAlongDimension.length());
    }

    @Override
    public void init(INDArray x, INDArray y, INDArray z, int n) {
        super.init(x, y, z, n);
        // y is the mandatory numerator for rdiv
        if (y == null)
            throw new IllegalArgumentException("No components to divide");
    }
}
apache-2.0
Top-Q/jsystem
jsystem-core-projects/jsystemApp/src/main/java/jsystem/treeui/reporter/TestReportModel.java
3794
/*
 * Copyright 2005-2010 Ignis Software Tools Ltd. All rights reserved.
 */
package jsystem.treeui.reporter;

import java.util.ArrayList;

import javax.swing.table.DefaultTableModel;

import jsystem.framework.report.Reporter;

/**
 * TestReportModel — table model for the test-reporter view.
 *
 * Two columns ("Commands", "Status"); each row wraps a
 * {@link TestReportCommand}. Rows are appended via {@link #addCommand(String)}
 * and their status is filled in later via {@link #addStatus(int, int, boolean)}.
 * When the cache reaches {@code MAX_ALLOWED_CACHE} entries, the older half is
 * discarded.
 */
public class TestReportModel extends DefaultTableModel {

	private static final long serialVersionUID = 1L;

	private String[] ColumnList = { "Commands", "Status" };

	// Change-notification types for handleDataChange().
	private final int ALL = 0;
	private final int NEW = 1;
	private final int SAME = 2;

	private final int MAX_DEVICE_TABLE_ROW = 10000;
	private final int MAX_DEVICE_TABLE_COLUMN = ColumnList.length;
	private final int MAX_ALLOWED_CACHE = 2000;
	private final int FIRST = 0;

	// Internal status codes (mapped from Reporter.* values in addStatus()).
	private final int NONE = 0;
	private final int OK = 1;
	private final int FAIL = -1;
	private final int WARNING = 2;

	// Column indices.
	private final int COMMAND = 0;
	private final int STATUS = 1;

	// Data
	private ArrayList<TestReportCommand> allDataList;

	public TestReportModel() {
		allDataList = new ArrayList<TestReportCommand>();
	}

	public String getColumnName(int column) {
		// Bug fix: the original check was 'column > MAX_DEVICE_TABLE_COLUMN',
		// which let column == ColumnList.length through and raised
		// ArrayIndexOutOfBoundsException. Also guard against negatives.
		if (column < 0 || column >= MAX_DEVICE_TABLE_COLUMN) {
			return null;
		}
		return ColumnList[column];
	}

	public Object getValueAt(int iRowIndex, int iColumnIndex) {
		if (iRowIndex >= allDataList.size()) {
			return null;
		}
		TestReportCommand command = allDataList.get(iRowIndex);
		switch (iColumnIndex) {
		case COMMAND:
			return command.command;
		case STATUS:
			// Map the internal status code to its display string.
			switch (command.status) {
			case NONE:
				return "";
			case OK:
				return "Pass";
			case FAIL:
				return "Fail";
			case WARNING:
				return "Warning";
			default:
				return "";
			}
		default:
			return null;
		}
	}

	// The table is read-only; all edits are ignored (see isCellEditable()).
	public void setValueAt(Object aValue, int iRowIndex, int iColumnIndex) {
		if (iRowIndex > MAX_DEVICE_TABLE_ROW) {
			return;
		}
		if (allDataList == null) {
			return;
		}
		if (iRowIndex >= allDataList.size()) {
			return;
		}
	}

	public int getColumnCount() {
		return MAX_DEVICE_TABLE_COLUMN;
	}

	public int getRowCount() {
		if (allDataList == null) {
			return 0;
		}
		return allDataList.size();
	}

	public boolean isCellEditable(int row, int column) {
		return false;
	}

	/**
	 * Fires the appropriate table-model event: ALL on clear, NEW when a row is
	 * appended, SAME when the last row's status cell changed.
	 */
	private void handleDataChange(int type) {
		switch (type) {
		case ALL:
			fireTableDataChanged();
			break;
		case NEW:
			fireTableRowsInserted(allDataList.size() - 1, allDataList.size() - 1);
			break;
		case SAME:
			// Bug fix: the original call was fireTableCellUpdated(STATUS,
			// allDataList.size() - 1) — the Swing API is (row, column), so the
			// arguments were swapped and the wrong cell was refreshed.
			// NOTE(review): this still refreshes only the LAST row, while
			// addStatus() may update any index — confirm whether a per-index
			// refresh is wanted.
			fireTableCellUpdated(allDataList.size() - 1, STATUS);
		}
	}

	public void clearModel() {
		allDataList.clear();
		handleDataChange(ALL);
	}

	/**
	 * Appends a command row (evicting the older half of the cache if full) and
	 * returns the new row's index.
	 */
	public int addCommand(String command) {
		TestReportCommand newCommand = new TestReportCommand(command, NONE);
		if (allDataList.size() == MAX_ALLOWED_CACHE) {
			int index = 0;
			while (index < (MAX_ALLOWED_CACHE / 2)) {
				allDataList.remove(FIRST);
				index++;
			}
		}
		allDataList.add(newCommand);
		handleDataChange(NEW);
		return (allDataList.size() - 1);
	}

	/**
	 * Sets the status of the row at {@code index}.
	 *
	 * @param index  row index previously returned by addCommand()
	 * @param status the status passed on from the {@link Reporter} class
	 * @param bold   whether the row should render bold
	 */
	public void addStatus(int index, int status, boolean bold) {
		TestReportCommand command = allDataList.get(index);
		if (command != null) {
			command.bold = bold;
			if (status == Reporter.PASS) {
				command.status = OK;
			} else if (status == Reporter.WARNING) {
				command.status = WARNING;
			} else {
				command.status = FAIL;
			}
			handleDataChange(SAME);
		}
	}

	public TestReportCommand getCommandAt(int index) {
		if (index >= allDataList.size()) {
			return null;
		}
		return allDataList.get(index);
	}
}
apache-2.0
slacrey/heron-project
heron-core/src/main/java/com/loadburn/heron/converter/NumberConverters.java
1866
package com.loadburn.heron.converter;

import com.loadburn.heron.utils.converter.Converter;

import java.math.BigDecimal;
import java.math.BigInteger;
import java.util.ArrayList;
import java.util.List;

/**
 * Factory for the standard {@link Number} conversions: one adaptor per target
 * numeric type (Integer, Long, Float, Double, Short, BigInteger, BigDecimal),
 * each narrowing/widening via the corresponding {@code Number.xxxValue()} call.
 *
 * @author slacrey (scstlinfeng@yahoo.com)
 *         Date: 13-10-26
 */
public class NumberConverters {

    public static List<Converter<?, ?>> converters() {
        final List<Converter<?, ?>> result = new ArrayList<>();

        result.add(new ConverterAdaptor<Number, Integer>() {
            public Integer to(Number source) {
                return source.intValue();
            }
        });
        result.add(new ConverterAdaptor<Number, Long>() {
            public Long to(Number source) {
                return source.longValue();
            }
        });
        result.add(new ConverterAdaptor<Number, Float>() {
            public Float to(Number source) {
                return source.floatValue();
            }
        });
        result.add(new ConverterAdaptor<Number, Double>() {
            public Double to(Number source) {
                return source.doubleValue();
            }
        });
        result.add(new ConverterAdaptor<Number, Short>() {
            public Short to(Number source) {
                return source.shortValue();
            }
        });
        result.add(new ConverterAdaptor<Number, BigInteger>() {
            public BigInteger to(Number source) {
                return BigInteger.valueOf(source.longValue());
            }
        });
        result.add(new ConverterAdaptor<Number, BigDecimal>() {
            public BigDecimal to(Number source) {
                return BigDecimal.valueOf(source.doubleValue());
            }
        });

        return result;
    }
}
apache-2.0
fsarradin/Broceliand
src/main/java/net/kerflyn/broceliand/model/charge/NoShippingCharge.java
885
/*
 * Copyright 2012 François Sarradin <fsarradin AT gmail DOT com>
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package net.kerflyn.broceliand.model.charge;

import javax.persistence.DiscriminatorValue;
import javax.persistence.Entity;

/**
 * Shipping-charge strategy persisted under the single-table-inheritance
 * discriminator value {@code "NONE"}.
 *
 * This subclass declares no state or behaviour of its own, so everything is
 * inherited unchanged from {@link ShippingChargeStrategy} (base class not
 * visible here). NOTE(review): the name suggests this represents free/zero
 * shipping via the base class's defaults — confirm against
 * ShippingChargeStrategy.
 */
@Entity
@DiscriminatorValue("NONE")
public class NoShippingCharge extends ShippingChargeStrategy {

}
apache-2.0
webos21/xi
java/jcl/src/java/org/w3c/dom/DOMImplementation.java
8069
/*
 * Copyright (c) 2004 World Wide Web Consortium,
 *
 * (Massachusetts Institute of Technology, European Research Consortium for
 * Informatics and Mathematics, Keio University). All Rights Reserved. This
 * work is distributed under the W3C(r) Software License [1] in the hope that
 * it will be useful, but WITHOUT ANY WARRANTY; without even the implied
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * [1] http://www.w3.org/Consortium/Legal/2002/copyright-software-20021231
 */

package org.w3c.dom;

/**
 * The <code>DOMImplementation</code> interface provides a number of methods
 * for performing operations that are independent of any particular instance
 * of the document object model.
 * <p>
 * See also the <a
 * href='http://www.w3.org/TR/2004/REC-DOM-Level-3-Core-20040407'>Document
 * Object Model (DOM) Level 3 Core Specification</a>.
 */
public interface DOMImplementation {

	/**
	 * Tests whether the DOM implementation implements a specific feature and
	 * version, as specified in the DOM Level 3 Core conformance section.
	 *
	 * @param feature The name of the feature to test.
	 * @param version The version number of the feature to test.
	 * @return <code>true</code> if the feature is implemented in the
	 *         specified version, <code>false</code> otherwise.
	 */
	public boolean hasFeature(String feature, String version);

	/**
	 * Creates an empty <code>DocumentType</code> node. Entity declarations
	 * and notations are not made available; entity-reference expansions and
	 * default-attribute additions do not occur.
	 *
	 * @param qualifiedName The qualified name of the document type to create.
	 * @param publicId The external subset public identifier.
	 * @param systemId The external subset system identifier.
	 * @return A new <code>DocumentType</code> node with
	 *         <code>Node.ownerDocument</code> set to <code>null</code>.
	 * @exception DOMException INVALID_CHARACTER_ERR if
	 *         <code>qualifiedName</code> is not an XML name;
	 *         NAMESPACE_ERR if it is malformed; NOT_SUPPORTED_ERR may be
	 *         raised if the implementation does not support the "XML"
	 *         feature and the exposed language does not support XML
	 *         Namespaces (such as HTML 4.01).
	 * @since DOM Level 2
	 */
	public DocumentType createDocumentType(String qualifiedName,
			String publicId, String systemId) throws DOMException;

	/**
	 * Creates a DOM <code>Document</code> object of the specified type with
	 * its document element. The implementation may instantiate specialized
	 * <code>Document</code> objects (e.g. "HTML") based on the given
	 * <code>doctype</code>. If <code>namespaceURI</code>,
	 * <code>qualifiedName</code> and <code>doctype</code> are all
	 * <code>null</code>, the returned <code>Document</code> is empty with no
	 * document element.
	 *
	 * @param namespaceURI The namespace URI of the document element to
	 *         create, or <code>null</code>.
	 * @param qualifiedName The qualified name of the document element to
	 *         create, or <code>null</code>.
	 * @param doctype The type of document to create, or <code>null</code>.
	 *         When non-<code>null</code>, its
	 *         <code>Node.ownerDocument</code> attribute is set to the
	 *         document being created.
	 * @return A new <code>Document</code> object with its document element.
	 * @exception DOMException INVALID_CHARACTER_ERR if
	 *         <code>qualifiedName</code> is not an XML name;
	 *         NAMESPACE_ERR if <code>qualifiedName</code> is malformed or
	 *         inconsistent with <code>namespaceURI</code> (including the
	 *         "xml" prefix rules); WRONG_DOCUMENT_ERR if
	 *         <code>doctype</code> was already used with a different
	 *         document or came from a different implementation;
	 *         NOT_SUPPORTED_ERR may be raised as for
	 *         <code>createDocumentType</code>.
	 * @since DOM Level 2
	 */
	public Document createDocument(String namespaceURI, String qualifiedName,
			DocumentType doctype) throws DOMException;

	/**
	 * Returns a specialized object implementing the specialized APIs of the
	 * specified feature and version. Any "+" prepended to the feature name
	 * is ignored. If the returned object implements
	 * <code>DOMImplementation</code>, it must delegate to the primary core
	 * implementation and stay consistent with it (e.g. for
	 * <code>hasFeature</code> and <code>getFeature</code>).
	 *
	 * @param feature The name of the feature requested.
	 * @param version The version number of the feature to test.
	 * @return An object implementing the specialized APIs of the feature and
	 *         version, or <code>null</code> if there is none.
	 * @since DOM Level 3
	 */
	public Object getFeature(String feature, String version);
}
apache-2.0
nmldiegues/stibt
infinispan/core/src/main/java/org/infinispan/distribution/group/GroupingConsistentHash.java
3779
/*
 * JBoss, Home of Professional Open Source
 * Copyright 2012 Red Hat Inc. and/or its affiliates and other contributors
 * as indicated by the @author tags. All rights reserved.
 * See the copyright.txt in the distribution for a
 * full listing of individual contributors.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License, v. 2.1.
 * This program is distributed in the hope that it will be useful, but WITHOUT A
 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
 * PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
 * You should have received a copy of the GNU Lesser General Public License,
 * v.2.1 along with this distribution; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
 * MA 02110-1301, USA.
 */
package org.infinispan.distribution.group;

import org.infinispan.commons.hash.Hash;
import org.infinispan.distribution.ch.ConsistentHash;
import org.infinispan.remoting.transport.Address;

import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

/**
 * {@link ConsistentHash} wrapper that groups keys to the same node based on their @{@link Group}
 * annotation.
 * <p/>
 * It uses a {@link GroupManager} to determine the group key from annotations. Every key-based
 * lookup is rewritten through {@link #getGroupKey(Object)} before being delegated to the
 * wrapped hash; all other calls delegate unchanged.
 *
 * @author Dan Berindei
 * @since 5.2
 */
public class GroupingConsistentHash implements ConsistentHash {
   private final ConsistentHash ch;
   private final GroupManager groupManager;

   public GroupingConsistentHash(ConsistentHash ch, GroupManager groupManager) {
      this.ch = ch;
      this.groupManager = groupManager;
   }

   @Override
   public int getNumSegments() {
      return ch.getNumSegments();
   }

   @Override
   public int getNumOwners() {
      return ch.getNumOwners();
   }

   @Override
   public Hash getHashFunction() {
      // Bug fix: previously returned null, breaking any caller that hashes
      // keys through this wrapper. Delegate like every other accessor.
      return ch.getHashFunction();
   }

   @Override
   public List<Address> getMembers() {
      return ch.getMembers();
   }

   @Override
   public int getSegment(Object key) {
      return ch.getSegment(getGroupKey(key));
   }

   @Override
   public List<Address> locateOwnersForSegment(int segmentId) {
      return ch.locateOwnersForSegment(segmentId);
   }

   @Override
   public Address locatePrimaryOwnerForSegment(int segmentId) {
      return ch.locatePrimaryOwnerForSegment(segmentId);
   }

   @Override
   public Set<Integer> getSegmentsForOwner(Address owner) {
      return ch.getSegmentsForOwner(owner);
   }

   @Override
   public String getRoutingTableAsString() {
      return ch.getRoutingTableAsString();
   }

   @Override
   public Address locatePrimaryOwner(Object key) {
      return ch.locatePrimaryOwner(getGroupKey(key));
   }

   @Override
   public List<Address> locateOwners(Object key) {
      return ch.locateOwners(getGroupKey(key));
   }

   @Override
   public Set<Address> locateAllOwners(Collection<Object> keys) {
      HashSet<Address> result = new HashSet<Address>();
      for (Object key : keys) {
         // Bug fix: locateOwners() already applies the group mapping; the
         // previous code passed getGroupKey(key) here too, grouping twice.
         result.addAll(locateOwners(key));
      }
      return result;
   }

   @Override
   public boolean isKeyLocalToNode(Address nodeAddress, Object key) {
      return ch.isKeyLocalToNode(nodeAddress, getGroupKey(key));
   }

   /**
    * Returns the group key for {@code key} if the {@link GroupManager} defines
    * one, otherwise the key itself.
    */
   private Object getGroupKey(Object key) {
      Object finalKey = key;
      String groupKey = groupManager.getGroup(key);
      if (groupKey != null) {
         finalKey = groupKey;
      }
      return finalKey;
   }

   @Override
   public String toString() {
      return "GroupingConsistentHash:" + ch;
   }

   public final ConsistentHash getConsistentHash() {
      return ch;
   }
}
apache-2.0
parstream/parstream-storm
bolt/src/main/java/com/parstream/adaptor/storm/config/Configuration.java
1488
/**
 * Copyright 2015 ParStream GmbH
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.parstream.adaptor.storm.config;

import java.io.Serializable;
import java.util.Map;

/**
 * ParStream's storm bolt configuration POJO. Fields are public and mutable by
 * design: the object is populated from external configuration and shipped to
 * the bolt via Java serialization.
 */
public class Configuration implements Serializable {

    private static final long serialVersionUID = 1L;

    // --- ParStream database connection ---
    public String host;
    public String port;
    public String username;
    public String password;

    // --- Column mapping ---
    public String tableName;
    public boolean failOnMissingField = true;
    public Map<String, String> columnToField;

    // --- Auto commit ---
    public Long autoCommitCount;
    public Long autoCommitTimespan;
    public CommitOrder commitOrder = CommitOrder.INPUT_FIRST;

    /** Connection coordinates only; credentials are not included. */
    @Override
    public String toString() {
        return String.format("Configuration(host=%s,port=%s,tableName=%s)", host, port, tableName);
    }
}
apache-2.0
vyadh/donkeysql
src/test/java/org/softpres/donkeysql/params/IndexedParamQueryTest.java
2518
/*
 * Copyright (c) 2017, Kieron Wilkinson
 */
package org.softpres.donkeysql.params;

import org.junit.Test;
import org.softpres.donkeysql.UncheckedSQLException;

import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.catchThrowable;
import static org.softpres.donkeysql.params.IndexedParamQuery.count;
import static org.softpres.donkeysql.params.IndexedParamQuery.humanise;

/**
 * Unit tests for {@link IndexedParamQuery}.
 *
 * Covers count() (number of '?' placeholders, ignoring quoted ones) and
 * humanise() (substituting parameter values into the SQL for display,
 * quoting non-numeric values).
 */
public class IndexedParamQueryTest {

  // Shared empty varargs array for the no-parameter cases.
  private final Object[] noParams = { };

  @Test
  public void countNormalisedParameters() {
    assertThat(count("SELECT * FROM t WHERE a = 'id'")).isZero();
    assertThat(count("SELECT * FROM t WHERE a = ?")).isEqualTo(1);
    assertThat(count("SELECT * FROM t WHERE a = ? AND b = ?")).isEqualTo(2);
    // A '?' inside single quotes is literal text, not a placeholder.
    assertThat(count("SELECT * FROM t WHERE a = ? AND b = '?' AND c = ?")).isEqualTo(2);
  }

  @Test
  public void humaniseWithNoParameters() {
    assertThat(
          humanise("SELECT * FROM table WHERE column = 'id'", noParams))
          .isEqualTo("SELECT * FROM table WHERE column = 'id'");
  }

  @Test
  public void humaniseWithNumericParameters() {
    // Numeric values are substituted without quoting.
    assertThat(
          humanise("SELECT * FROM table WHERE id = ? AND size > ?", 1, 100))
          .isEqualTo("SELECT * FROM table WHERE id = 1 AND size > 100");
  }

  @Test
  public void humaniseWithNonNumericParameters() {
    // Non-numeric values are substituted with single quotes.
    assertThat(
          humanise("SELECT * FROM table WHERE column = ? AND type > ?", "name", "varchar"))
          .isEqualTo("SELECT * FROM table WHERE column = 'name' AND type > 'varchar'");
  }

  @Test
  public void humaniseWithQuotedMarksNotUsedAsParameter() {
    // The quoted '?' is left untouched; only real placeholders are replaced.
    assertThat(
          humanise("WHERE a > ? AND b = '?' AND c = ?", 1, "2"))
          .isEqualTo("WHERE a > 1 AND b = '?' AND c = '2'");
  }

  @Test
  public void humaniseWithTooFewParameters() {
    Throwable error = catchThrowable(() ->
          humanise("SELECT * FROM table WHERE column = ? AND size = ?", 1));

    assertThat(error)
          .isInstanceOf(UncheckedSQLException.class)
          .hasMessageStartingWith("Parameters supplied do not correspond to SQL statement");
  }

  @Test
  public void humaniseWithTooManyParameters() {
    Throwable error = catchThrowable(() ->
          humanise("SELECT * FROM table WHERE column = ?", 1, 2));

    assertThat(error)
          .isInstanceOf(UncheckedSQLException.class)
          .hasMessageStartingWith("Parameters supplied do not correspond to SQL statement");
  }
}
apache-2.0
magnusmickelsson/pokeraidbot
src/main/java/pokeraidbot/Utils.java
16313
package pokeraidbot;

import com.jagrosh.jdautilities.command.CommandEvent;
import net.dv8tion.jda.api.entities.User;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.Validate;
import pokeraidbot.domain.config.ClockService;
import pokeraidbot.domain.config.LocaleService;
import pokeraidbot.domain.errors.UserMessedUpException;
import pokeraidbot.domain.gym.Gym;
import pokeraidbot.domain.pokemon.Pokemon;
import pokeraidbot.domain.pokemon.PokemonRepository;
import pokeraidbot.domain.pokemon.PokemonTypes;
import pokeraidbot.domain.pokemon.ResistanceTable;
import pokeraidbot.domain.raid.PokemonRaidStrategyService;
import pokeraidbot.domain.raid.Raid;
import pokeraidbot.domain.raid.signup.SignUp;

import java.net.SocketException;
import java.net.SocketTimeoutException;
import java.time.DateTimeException;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.LocalTime;
import java.time.format.DateTimeFormatter;
import java.time.format.DateTimeParseException;
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.Set;

/**
 * Static helpers shared by the bot's commands: date/time parsing and printing,
 * raid time-window validation (throwing {@link UserMessedUpException} with a
 * localized message on violation), type-effectiveness lookups and message
 * formatting. Time is always read through the injectable {@link ClockService}
 * so tests can control "now".
 */
public class Utils {
    public static final DateTimeFormatter timeParseFormatter = DateTimeFormatter.ofPattern("H[:][.]mm");
    public static final DateTimeFormatter dateAndTimeParseFormatter =
            DateTimeFormatter.ofPattern("yyyy-MM-dd HH[:][.]mm");
    public static final DateTimeFormatter timePrintFormatter = DateTimeFormatter.ofPattern("HH:mm");
    public static final DateTimeFormatter dateAndTimePrintFormatter =
            DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm");
    public static final int HIGH_LIMIT_FOR_SIGNUPS = 20;
    public static final int RAID_DURATION_IN_MINUTES = 45;
    public static final int EX_RAID_DURATION_IN_MINUTES = 45;
    // NOTE(review): not referenced anywhere in this class; kept to avoid breaking
    // anything that may rely on it (e.g. via reflection) — confirm before removing.
    private static final String EX_RAID_BOSS = "deoxys";
    private static ClockService clockService = new ClockService();
    private static ResistanceTable resistanceTable = new ResistanceTable();

    public static ClockService getClockService() {
        return clockService;
    }

    /** Replaces the clock used by all time checks; intended for tests. */
    public static void setClockService(ClockService clockService) {
        Utils.clockService = clockService;
    }

    /** Formats as {@code yyyy-MM-dd HH:mm}. */
    public static String printDateTime(LocalDateTime dateTime) {
        return dateTime.format(dateAndTimePrintFormatter);
    }

    /** Formats as {@code HH:mm}. */
    public static String printTime(LocalTime time) {
        return time.format(timePrintFormatter);
    }

    /** Prints only the time portion when the date is today, otherwise the full date and time. */
    public static String printTimeIfSameDay(LocalDateTime dateAndTime) {
        if (dateAndTime.toLocalDate().isEqual(LocalDate.now())) {
            return dateAndTime.toLocalTime().format(timePrintFormatter);
        } else {
            return printDateTime(dateAndTime);
        }
    }

    /**
     * @return true if {@code typeToCheck} is strong against both of a dual-typed
     *         pokemon's types (i.e. a double weakness).
     */
    public static boolean isTypeDoubleStrongVsPokemonWithTwoTypes(String pokemonTypeOne, String pokemonTypeTwo,
                                                                  String typeToCheck) {
        Validate.notEmpty(pokemonTypeOne);
        Validate.notEmpty(pokemonTypeTwo);
        Validate.notEmpty(typeToCheck);
        return typeIsStrongVsPokemon(pokemonTypeOne, typeToCheck)
                && typeIsStrongVsPokemon(pokemonTypeTwo, typeToCheck);
    }

    public static Set<String> getWeaknessesFor(PokemonTypes pokemonType) {
        return resistanceTable.getWeaknesses(pokemonType);
    }

    public static boolean typeIsStrongVsPokemon(String pokemonType, String typeToCheck) {
        return resistanceTable.typeIsStrongVsPokemon(pokemonType, typeToCheck);
    }

    /**
     * Asserts that a sign-up time is neither before the raid's start nor in the past.
     *
     * @throws UserMessedUpException with a localized message on violation
     */
    public static void assertSignupTimeNotBeforeRaidStartAndNow(User user, LocalDateTime dateAndTime,
                                                                LocalDateTime endOfRaid, LocaleService localeService,
                                                                boolean isExRaid) {
        final LocalDateTime now = clockService.getCurrentDateTime();
        assertSignupTimeNotBeforeRaidStart(user, dateAndTime, endOfRaid, localeService, isExRaid);
        if (dateAndTime.isBefore(now)) {
            throw new UserMessedUpException(user,
                    localeService.getMessageFor(LocaleService.SIGN_BEFORE_NOW, localeService.getLocaleForUser(user),
                            printTimeIfSameDay(dateAndTime), printTimeIfSameDay(now)));
        }
    }

    /** Derives the raid start by subtracting the (EX or standard) raid duration from its end. */
    public static LocalDateTime getStartOfRaid(LocalDateTime endOfRaid, boolean isExRaid) {
        return isExRaid ? endOfRaid.minusMinutes(EX_RAID_DURATION_IN_MINUTES)
                : endOfRaid.minusMinutes(RAID_DURATION_IN_MINUTES);
    }

    /** @throws UserMessedUpException if {@code dateAndTime} is before the raid's start. */
    public static void assertSignupTimeNotBeforeRaidStart(User user, LocalDateTime dateAndTime,
                                                          LocalDateTime endOfRaid, LocaleService localeService,
                                                          boolean isExRaid) {
        final LocalDateTime startOfRaid = getStartOfRaid(endOfRaid, isExRaid);
        if (dateAndTime.isBefore(startOfRaid)) {
            throw new UserMessedUpException(user,
                    localeService.getMessageFor(LocaleService.SIGN_BEFORE_RAID, localeService.getLocaleForUser(user),
                            printTimeIfSameDay(dateAndTime), printTimeIfSameDay(startOfRaid)));
        }
    }

    /** @throws UserMessedUpException if the group start time is in the past. */
    public static void assertGroupTimeNotBeforeNow(User user, LocalDateTime dateAndTime,
                                                   LocaleService localeService) {
        final LocalDateTime now = clockService.getCurrentDateTime();
        if (dateAndTime.isBefore(now)) {
            throw new UserMessedUpException(user,
                    localeService.getMessageFor(LocaleService.NO_GROUP_BEFORE_NOW, localeService.getLocaleForUser(user),
                            printTimeIfSameDay(dateAndTime), printTimeIfSameDay(now)));
        }
    }

    /** @throws UserMessedUpException if the raid creation time is in the past (likely a timezone mixup). */
    public static void assertCreateRaidTimeNotBeforeNow(User user, LocalDateTime dateAndTime,
                                                        LocaleService localeService) {
        final LocalDateTime now = clockService.getCurrentDateTime();
        if (dateAndTime.isBefore(now)) {
            throw new UserMessedUpException(user,
                    localeService.getMessageFor(LocaleService.TIMEZONE, localeService.getLocaleForUser(user),
                            printTimeIfSameDay(dateAndTime), printTimeIfSameDay(now)));
        }
    }

    /** @throws UserMessedUpException if the time falls in the nightly no-raid window (23:00–05:00). */
    public static void assertTimeNotInNoRaidTimespan(User user, LocalTime time, LocaleService localeService) {
        if (time.isAfter(LocalTime.of(23, 0)) || time.isBefore(LocalTime.of(5, 0))) {
            throw new UserMessedUpException(user,
                    localeService.getMessageFor(LocaleService.NO_RAIDS_NOW, localeService.getLocaleForUser(user),
                            printTime(time)));
        }
    }

    /**
     * @throws UserMessedUpException if {@code time} is more than {@code hours} hours from now.
     *         The check is skipped late at night (after 22:00) to tolerate times wrapping past midnight.
     */
    public static void assertTimeNotMoreThanXHoursFromNow(User user, LocalTime time, LocaleService localeService,
                                                          Integer hours) {
        final LocalTime now = clockService.getCurrentTime();
        // Bug fix: previously hard-coded plusHours(2), silently ignoring the "hours"
        // parameter that callers pass and that the error message reports.
        if (now.isBefore(LocalTime.of(22, 0)) && now.plusHours(hours).isBefore(time)) {
            throw new UserMessedUpException(user,
                    localeService.getMessageFor(LocaleService.NO_RAID_TOO_LONG, localeService.getLocaleForUser(user),
                            printTime(time), printTime(now), String.valueOf(hours)));
        }
    }

    /** @throws UserMessedUpException if the ETA lies after the raid has ended. */
    public static void assertEtaNotAfterRaidEnd(User user, Raid raid, LocalDateTime eta,
                                                LocaleService localeService) {
        if (eta.isAfter(raid.getEndOfRaid())) {
            throw new UserMessedUpException(user,
                    localeService.getMessageFor(LocaleService.NO_ETA_AFTER_RAID, localeService.getLocaleForUser(user),
                            printTimeIfSameDay(eta), printTimeIfSameDay(raid.getEndOfRaid())));
        }
    }

    /** Builds a Google static-map URL with a marker at the gym's coordinates. */
    public static String getStaticMapUrl(Gym gym) {
        // todo: host marker png via pokeraidbot web
        String url = "https://maps.googleapis.com/maps/api/staticmap?center=" + gym.getX() + "," + gym.getY()
                + "&zoom=14&size=400x400&maptype=roadmap&markers=icon:http://millert.se/pogo/marker_xsmall.png%7C"
                + gym.getX() + "," + gym.getY() + "&key=AIzaSyAZm7JLojr2KaUvkeHEpHh0Y-zPwP3dpCU";
        return url;
    }

    /** Builds an interactive Google Maps URL for the gym's coordinates. */
    public static String getNonStaticMapUrl(Gym gym) {
        String url = "http://www.google.com/maps?q=" + gym.getX() + "," + gym.getY();
        return url;
    }

    /** Lists a pokemon's weaknesses, bolding (markdown) any double weakness for dual-typed pokemon. */
    public static String printWeaknesses(Pokemon pokemon) {
        Set<String> weaknessesToPrint = new LinkedHashSet<>();
        final Set<String> typeSet = pokemon.getTypes().getTypeSet();
        for (String weakness : pokemon.getWeaknesses()) {
            if (typeSet.size() > 1) {
                final Iterator<String> iterator = typeSet.iterator();
                if (isTypeDoubleStrongVsPokemonWithTwoTypes(iterator.next(), iterator.next(), weakness)) {
                    weaknessesToPrint.add("**" + weakness + "**");
                } else {
                    weaknessesToPrint.add(weakness);
                }
            } else {
                weaknessesToPrint.add(weakness);
            }
        }
        return StringUtils.join(weaknessesToPrint, ", ");
    }

    public static boolean isSamePokemon(String pokemonName, String existingEntityPokemon) {
        return pokemonName.equalsIgnoreCase(existingEntityPokemon);
    }

    /** @return true if the two raids' active time windows overlap. */
    public static boolean raidsCollide(LocalDateTime endOfRaid, boolean isExRaid, LocalDateTime endOfRaidTwo,
                                       boolean isExRaidTwo) {
        LocalDateTime startTime = getStartOfRaid(endOfRaid, isExRaid);
        LocalDateTime startTimeTwo = getStartOfRaid(endOfRaidTwo, isExRaidTwo);
        return isInInterval(startTime, endOfRaid, startTimeTwo, endOfRaidTwo)
                || isInInterval(startTimeTwo, endOfRaidTwo, startTime, endOfRaid);
    }

    // Strictly-inside overlap test; checked both ways by raidsCollide, so identical
    // intervals touching only at endpoints do not count as a collision.
    private static boolean isInInterval(LocalDateTime startTime, LocalDateTime endOfRaid,
                                        LocalDateTime startTimeTwo, LocalDateTime endOfRaidTwo) {
        return (startTime.isAfter(startTimeTwo) && startTime.isBefore(endOfRaidTwo))
                || (endOfRaid.isBefore(endOfRaidTwo) && endOfRaid.isAfter(startTimeTwo));
    }

    /** @return true if the named pokemon is a tier-5 (EX) raid boss according to the strategy service. */
    public static boolean isRaidExPokemon(String pokemonName, PokemonRaidStrategyService strategyService,
                                          PokemonRepository pokemonRepository) {
        Pokemon pokemon = pokemonRepository.getByName(pokemonName);
        return strategyService.getRaidInfo(pokemon).getBossTier() == 5;
    }

    /**
     * Parses user time input such as "9.45", "9:45" or "0945".
     *
     * @throws UserMessedUpException with a localized format hint on bad input
     */
    public static LocalTime parseTime(User user, String timeString, LocaleService localeService) {
        LocalTime endsAtTime;
        try {
            timeString = preProcessTimeString(timeString);
            endsAtTime = LocalTime.parse(timeString, Utils.timeParseFormatter);
        } catch (DateTimeParseException | NullPointerException e) {
            throw new UserMessedUpException(user,
                    localeService.getMessageFor(LocaleService.BAD_DATETIME_FORMAT, localeService.getLocaleForUser(user),
                            "HH:MM", timeString));
        }
        return endsAtTime;
    }

    /**
     * Parses a player count, enforcing 1..{@link #HIGH_LIMIT_FOR_SIGNUPS}.
     *
     * @throws UserMessedUpException if the input is not a number or out of range
     */
    public static Integer assertNotTooManyOrNoNumber(User user, LocaleService localeService, String people) {
        Integer numberOfPeople;
        try {
            // Integer.parseInt instead of the deprecated new Integer(String);
            // NumberFormatException is a RuntimeException and is caught below.
            numberOfPeople = Integer.parseInt(people);
            if (numberOfPeople < 1 || numberOfPeople > HIGH_LIMIT_FOR_SIGNUPS) {
                throw new RuntimeException();
            }
        } catch (RuntimeException e) {
            throw new UserMessedUpException(user,
                    localeService.getMessageFor(LocaleService.ERROR_PARSE_PLAYERS, localeService.getLocaleForUser(user),
                            people, String.valueOf(HIGH_LIMIT_FOR_SIGNUPS)));
        }
        return numberOfPeople;
    }

    /**
     * Parses an ISO-8601 date (yyyy-MM-dd).
     *
     * @throws UserMessedUpException with a localized format hint on bad input
     */
    public static LocalDate parseDate(User user, String dateString, LocaleService localeService) {
        LocalDate theDate;
        try {
            theDate = LocalDate.parse(dateString);
        } catch (DateTimeException | NullPointerException e) {
            throw new UserMessedUpException(user,
                    localeService.getMessageFor(LocaleService.BAD_DATETIME_FORMAT, localeService.getLocaleForUser(user),
                            "yyyy-MM-dd", dateString));
        }
        return theDate;
    }

    /** Formats the names (with counts and optionally ETA) of all sign-ups that bring at least one person. */
    public static Set<String> getNamesOfThoseWithSignUps(Set<SignUp> signUpsAt, boolean includeEta) {
        final Set<String> signUpNames;
        signUpNames = new LinkedHashSet<>();
        for (SignUp signUp : signUpsAt) {
            if (signUp.getHowManyPeople() > 0) {
                String text = signUp.getUserName() + " (**" + signUp.getHowManyPeople();
                if (includeEta) {
                    text = text + ", ETA " + printTime(signUp.getArrivalTime());
                }
                text = text + "**)";
                signUpNames.add(text);
            }
        }
        return signUpNames;
    }

    /** Sprite URL for a hatched boss (by dex number) or an egg image (by name). */
    public static String getPokemonIcon(Pokemon pokemon) {
        if (!pokemon.isEgg()) {
            return "https://pokemongohub.net/sprites/normal/" + pokemon.getNumber() + ".png";
        } else {
            return "https://pokeraidbot2.herokuapp.com/img/" + pokemon.getName().toLowerCase() + ".png";
        }
    }

    /** Splits the command arguments on whitespace, collapsing short runs of spaces first. */
    public static String[] prepareArguments(CommandEvent commandEvent) {
        return commandEvent.getArgs().replaceAll("\\s{2,4}", " ").split(" ");
    }

    public static boolean isRaidEx(Raid raid, PokemonRaidStrategyService strategyService,
                                   PokemonRepository pokemonRepository) {
        return isRaidExPokemon(raid.getPokemon().getName(), strategyService, pokemonRepository);
    }

    /** Inserts a colon into compact time input ("945" -> "9:45", "0945" -> "09:45"). */
    public static String preProcessTimeString(String timeString) {
        if (timeString != null && timeString.matches("[0-9]{3,4}")) {
            return new StringBuilder(timeString).insert(timeString.length() - 2, ":").toString();
        } else {
            return timeString;
        }
    }

    /** @throws UserMessedUpException if a group is scheduled before the raid starts. */
    public static void assertGroupStartNotBeforeRaidStart(LocalDateTime raidStart, LocalDateTime groupStart,
                                                          User user, LocaleService localeService) {
        if (raidStart.isAfter(groupStart)) {
            throw new UserMessedUpException(user,
                    localeService.getMessageFor(LocaleService.NO_GROUP_BEFORE_RAID, localeService.getLocaleForUser(user),
                            printTimeIfSameDay(groupStart), printTimeIfSameDay(raidStart)));
        }
    }

    /** @throws UserMessedUpException unless the time lies within [raid start, raid end], endpoints inclusive. */
    public static void assertTimeInRaidTimespan(User user, LocalDateTime dateTimeToCheck, Raid raid,
                                                LocaleService localeService) {
        final LocalDateTime startOfRaid = getStartOfRaid(raid.getEndOfRaid(), raid.isExRaid());
        final boolean timeIsSameOrBeforeEnd = raid.getEndOfRaid().isAfter(dateTimeToCheck)
                || raid.getEndOfRaid().equals(dateTimeToCheck);
        final boolean timeIsSameOrAfterStart = startOfRaid.isBefore(dateTimeToCheck)
                || startOfRaid.equals(dateTimeToCheck);
        if (!(timeIsSameOrBeforeEnd && timeIsSameOrAfterStart)) {
            throw new UserMessedUpException(user,
                    localeService.getMessageFor(LocaleService.TIME_NOT_IN_RAID_TIMESPAN,
                            localeService.getLocaleForUser(user), printDateTime(dateTimeToCheck),
                            printDateTime(startOfRaid), printTimeIfSameDay(raid.getEndOfRaid())));
        }
    }

    public static Set<String> getResistantTo(PokemonTypes pokemonTypes) {
        return resistanceTable.getResistantTo(pokemonTypes);
    }

    /** @return true if the throwable (or its direct cause) looks like a network timeout/socket problem. */
    public static boolean isExceptionOrCauseNetworkIssues(Throwable t) {
        if (t == null) {
            return false;
        }
        // Bug fix: Throwable.getMessage() may be null, which previously caused an
        // NPE here instead of a clean false/instanceof check.
        final String message = t.getMessage();
        return (message != null && message.contains("SocketTimeoutException"))
                || isInstanceOfSocketException(t)
                || (t.getCause() != null && isInstanceOfSocketException(t.getCause()));
    }

    private static boolean isInstanceOfSocketException(Throwable t) {
        return (t instanceof SocketException) || (t instanceof SocketTimeoutException);
    }
}
apache-2.0
sonyfe25cp/ParseEverything
src/main/java/com/parseeverything/html/IndonesiaShangBaoParser.java
2375
/**
 *
 */
package com.parseeverything.html;

/**
 * Parser for Indonesia Shang Bao (印度尼西亚商报) article pages: extracts title,
 * paragraph text/HTML and the publication date, normalising the date to
 * yyyy-MM-dd.
 *
 * @author Mi Jing
 * @date 2014-3-18
 */
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.parseeverything.result.NewsModel;
import com.parseeverything.utils.NewsProvider;

public class IndonesiaShangBaoParser extends NewsPageParser {

    static Logger logger = LoggerFactory.getLogger(IndonesiaShangBaoParser.class);

    // Matches dates such as "March 18th, 2014" (month name, day + ordinal suffix, year).
    static Pattern datePattern = Pattern.compile("(\\w+\\s*\\d{1,2}\\w+\\,\\s*\\d{4})");

    /** Handles any URL hosted on shangbaoindonesia.com. */
    @Override
    public boolean match(String url) {
        return url.contains("shangbaoindonesia.com");
    }

    /**
     * Parses a fetched article page.
     *
     * @param url  the article URL (stored on the model)
     * @param html the raw page HTML
     * @return a populated {@link NewsModel}
     */
    @Override
    public NewsModel parse(String url, String html) {
        Document doc = Jsoup.parse(html);
        String title = doc.select(".singlePageTitle").text();
        Element contentElement = doc.select(".post").first();

        // StringBuilder instead of repeated String concatenation in the loop.
        StringBuilder content = new StringBuilder();
        StringBuilder contentHtml = new StringBuilder();
        for (Element part : contentElement.children()) {
            if (part.tagName().equals("p")) {
                content.append(part.text());
                contentHtml.append("<p>").append(part.html()).append("</p>");
            }
        }

        Elements select = contentElement.select(".postDate");
        String datetotal = select.text();
        Matcher m = datePattern.matcher(datetotal);
        if (m.find()) {
            datetotal = m.group(1);
        }
        // Expected shape after the regex: ["March", "18th,", "2014"].
        // NOTE(review): if the page lacks a recognisable date this still throws
        // ArrayIndexOutOfBoundsException, as the original code did — confirm
        // whether callers rely on that failure mode.
        String[] s1 = datetotal.split(" ");
        String year = s1[2];
        String month = s1[0];
        // Bug fix: the original used substring(0, 2), which for single-digit days
        // like "5th," produced "5t". Strip the ordinal suffix and zero-pad instead.
        String day = s1[1].replaceAll("\\D", "");
        if (day.length() == 1) {
            day = "0" + day;
        }
        switch (month) {
        case "January":
            month = "01";
            break;
        case "February":
            month = "02";
            break;
        case "March":
            month = "03";
            break;
        case "April":
            month = "04";
            break;
        case "May":
            month = "05";
            break;
        case "June":
            month = "06";
            break;
        case "July":
            month = "07";
            break;
        case "August":
            month = "08";
            break;
        case "September":
            month = "09";
            break;
        case "October":
            month = "10";
            break;
        case "November":
            month = "11";
            break;
        default:
            // Anything unrecognised (including "December") maps to 12, as before.
            month = "12";
            break;
        }
        String date = year + "-" + month + "-" + day;

        NewsModel model = new NewsModel(url, NewsProvider.IndonesiShangBao);
        model.setTitle(title);
        model.setContent(content.toString());
        model.setContentHtml(contentHtml.toString());
        model.setPublishDate(date);
        model.setHtml(html);
        return model;
    }
}
apache-2.0
multi-os-engine/moe-core
moe.apple/moe.platform.ios/src/main/java/apple/accelerate/struct/vDSP_uint24.java
1516
/* Copyright 2014-2016 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package apple.accelerate.struct; import org.moe.natj.c.StructObject; import org.moe.natj.c.ann.Structure; import org.moe.natj.c.ann.StructureField; import org.moe.natj.general.NatJ; import org.moe.natj.general.Pointer; import org.moe.natj.general.ann.Generated; @Generated @Structure() public final class vDSP_uint24 extends StructObject { private static long __natjCache; static { NatJ.register(); } @Generated public vDSP_uint24() { super(vDSP_uint24.class); } @Generated protected vDSP_uint24(Pointer peer) { super(peer); } /** * Unsigned 24-bit integer. */ @Generated @StructureField(order = 0, isGetter = true, count = 3) public native byte bytes(int field_idx); /** * Unsigned 24-bit integer. */ @Generated @StructureField(order = 0, isGetter = false, count = 3) public native void setBytes(byte value, int field_idx); }
apache-2.0
brokge/AndroidUtilClass
util/IoUtils.java
4873
package com.dxy.android.statistics.util;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.Reader;
import java.io.Writer;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.zip.Checksum;

/**
 * Utils for dealing with IO (streams, readers, ...).
 *
 * chenlw@dxyer.com
 * Created by chenlw on 2015/6/9.
 */
public class IoUtils {

    private static final int BUFFER_SIZE = 4096;

    /** Reads the stream to exhaustion and returns its contents; does not close the stream. */
    public static byte[] readAllBytes(InputStream in) throws IOException {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        copyAllBytes(in, out);
        return out.toByteArray();
    }

    /** Like {@link #readAllBytes(InputStream)}, but always closes the stream afterwards. */
    public static byte[] readAllBytesAndClose(InputStream in) throws IOException {
        try {
            return readAllBytes(in);
        } finally {
            safeClose(in);
        }
    }

    /** Reads the reader to exhaustion into a String; does not close the reader. */
    public static String readAllChars(Reader reader) throws IOException {
        char[] buffer = new char[2048];
        StringBuilder builder = new StringBuilder();
        while (true) {
            int read = reader.read(buffer);
            if (read == -1) {
                break;
            }
            builder.append(buffer, 0, read);
        }
        return builder.toString();
    }

    /** Like {@link #readAllChars(Reader)}, but always closes the reader afterwards. */
    public static String readAllCharsAndClose(Reader reader) throws IOException {
        try {
            return readAllChars(reader);
        } finally {
            safeClose(reader);
        }
    }

    /** Appends the text to the writer and always closes the writer afterwards. */
    public static void writeAllCharsAndClose(Writer writer, CharSequence text) throws IOException {
        try {
            writer.append(text);
        } finally {
            safeClose(writer);
        }
    }

    /** Feeds the remaining stream contents into the given checksum; does not close the stream. */
    public static void updateChecksum(InputStream in, Checksum checksum) throws IOException {
        byte[] buffer = new byte[BUFFER_SIZE];
        while (true) {
            int read = in.read(buffer);
            if (read == -1) {
                break;
            }
            checksum.update(buffer, 0, read);
        }
    }

    /** @return MD5 digest (32 characters). */
    public static String getMd5(InputStream in) throws IOException {
        byte[] digest = getDigest(in, "MD5");
        return StringUtils.toHexString(digest, 32);
    }

    /** @return SHA-1 digest (40 characters). */
    public static String getSha1(InputStream in) throws IOException {
        byte[] digest = getDigest(in, "SHA-1");
        return StringUtils.toHexString(digest, 40);
    }

    /**
     * Digests the remaining stream contents with the given algorithm; does not close the stream.
     *
     * @throws RuntimeException (wrapping NoSuchAlgorithmException) if the algorithm is unavailable
     */
    public static byte[] getDigest(InputStream in, String digestAlgo) throws IOException {
        MessageDigest digester;
        try {
            digester = MessageDigest.getInstance(digestAlgo);
        } catch (NoSuchAlgorithmException nsae) {
            throw new RuntimeException(nsae);
        }
        byte[] buffer = new byte[BUFFER_SIZE];
        while (true) {
            int read = in.read(buffer);
            if (read == -1) {
                break;
            }
            digester.update(buffer, 0, read);
        }
        return digester.digest();
    }

    /**
     * Copies all available data from in to out without closing any stream.
     *
     * @return number of bytes copied
     */
    public static int copyAllBytes(InputStream in, OutputStream out) throws IOException {
        int byteCount = 0;
        byte[] buffer = new byte[BUFFER_SIZE];
        while (true) {
            int read = in.read(buffer);
            if (read == -1) {
                break;
            }
            out.write(buffer, 0, read);
            byteCount += read;
        }
        return byteCount;
    }

    /** Closes the given stream inside a try/catch. Does nothing if stream is null. */
    public static void safeClose(InputStream in) {
        closeQuietly(in);
    }

    /** Closes the given stream inside a try/catch. Does nothing if stream is null. */
    public static void safeClose(OutputStream out) {
        closeQuietly(out);
    }

    /** Closes the given stream inside a try/catch. Does nothing if stream is null. */
    public static void safeClose(Reader in) {
        closeQuietly(in);
    }

    /** Closes the given stream inside a try/catch. Does nothing if stream is null. */
    public static void safeClose(Writer out) {
        closeQuietly(out);
    }

    /**
     * Shared implementation for the safeClose overloads: streams, readers and
     * writers all implement Closeable, so the four copy-pasted bodies collapse
     * into one. Fully qualified to avoid touching the import block.
     */
    private static void closeQuietly(java.io.Closeable closeable) {
        if (closeable != null) {
            try {
                closeable.close();
            } catch (IOException e) {
                // Silent by design: callers invoke safeClose from finally blocks.
            }
        }
    }
}
apache-2.0
shadowfox-ninja/ShadowUtils
app/src/main/java/tech/shadowfox/shadow/MainActivity.java
334
package tech.shadowfox.shadow; import android.support.v7.app.AppCompatActivity; import android.os.Bundle; public class MainActivity extends AppCompatActivity { @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_main); } }
apache-2.0
joakimsahlstrom/twyn
src/main/java/se/jsa/twyn/ArrayIndex.java
899
/* * Copyright 2015 Joakim Sahlström * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package se.jsa.twyn; import java.lang.annotation.ElementType; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; @Retention(RetentionPolicy.RUNTIME) @Target(ElementType.METHOD) public @interface ArrayIndex { int value(); }
apache-2.0
fpompermaier/onvif
onvif-ws-client/src/main/java/org/onvif/ver10/schema/ImageStabilization.java
4294
package org.onvif.ver10.schema;

import java.util.HashMap;
import java.util.Map;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlAnyAttribute;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlSchemaType;
import javax.xml.bind.annotation.XmlType;
import javax.xml.namespace.QName;
import org.apache.commons.lang3.builder.ToStringBuilder;
import org.apache.cxf.xjc.runtime.JAXBToStringStyle;

/**
 * <p>Java class for ImageStabilization complex type.
 *
 * <p>The following schema fragment specifies the expected content contained within this class.
 *
 * <pre>
 * &lt;complexType name="ImageStabilization"&gt;
 *   &lt;complexContent&gt;
 *     &lt;restriction base="{http://www.w3.org/2001/XMLSchema}anyType"&gt;
 *       &lt;sequence&gt;
 *         &lt;element name="Mode" type="{http://www.onvif.org/ver10/schema}ImageStabilizationMode"/&gt;
 *         &lt;element name="Level" type="{http://www.w3.org/2001/XMLSchema}float" minOccurs="0"/&gt;
 *         &lt;element name="Extension" type="{http://www.onvif.org/ver10/schema}ImageStabilizationExtension" minOccurs="0"/&gt;
 *       &lt;/sequence&gt;
 *       &lt;anyAttribute processContents='lax'/&gt;
 *     &lt;/restriction&gt;
 *   &lt;/complexContent&gt;
 * &lt;/complexType&gt;
 * </pre>
 *
 * NOTE(review): JAXB/xjc-generated binding for the ONVIF ver10 schema — do not
 * hand-edit; regenerate from the WSDL/XSD instead.
 */
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "ImageStabilization", propOrder = {
    "mode",
    "level",
    "extension"
})
public class ImageStabilization {

    // Required element per the schema (no minOccurs="0").
    @XmlElement(name = "Mode", required = true)
    @XmlSchemaType(name = "string")
    protected ImageStabilizationMode mode;
    // Optional element: may be null after unmarshalling.
    @XmlElement(name = "Level")
    protected Float level;
    // Optional extension point: may be null after unmarshalling.
    @XmlElement(name = "Extension")
    protected ImageStabilizationExtension extension;
    // Catch-all for attributes not bound by the schema (anyAttribute lax).
    @XmlAnyAttribute
    private Map<QName, String> otherAttributes = new HashMap<QName, String>();

    /**
     * Gets the value of the mode property.
     *
     * @return
     *     possible object is
     *     {@link ImageStabilizationMode }
     *
     */
    public ImageStabilizationMode getMode() {
        return mode;
    }

    /**
     * Sets the value of the mode property.
     *
     * @param value
     *     allowed object is
     *     {@link ImageStabilizationMode }
     *
     */
    public void setMode(ImageStabilizationMode value) {
        this.mode = value;
    }

    /**
     * Gets the value of the level property.
     *
     * @return
     *     possible object is
     *     {@link Float }
     *
     */
    public Float getLevel() {
        return level;
    }

    /**
     * Sets the value of the level property.
     *
     * @param value
     *     allowed object is
     *     {@link Float }
     *
     */
    public void setLevel(Float value) {
        this.level = value;
    }

    /**
     * Gets the value of the extension property.
     *
     * @return
     *     possible object is
     *     {@link ImageStabilizationExtension }
     *
     */
    public ImageStabilizationExtension getExtension() {
        return extension;
    }

    /**
     * Sets the value of the extension property.
     *
     * @param value
     *     allowed object is
     *     {@link ImageStabilizationExtension }
     *
     */
    public void setExtension(ImageStabilizationExtension value) {
        this.extension = value;
    }

    /**
     * Gets a map that contains attributes that aren't bound to any typed property on this class.
     *
     * <p>
     * the map is keyed by the name of the attribute and
     * the value is the string value of the attribute.
     *
     * the map returned by this method is live, and you can add new attribute
     * by updating the map directly. Because of this design, there's no setter.
     *
     *
     * @return
     *     always non-null
     */
    public Map<QName, String> getOtherAttributes() {
        return otherAttributes;
    }

    /**
     * Generates a String representation of the contents of this type.
     * This is an extension method, produced by the 'ts' xjc plugin
     *
     */
    @Override
    public String toString() {
        return ToStringBuilder.reflectionToString(this, JAXBToStringStyle.DEFAULT_STYLE);
    }
}
apache-2.0
phax/ph-commons
ph-commons/src/test/java/com/helger/commons/id/IIntIDProviderTest.java
1042
/* * Copyright (C) 2014-2022 Philip Helger (www.helger.com) * philip[at]helger[dot]com * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.helger.commons.id; import static org.junit.Assert.assertEquals; import org.junit.Test; /** * Test class for class {@link IIntIDProvider}. * * @author Philip Helger */ public final class IIntIDProviderTest { @Test public void testAll () { final IIntIDProvider <MockHasIntID> x = IHasIntID::getID; assertEquals (-3, x.getID (new MockHasIntID (-3))); } }
apache-2.0
wendal/alipay-sdk
src/main/java/com/alipay/api/response/AlipayMobileBeaconDeviceQueryResponse.java
1140
package com.alipay.api.response; import com.alipay.api.internal.mapping.ApiField; import com.alipay.api.domain.BeaconDeviceInfo; import com.alipay.api.AlipayResponse; /** * ALIPAY API: alipay.mobile.beacon.device.query response. * * @author auto create * @since 1.0, 2017-02-28 11:12:47 */ public class AlipayMobileBeaconDeviceQueryResponse extends AlipayResponse { private static final long serialVersionUID = 4743812712573537958L; /** * 蓝牙设备信息 */ @ApiField("beacon_device_info") private BeaconDeviceInfo beaconDeviceInfo; /** * 操作返回码,200为成功 */ @ApiField("code") private String code; /** * 请求处理结果 */ @ApiField("msg") private String msg; public void setBeaconDeviceInfo(BeaconDeviceInfo beaconDeviceInfo) { this.beaconDeviceInfo = beaconDeviceInfo; } public BeaconDeviceInfo getBeaconDeviceInfo( ) { return this.beaconDeviceInfo; } public void setCode(String code) { this.code = code; } public String getCode( ) { return this.code; } public void setMsg(String msg) { this.msg = msg; } public String getMsg( ) { return this.msg; } }
apache-2.0