repo_name
stringlengths
4
116
path
stringlengths
4
379
size
stringlengths
1
7
content
stringlengths
3
1.05M
license
stringclasses
15 values
muthuishere/sshcommandbroadcaster
src/com/sshutils/views/settings/ServerDetails.java
35807
/*
 * Copyright 2013 Muthukumaran (https://github.com/muthuishere/).
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.sshutils.views.settings;

import com.sshutils.beans.ConnectionInfo;
import com.sshutils.beans.ServerInfo;
import com.sshutils.utils.StringHelper;
import java.awt.event.ActionEvent;
import java.awt.event.ItemEvent;
import java.awt.event.KeyEvent;
import java.net.URL;
import javax.swing.AbstractAction;
import javax.swing.ActionMap;
import javax.swing.ImageIcon;
import javax.swing.InputMap;
import javax.swing.JComponent;
import javax.swing.KeyStroke;
import org.apache.log4j.Logger;

/**
 * Modal dialog for creating or editing a server connection profile.
 *
 * <p>Collects a profile name, an optional folder path, the target ("App Server")
 * host credentials, and optionally an SSH tunnel/gateway host on a second tab
 * that is enabled only while "Use Tunnel" is checked. On Save the form is
 * validated and the backing {@link ConnectionInfo} is populated; the caller
 * inspects {@link #getReturnStatus()} ({@link #RET_OK} / {@link #RET_CANCEL})
 * and {@link #getConnectionInfo()} after the dialog closes.
 *
 * <p>NOTE(review): several handlers here mutate Swing components from
 * freshly spawned background threads (see {@code resetError},
 * {@code updateProfileName}, {@code okButtonActionPerformed}); Swing requires
 * component access on the EDT — confirm whether this is intentional.
 */
public class ServerDetails extends javax.swing.JDialog {

    private static final Logger log = Logger.getLogger(ServerDetails.class);

    /**
     * A return status code - returned if Cancel button has been pressed
     */
    public static final int RET_CANCEL = 0;
    /**
     * A return status code - returned if OK button has been pressed
     */
    public static final int RET_OK = 1;

    // Profile being created/edited; replaced with a fresh instance when the
    // caller passes null (i.e. "add new profile" mode).
    private ConnectionInfo connectionInfo = null;

    /** Returns the profile backing this dialog (populated on Save). */
    public ConnectionInfo getConnectionInfo() {
        return connectionInfo;
    }

    /** Replaces the profile backing this dialog. */
    public void setConnectionInfo(ConnectionInfo connectionInfo) {
        this.connectionInfo = connectionInfo;
    }

    /** Convenience constructor with no pre-selected folder. */
    public ServerDetails(String title, ConnectionInfo connectionInfo, java.awt.Frame parent, boolean modal) {
        this(title, "", connectionInfo, parent, modal);
    }

    /**
     * Creates new form ServerDetails.
     *
     * @param title          window title
     * @param profileFolder  folder path pre-filled into the Folder field (ignored if empty)
     * @param connectionInfo existing profile to edit, or null to create a new one
     * @param parent         owning frame
     * @param modal          whether the dialog blocks its owner
     */
    public ServerDetails(String title, String profileFolder, ConnectionInfo connectionInfo, java.awt.Frame parent, boolean modal) {
        super(parent, modal);
        this.setTitle(title);
        initComponents();
        URL iconURL = getClass().getResource("/com/sshutils/resources/red/16x16/app.png"); // iconURL is null when not found
        // NOTE(review): if the resource is missing, getResource returns null and
        // new ImageIcon(null) throws NullPointerException — confirm the icon is
        // always bundled, or guard this.
        ImageIcon icon = new ImageIcon(iconURL);
        this.setIconImage(icon.getImage());
        this.connectionInfo = connectionInfo;
        resetError();
        log.info("Components" + this.jTabbedPane1.getTabCount());
        // Tunnel tab (index 1) starts disabled; enabled by the "Use Tunnel" checkbox.
        this.jTabbedPane1.setEnabledAt(1, false);
        tabTunnel.setEnabled(false);
        if (!StringHelper.isEmpty(profileFolder)) {
            txtFolder.setText(profileFolder);
        }
        if (null != connectionInfo) {
            // Edit mode: pre-fill every field from the existing profile.
            txtOnlyProfileName.setText(connectionInfo.getOnlyProfileName());
            txtFolder.setText(connectionInfo.getFolderName());
            if (null != connectionInfo.getServerInfo()) {
                txtServerHostName.setText(connectionInfo.getServerInfo().getHost());
                txtServerPassword.setText(connectionInfo.getServerInfo().getPassword());
                txtServerUserName.setText(connectionInfo.getServerInfo().getUser());
                cmbServerPort.setText(connectionInfo.getServerInfo().getPort() + "");
            }
            if (null != connectionInfo.getGatewayInfo() && StringHelper.isEmpty(connectionInfo.getGatewayInfo().getHost()) == false) {
                //Enable tunnel
                txtTunnelHostName.setText(connectionInfo.getGatewayInfo().getHost());
                txtTunnelPassword.setText(connectionInfo.getGatewayInfo().getPassword());
                txtTunnelUserName.setText(connectionInfo.getGatewayInfo().getUser());
                cmbTunnelPort.setText(connectionInfo.getGatewayInfo().getPort() + "");
                this.jTabbedPane1.setEnabledAt(1, true);
                this.chkUseTunnel.setSelected(true);
            }
        } else {
            this.connectionInfo = new ConnectionInfo();
        }

        // Close the dialog when Esc is pressed
        String cancelName = "cancel";
        InputMap inputMap = getRootPane().getInputMap(JComponent.WHEN_ANCESTOR_OF_FOCUSED_COMPONENT);
        inputMap.put(KeyStroke.getKeyStroke(KeyEvent.VK_ESCAPE, 0), cancelName);
        ActionMap actionMap = getRootPane().getActionMap();
        actionMap.put(cancelName, new AbstractAction() {
            public void actionPerformed(ActionEvent e) {
                doClose(RET_CANCEL);
            }
        });
    }

    /**
     * Clears the error label. NOTE(review): runs on a plain background thread,
     * not the EDT — confirm this is safe for the target JVM/LAF.
     */
    public void resetError() {
        new Thread() {
            public void run() {
                lblError.setText("");
            }
        }.start();
    }

    /**
     * Auto-fills the profile name as "user@host" when the user has filled in
     * host and username but left the profile name empty. Triggered from the
     * focus-lost handlers of the host/username fields.
     */
    public void updateProfileName() {
        new Thread() {
            public void run() {
                if (StringHelper.isEmpty(txtOnlyProfileName.getText())) {
                    if (!StringHelper.isEmpty(txtServerHostName.getText()) && !StringHelper.isEmpty(txtServerUserName.getText())) {
                        txtOnlyProfileName.setText(txtServerUserName.getText() + "@" + txtServerHostName.getText());
                    }
                }
            }
        }.start();
    }

    /** Shows a validation message in the red error label, prefixed with "* ". */
    public void setError(String txt) {
        lblError.setText("* " + txt);
    }

    /**
     * @return the return status of this dialog - one of RET_OK or RET_CANCEL
     */
    public int getReturnStatus() {
        return returnStatus;
    }

    /**
     * This method is called from within the constructor to initialize the form.
     * WARNING: Do NOT modify this code. The content of this method is always
     * regenerated by the Form Editor.
     */
    @SuppressWarnings("unchecked")
    // <editor-fold defaultstate="collapsed" desc="Generated Code">//GEN-BEGIN:initComponents
    private void initComponents() {

        okButton = new javax.swing.JButton();
        cancelButton = new javax.swing.JButton();
        jTabbedPane1 = new javax.swing.JTabbedPane();
        tabHost = new javax.swing.JPanel();
        jLabel1 = new javax.swing.JLabel();
        jLabel2 = new javax.swing.JLabel();
        txtServerHostName = new javax.swing.JTextField();
        jLabel3 = new javax.swing.JLabel();
        txtServerUserName = new javax.swing.JTextField();
        jLabel4 = new javax.swing.JLabel();
        txtServerPassword = new javax.swing.JPasswordField();
        chkUseTunnel = new javax.swing.JCheckBox();
        cmbServerPort = new javax.swing.JTextField();
        tabTunnel = new javax.swing.JPanel();
        jLabel5 = new javax.swing.JLabel();
        txtTunnelHostName = new javax.swing.JTextField();
        txtTunnelUserName = new javax.swing.JTextField();
        jLabel6 = new javax.swing.JLabel();
        jLabel7 = new javax.swing.JLabel();
        txtTunnelPassword = new javax.swing.JPasswordField();
        jLabel8 = new javax.swing.JLabel();
        cmbTunnelPort = new javax.swing.JTextField();
        jLabel9 = new javax.swing.JLabel();
        txtOnlyProfileName = new javax.swing.JTextField();
        lblError = new javax.swing.JLabel();
        jLabel10 = new javax.swing.JLabel();
        txtFolder = new javax.swing.JTextField();

        addWindowListener(new java.awt.event.WindowAdapter() {
            public void windowClosing(java.awt.event.WindowEvent evt) {
                closeDialog(evt);
            }
            public void windowOpened(java.awt.event.WindowEvent evt) {
                formWindowOpened(evt);
            }
        });

        okButton.setLabel("Save");
        okButton.addActionListener(new java.awt.event.ActionListener() {
            public void actionPerformed(java.awt.event.ActionEvent evt) {
                okButtonActionPerformed(evt);
            }
        });

        cancelButton.setText("Cancel");
        cancelButton.addActionListener(new java.awt.event.ActionListener() {
            public void actionPerformed(java.awt.event.ActionEvent evt) {
                cancelButtonActionPerformed(evt);
            }
        });

        jTabbedPane1.setTabPlacement(javax.swing.JTabbedPane.LEFT);

        jLabel1.setText("Host Name");

        jLabel2.setText("Port");

        txtServerHostName.addFocusListener(new java.awt.event.FocusAdapter() {
            public void focusLost(java.awt.event.FocusEvent evt) {
                txtServerHostNameFocusLost(evt);
            }
        });
        txtServerHostName.addKeyListener(new java.awt.event.KeyAdapter() {
            public void keyPressed(java.awt.event.KeyEvent evt) {
                txtServerHostNameKeyPressed(evt);
            }
        });

        jLabel3.setText("UserName");

        txtServerUserName.addFocusListener(new java.awt.event.FocusAdapter() {
            public void focusLost(java.awt.event.FocusEvent evt) {
                txtServerUserNameFocusLost(evt);
            }
        });
        txtServerUserName.addKeyListener(new java.awt.event.KeyAdapter() {
            public void keyPressed(java.awt.event.KeyEvent evt) {
                txtServerUserNameKeyPressed(evt);
            }
        });

        jLabel4.setText("Password");

        txtServerPassword.addKeyListener(new java.awt.event.KeyAdapter() {
            public void keyPressed(java.awt.event.KeyEvent evt) {
                txtServerPasswordKeyPressed(evt);
            }
        });

        chkUseTunnel.setText("Use Tunnel");
        chkUseTunnel.setToolTipText("");
        chkUseTunnel.addItemListener(new java.awt.event.ItemListener() {
            public void itemStateChanged(java.awt.event.ItemEvent evt) {
                chkUseTunnelItemStateChanged(evt);
            }
        });
        chkUseTunnel.addActionListener(new java.awt.event.ActionListener() {
            public void actionPerformed(java.awt.event.ActionEvent evt) {
                chkUseTunnelActionPerformed(evt);
            }
        });

        cmbServerPort.setFont(new java.awt.Font("Tahoma", 1, 11)); // NOI18N
        cmbServerPort.setText("22");
        cmbServerPort.setToolTipText("");

        javax.swing.GroupLayout tabHostLayout = new javax.swing.GroupLayout(tabHost);
        tabHost.setLayout(tabHostLayout);
        tabHostLayout.setHorizontalGroup(
            tabHostLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
            .addGroup(tabHostLayout.createSequentialGroup()
                .addGap(20, 20, 20)
                .addGroup(tabHostLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
                    .addGroup(tabHostLayout.createSequentialGroup()
                        .addGroup(tabHostLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
                            .addComponent(jLabel3)
                            .addComponent(txtServerUserName, javax.swing.GroupLayout.PREFERRED_SIZE, 174, javax.swing.GroupLayout.PREFERRED_SIZE))
                        .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED, 102, Short.MAX_VALUE)
                        .addGroup(tabHostLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING, false)
                            .addGroup(tabHostLayout.createSequentialGroup()
                                .addComponent(jLabel4)
                                .addGap(128, 128, 128))
                            .addComponent(txtServerPassword))
                        .addGap(39, 39, 39))
                    .addGroup(tabHostLayout.createSequentialGroup()
                        .addGroup(tabHostLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
                            .addGroup(tabHostLayout.createSequentialGroup()
                                .addComponent(jLabel1)
                                .addGap(0, 0, Short.MAX_VALUE))
                            .addComponent(txtServerHostName))
                        .addGap(18, 18, 18)
                        .addGroup(tabHostLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
                            .addComponent(jLabel2)
                            .addComponent(cmbServerPort, javax.swing.GroupLayout.PREFERRED_SIZE, 46, javax.swing.GroupLayout.PREFERRED_SIZE))
                        .addGap(31, 31, 31))
                    .addGroup(tabHostLayout.createSequentialGroup()
                        .addComponent(chkUseTunnel)
                        .addContainerGap(javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE))))
        );
        tabHostLayout.setVerticalGroup(
            tabHostLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
            .addGroup(tabHostLayout.createSequentialGroup()
                .addContainerGap()
                .addGroup(tabHostLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
                    .addComponent(jLabel1)
                    .addComponent(jLabel2))
                .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
                .addGroup(tabHostLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
                    .addComponent(txtServerHostName, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
                    .addComponent(cmbServerPort, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
                .addGap(27, 27, 27)
                .addComponent(chkUseTunnel)
                .addGap(18, 18, 18)
                .addGroup(tabHostLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.TRAILING)
                    .addGroup(tabHostLayout.createSequentialGroup()
                        .addComponent(jLabel3)
                        .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
                        .addComponent(txtServerUserName, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
                    .addGroup(tabHostLayout.createSequentialGroup()
                        .addComponent(jLabel4)
                        .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
                        .addComponent(txtServerPassword, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)))
                .addContainerGap(51, Short.MAX_VALUE))
        );

        jTabbedPane1.addTab("App Server", tabHost);

        tabTunnel.setVerifyInputWhenFocusTarget(false);

        jLabel5.setText("Host Name");

        txtTunnelHostName.addKeyListener(new java.awt.event.KeyAdapter() {
            public void keyPressed(java.awt.event.KeyEvent evt) {
                txtTunnelHostNameKeyPressed(evt);
            }
        });

        txtTunnelUserName.addKeyListener(new java.awt.event.KeyAdapter() {
            public void keyPressed(java.awt.event.KeyEvent evt) {
                txtTunnelUserNameKeyPressed(evt);
            }
        });

        jLabel6.setText("UserName");

        jLabel7.setText("Password");

        txtTunnelPassword.addKeyListener(new java.awt.event.KeyAdapter() {
            public void keyPressed(java.awt.event.KeyEvent evt) {
                txtTunnelPasswordKeyPressed(evt);
            }
        });

        jLabel8.setText("Port");

        cmbTunnelPort.setFont(new java.awt.Font("Tahoma", 1, 11)); // NOI18N
        cmbTunnelPort.setText("22");
        cmbTunnelPort.setToolTipText("");

        javax.swing.GroupLayout tabTunnelLayout = new javax.swing.GroupLayout(tabTunnel);
        tabTunnel.setLayout(tabTunnelLayout);
        tabTunnelLayout.setHorizontalGroup(
            tabTunnelLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
            .addGroup(javax.swing.GroupLayout.Alignment.TRAILING, tabTunnelLayout.createSequentialGroup()
                .addContainerGap()
                .addGroup(tabTunnelLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.TRAILING)
                    .addGroup(javax.swing.GroupLayout.Alignment.LEADING, tabTunnelLayout.createSequentialGroup()
                        .addGroup(tabTunnelLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
                            .addComponent(jLabel6)
                            .addComponent(txtTunnelUserName, javax.swing.GroupLayout.PREFERRED_SIZE, 174, javax.swing.GroupLayout.PREFERRED_SIZE))
                        .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED, 102, Short.MAX_VALUE)
                        .addGroup(tabTunnelLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING, false)
                            .addComponent(jLabel7)
                            .addComponent(txtTunnelPassword, javax.swing.GroupLayout.PREFERRED_SIZE, 174, javax.swing.GroupLayout.PREFERRED_SIZE)))
                    .addGroup(tabTunnelLayout.createSequentialGroup()
                        .addGroup(tabTunnelLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
                            .addGroup(tabTunnelLayout.createSequentialGroup()
                                .addComponent(jLabel5)
                                .addGap(0, 0, Short.MAX_VALUE))
                            .addComponent(txtTunnelHostName))
                        .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
                        .addGroup(tabTunnelLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
                            .addComponent(jLabel8)
                            .addComponent(cmbTunnelPort, javax.swing.GroupLayout.PREFERRED_SIZE, 46, javax.swing.GroupLayout.PREFERRED_SIZE))
                        .addGap(4, 4, 4)))
                .addGap(49, 49, 49))
        );
        tabTunnelLayout.setVerticalGroup(
            tabTunnelLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
            .addGroup(tabTunnelLayout.createSequentialGroup()
                .addContainerGap()
                .addGroup(tabTunnelLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
                    .addComponent(jLabel5)
                    .addComponent(jLabel8))
                .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
                .addGroup(tabTunnelLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
                    .addComponent(txtTunnelHostName, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
                    .addComponent(cmbTunnelPort, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
                .addGap(71, 71, 71)
                .addGroup(tabTunnelLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.TRAILING)
                    .addGroup(tabTunnelLayout.createSequentialGroup()
                        .addComponent(jLabel6)
                        .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
                        .addComponent(txtTunnelUserName, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
                    .addGroup(tabTunnelLayout.createSequentialGroup()
                        .addComponent(jLabel7)
                        .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
                        .addComponent(txtTunnelPassword, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)))
                .addContainerGap(48, Short.MAX_VALUE))
        );

        jTabbedPane1.addTab("Tunnel", tabTunnel);

        jLabel9.setText("ProfileName");

        txtOnlyProfileName.addKeyListener(new java.awt.event.KeyAdapter() {
            public void keyPressed(java.awt.event.KeyEvent evt) {
                txtOnlyProfileNameKeyPressed(evt);
            }
        });

        lblError.setFont(new java.awt.Font("Tahoma", 1, 10)); // NOI18N
        lblError.setForeground(new java.awt.Color(255, 0, 0));
        lblError.setText("jLabel10");

        jLabel10.setText("Folder");

        txtFolder.setToolTipText("Use / character for each folder");
        txtFolder.addKeyListener(new java.awt.event.KeyAdapter() {
            public void keyPressed(java.awt.event.KeyEvent evt) {
                txtFolderKeyPressed(evt);
            }
        });

        javax.swing.GroupLayout layout = new javax.swing.GroupLayout(getContentPane());
        getContentPane().setLayout(layout);
        layout.setHorizontalGroup(
            layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
            .addGroup(layout.createSequentialGroup()
                .addGap(19, 19, 19)
                .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.TRAILING)
                    .addGroup(layout.createSequentialGroup()
                        .addComponent(jLabel10)
                        .addGap(18, 18, 18)
                        .addComponent(txtFolder, javax.swing.GroupLayout.PREFERRED_SIZE, 400, javax.swing.GroupLayout.PREFERRED_SIZE))
                    .addGroup(layout.createSequentialGroup()
                        .addComponent(jLabel9)
                        .addGap(18, 18, 18)
                        .addComponent(txtOnlyProfileName, javax.swing.GroupLayout.PREFERRED_SIZE, 400, javax.swing.GroupLayout.PREFERRED_SIZE)))
                .addContainerGap(javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE))
            .addGroup(layout.createSequentialGroup()
                .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
                    .addGroup(layout.createSequentialGroup()
                        .addGap(83, 83, 83)
                        .addComponent(lblError, javax.swing.GroupLayout.PREFERRED_SIZE, 277, javax.swing.GroupLayout.PREFERRED_SIZE)
                        .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)
                        .addComponent(okButton, javax.swing.GroupLayout.PREFERRED_SIZE, 67, javax.swing.GroupLayout.PREFERRED_SIZE)
                        .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
                        .addComponent(cancelButton))
                    .addGroup(javax.swing.GroupLayout.Alignment.TRAILING, layout.createSequentialGroup()
                        .addContainerGap(javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)
                        .addComponent(jTabbedPane1, javax.swing.GroupLayout.PREFERRED_SIZE, 582, javax.swing.GroupLayout.PREFERRED_SIZE)))
                .addContainerGap())
        );

        layout.linkSize(javax.swing.SwingConstants.HORIZONTAL, new java.awt.Component[] {cancelButton, okButton});

        layout.setVerticalGroup(
            layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
            .addGroup(javax.swing.GroupLayout.Alignment.TRAILING, layout.createSequentialGroup()
                .addGap(4, 4, 4)
                .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
                    .addComponent(txtFolder, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
                    .addComponent(jLabel10))
                .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
                .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
                    .addComponent(txtOnlyProfileName, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
                    .addComponent(jLabel9))
                .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
                .addComponent(jTabbedPane1, javax.swing.GroupLayout.PREFERRED_SIZE, 215, javax.swing.GroupLayout.PREFERRED_SIZE)
                .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED, 66, Short.MAX_VALUE)
                .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
                    .addComponent(cancelButton)
                    .addComponent(okButton)
                    .addComponent(lblError))
                .addContainerGap())
        );

        getRootPane().setDefaultButton(okButton);

        pack();
    }// </editor-fold>//GEN-END:initComponents

    // Validates the form on a background thread; on the first failure the last
    // error message wins (checks are not short-circuited). On success the
    // ConnectionInfo is populated and the dialog closes with RET_OK.
    // NOTE(review): Integer.parseInt on the port fields is unguarded — a
    // non-numeric port throws NumberFormatException inside this background
    // thread and the Save silently does nothing; confirm/guard.
    private void okButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_okButtonActionPerformed
        //
        new Thread() {
            public void run() {
                ServerInfo serverInfo = new ServerInfo();
                ServerInfo tunnelInfo = new ServerInfo();
                String errText = "";
                if (StringHelper.isEmpty(txtServerHostName.getText())) {
                    errText = "Server HostName cannot be Empty !";
                } else {
                    serverInfo.setHost(txtServerHostName.getText());
                }
                if (StringHelper.isEmpty(new String(txtServerPassword.getPassword()))) {
                    errText = "Server Password cannot be Empty !";
                } else {
                    serverInfo.setPassword(new String(txtServerPassword.getPassword()));
                }
                if (StringHelper.isEmpty(txtServerUserName.getText())) {
                    errText = "Server Username cannot be Empty !";
                } else {
                    serverInfo.setUser(txtServerUserName.getText());
                }
                if (StringHelper.isEmpty(txtOnlyProfileName.getText())) {
                    errText = "ProfileName cannot be Empty !";
                } else {
                    // Normalize the folder: strip a single leading '/' and
                    // ensure exactly one trailing '/' before prefixing the name.
                    String foldername = txtFolder.getText();
                    if (!StringHelper.isEmpty(foldername)) {
                        if (foldername.startsWith("/"))
                            foldername = foldername.substring(1);
                        if (!foldername.endsWith("/"))
                            foldername = foldername + "/";
                    }
                    connectionInfo.setProfileName(foldername + txtOnlyProfileName.getText());
                }
                serverInfo.setPort(Integer.parseInt(cmbServerPort.getText()));
                if (chkUseTunnel.isSelected()) {
                    if (StringHelper.isEmpty(txtTunnelHostName.getText())) {
                        errText = "Tunnel HostName cannot be Empty !";
                    } else {
                        tunnelInfo.setHost(txtTunnelHostName.getText());
                    }
                    if (StringHelper.isEmpty(new String(txtTunnelPassword.getPassword()))) {
                        errText = "Tunnel Password cannot be Empty !";
                    } else {
                        tunnelInfo.setPassword(new String(txtTunnelPassword.getPassword()));
                    }
                    if (StringHelper.isEmpty(txtTunnelUserName.getText())) {
                        errText = "Tunnel Username cannot be Empty !";
                    } else {
                        tunnelInfo.setUser(txtTunnelUserName.getText());
                    }
                    tunnelInfo.setPort(Integer.parseInt(cmbTunnelPort.getText()));
                }
                if (!StringHelper.isEmpty(errText)) {
                    setError(errText);
                    return;
                }
                {
                    //Save the connectionInfo Object and save it in settings
                    connectionInfo.setServerInfo(serverInfo);
                    connectionInfo.setGatewayInfo(tunnelInfo);
                    doClose(RET_OK);
                }
            }
        }.start();
    }//GEN-LAST:event_okButtonActionPerformed

    private void cancelButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_cancelButtonActionPerformed
        doClose(RET_CANCEL);
    }//GEN-LAST:event_cancelButtonActionPerformed

    /**
     * Closes the dialog
     */
    private void closeDialog(java.awt.event.WindowEvent evt) {//GEN-FIRST:event_closeDialog
        doClose(RET_CANCEL);
    }//GEN-LAST:event_closeDialog

    // Intentionally a no-op; tab toggling is handled by the item listener below.
    private void chkUseTunnelActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_chkUseTunnelActionPerformed
        // TODO add your handling code here:
        new Thread() {
            public void run() {
                //ServerDetails dialog = new ServerDetails("Add Server",new javax.swing.JFrame(), true);
            }
        }.start();
    }//GEN-LAST:event_chkUseTunnelActionPerformed

    // Enables/disables the Tunnel tab to mirror the "Use Tunnel" checkbox state.
    private void chkUseTunnelItemStateChanged(java.awt.event.ItemEvent evt) {//GEN-FIRST:event_chkUseTunnelItemStateChanged
        // TODO add your handling code here:
        final int eventtype = evt.getStateChange();
        new Thread() {
            public void run() {
                if (eventtype == ItemEvent.SELECTED) {
                    jTabbedPane1.setEnabledAt(1, true);
                } else {
                    jTabbedPane1.setEnabledAt(1, false);
                }
            }
        }.start();
    }//GEN-LAST:event_chkUseTunnelItemStateChanged

    private void formWindowOpened(java.awt.event.WindowEvent evt) {//GEN-FIRST:event_formWindowOpened
        // TODO add your handling code here:
    }//GEN-LAST:event_formWindowOpened

    // The key-pressed handlers below all clear any previous validation error
    // as soon as the user starts editing a field again.
    private void txtServerHostNameKeyPressed(java.awt.event.KeyEvent evt) {//GEN-FIRST:event_txtServerHostNameKeyPressed
        // TODO add your handling code here:
        resetError();
    }//GEN-LAST:event_txtServerHostNameKeyPressed

    private void txtServerUserNameKeyPressed(java.awt.event.KeyEvent evt) {//GEN-FIRST:event_txtServerUserNameKeyPressed
        // TODO add your handling code here:
        resetError();
    }//GEN-LAST:event_txtServerUserNameKeyPressed

    private void txtServerPasswordKeyPressed(java.awt.event.KeyEvent evt) {//GEN-FIRST:event_txtServerPasswordKeyPressed
        // TODO add your handling code here:
        resetError();
    }//GEN-LAST:event_txtServerPasswordKeyPressed

    private void txtOnlyProfileNameKeyPressed(java.awt.event.KeyEvent evt) {//GEN-FIRST:event_txtOnlyProfileNameKeyPressed
        // TODO add your handling code here:
        resetError();
    }//GEN-LAST:event_txtOnlyProfileNameKeyPressed

    private void txtTunnelHostNameKeyPressed(java.awt.event.KeyEvent evt) {//GEN-FIRST:event_txtTunnelHostNameKeyPressed
        // TODO add your handling code here:
        resetError();
    }//GEN-LAST:event_txtTunnelHostNameKeyPressed

    private void txtTunnelUserNameKeyPressed(java.awt.event.KeyEvent evt) {//GEN-FIRST:event_txtTunnelUserNameKeyPressed
        // TODO add your handling code here:
        resetError();
    }//GEN-LAST:event_txtTunnelUserNameKeyPressed

    private void txtTunnelPasswordKeyPressed(java.awt.event.KeyEvent evt) {//GEN-FIRST:event_txtTunnelPasswordKeyPressed
        // TODO add your handling code here:
        resetError();
    }//GEN-LAST:event_txtTunnelPasswordKeyPressed

    // Focus-lost on host/username: offer a default "user@host" profile name.
    private void txtServerHostNameFocusLost(java.awt.event.FocusEvent evt) {//GEN-FIRST:event_txtServerHostNameFocusLost
        // TODO add your handling code here:
        updateProfileName();
    }//GEN-LAST:event_txtServerHostNameFocusLost

    private void txtServerUserNameFocusLost(java.awt.event.FocusEvent evt) {//GEN-FIRST:event_txtServerUserNameFocusLost
        // TODO add your handling code here:
        updateProfileName();
    }//GEN-LAST:event_txtServerUserNameFocusLost

    private void txtFolderKeyPressed(java.awt.event.KeyEvent evt) {//GEN-FIRST:event_txtFolderKeyPressed
        // TODO add your handling code here:
    }//GEN-LAST:event_txtFolderKeyPressed

    // Records the result code, hides and disposes the dialog.
    private void doClose(int retStatus) {
        returnStatus = retStatus;
        setVisible(false);
        dispose();
    }

    /**
     * @param args the command line arguments
     */
    public static void main(String args[]) {
        /* Set the Nimbus look and feel */
        //<editor-fold defaultstate="collapsed" desc=" Look and feel setting code (optional) ">
        /* If Nimbus (introduced in Java SE 6) is not available, stay with the default look and feel.
         * For details see http://download.oracle.com/javase/tutorial/uiswing/lookandfeel/plaf.html
         */
        try {
            for (javax.swing.UIManager.LookAndFeelInfo info : javax.swing.UIManager.getInstalledLookAndFeels()) {
                if ("Nimbus".equals(info.getName())) {
                    javax.swing.UIManager.setLookAndFeel(info.getClassName());
                    break;
                }
            }
        } catch (ClassNotFoundException ex) {
            java.util.logging.Logger.getLogger(ServerDetails.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);
        } catch (InstantiationException ex) {
            java.util.logging.Logger.getLogger(ServerDetails.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);
        } catch (IllegalAccessException ex) {
            java.util.logging.Logger.getLogger(ServerDetails.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);
        } catch (javax.swing.UnsupportedLookAndFeelException ex) {
            java.util.logging.Logger.getLogger(ServerDetails.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);
        }
        //</editor-fold>

        /* Create and display the dialog */
        java.awt.EventQueue.invokeLater(new Runnable() {
            public void run() {
                ServerDetails dialog = new ServerDetails("empty", null, new javax.swing.JFrame(), true);
                dialog.addWindowListener(new java.awt.event.WindowAdapter() {
                    @Override
                    public void windowClosing(java.awt.event.WindowEvent e) {
                        System.exit(0);
                    }
                });
                dialog.setVisible(true);
            }
        });
    }

    // Variables declaration - do not modify//GEN-BEGIN:variables
    private javax.swing.JButton cancelButton;
    private javax.swing.JCheckBox chkUseTunnel;
    private javax.swing.JTextField cmbServerPort;
    private javax.swing.JTextField cmbTunnelPort;
    private javax.swing.JLabel jLabel1;
    private javax.swing.JLabel jLabel10;
    private javax.swing.JLabel jLabel2;
    private javax.swing.JLabel jLabel3;
    private javax.swing.JLabel jLabel4;
    private javax.swing.JLabel jLabel5;
    private javax.swing.JLabel jLabel6;
    private javax.swing.JLabel jLabel7;
    private javax.swing.JLabel jLabel8;
    private javax.swing.JLabel jLabel9;
    private javax.swing.JTabbedPane jTabbedPane1;
    private javax.swing.JLabel lblError;
    private javax.swing.JButton okButton;
    private javax.swing.JPanel tabHost;
    private javax.swing.JPanel tabTunnel;
    private javax.swing.JTextField txtFolder;
    private javax.swing.JTextField txtOnlyProfileName;
    private javax.swing.JTextField txtServerHostName;
    private javax.swing.JPasswordField txtServerPassword;
    private javax.swing.JTextField txtServerUserName;
    private javax.swing.JTextField txtTunnelHostName;
    private javax.swing.JPasswordField txtTunnelPassword;
    private javax.swing.JTextField txtTunnelUserName;
    // End of variables declaration//GEN-END:variables

    // Result of the last close, read by callers via getReturnStatus().
    private int returnStatus = RET_CANCEL;
}
apache-2.0
qwangrepos/helm
pkg/helm/portforwarder/portforwarder_test.go
2049
/* Copyright 2016 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package portforwarder import ( "testing" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" ) func mockTillerPod() api.Pod { return api.Pod{ ObjectMeta: api.ObjectMeta{ Name: "orca", Namespace: api.NamespaceDefault, Labels: map[string]string{"app": "helm", "name": "tiller"}, }, Status: api.PodStatus{ Phase: api.PodRunning, Conditions: []api.PodCondition{ { Status: api.ConditionTrue, Type: api.PodReady, }, }, }, } } func mockTillerPodPending() api.Pod { p := mockTillerPod() p.Name = "blue" p.Status.Conditions[0].Status = api.ConditionFalse return p } func TestGetFirstPod(t *testing.T) { tests := []struct { name string pods []api.Pod expected string err bool }{ { name: "with a ready pod", pods: []api.Pod{mockTillerPod()}, expected: "orca", }, { name: "without a ready pod", pods: []api.Pod{mockTillerPodPending()}, err: true, }, { name: "without a pod", pods: []api.Pod{}, err: true, }, } for _, tt := range tests { client := fake.NewSimpleClientset(&api.PodList{Items: tt.pods}) name, err := getTillerPodName(client.Core(), api.NamespaceDefault) if (err != nil) != tt.err { t.Errorf("%q. expected error: %v, got %v", tt.name, tt.err, err) } if name != tt.expected { t.Errorf("%q. expected %q, got %q", tt.name, tt.expected, name) } } }
apache-2.0
CoffeeMail/CoffeeMail
src/coffeemail/dns/MXRecord.java
972
package coffeemail.dns; public class MXRecord implements Comparable<MXRecord> { private String domain; private short priority; private short port = 25; public MXRecord(String domain, int priority) { if (domain.replace(":", "").length() < domain.length()) { String[] parts = domain.split(":"); this.domain = parts[0]; this.port = Short.parseShort(parts[1]); } else { this.domain = domain; } this.priority = (short) priority; } public String getDomain() { return domain; } public short getPriority() { return priority; } public short getPort() { return hasPort() ? port : 25; } public boolean hasPort() { return port != 25; } @Override public String toString() { return priority + "-" + domain + ":" + getPort(); } @Override public int compareTo(MXRecord mxr) { if (getPriority() > mxr.getPriority()) { return 10; } else if (getPriority() < mxr.getPriority()) { return -10; } else { return 0; } } }
apache-2.0
mmm2a/game-center
src/com/morgan/server/constants/PageConstantsSource.java
322
package com.morgan.server.constants;

/**
 * Interface for a type that can provide page constants.
 *
 * <p>Implementations contribute their constants into a shared
 * {@link PageConstants} sink rather than exposing them directly.
 *
 * @author mark@mark-morgan.net (Mark Morgan)
 */
public interface PageConstantsSource {

  /**
   * Asks this provider to add its constants to the sink.
   *
   * @param constantsSink the sink that collects the constants for a page
   */
  void provideConstantsInto(PageConstants constantsSink);
}
apache-2.0
pepperonas/AndBasx
library/src/main/java/com/pepperonas/andbasx/format/TimeFormatUtilsLocalized.java
5951
/*
 * Copyright (c) 2017 Martin Pfeffer
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.pepperonas.andbasx.format;

import com.pepperonas.andbasx.system.DeviceUtils;
import com.pepperonas.jbasx.format.TimeFormatUtils;

import java.util.Date;

/**
 * Locale-aware facade over jbasx's {@link TimeFormatUtils}: every method
 * delegates to the corresponding jbasx call, supplying the current device
 * locale via {@link DeviceUtils#getLocale()} wherever the underlying API
 * accepts one. Holds no state; all members are static.
 */
public class TimeFormatUtilsLocalized {

    /** Default date/time pattern (mirrors {@link TimeFormatUtils#DEFAULT_FORMAT}). */
    public static final String DEFAULT_FORMAT = TimeFormatUtils.DEFAULT_FORMAT;

    /** Year-month-day hour:minute:second pattern. */
    public static final String DEFAULT_FORMAT_YMD_HMS = TimeFormatUtils.DEFAULT_FORMAT_YMD_HMS;

    /** Year-month-day hour:minute pattern. */
    public static final String DEFAULT_FORMAT_YMD_HM = TimeFormatUtils.DEFAULT_FORMAT_YMD_HM;

    /** Month-day hour:minute pattern. */
    public static final String DEFAULT_FORMAT_MD_HM = TimeFormatUtils.DEFAULT_FORMAT_MD_HM;

    /** Day-month-year hour:minute pattern. */
    public static final String DEFAULT_FORMAT_DMY_HM = TimeFormatUtils.DEFAULT_FORMAT_DMY_HM;

    /** Day-month-year hour:minute:second pattern. */
    public static final String DEFAULT_FORMAT_DMY_HMS = TimeFormatUtils.DEFAULT_FORMAT_DMY_HMS;

    /** Pattern used for log timestamps. */
    public static final String LOG_FORMAT = TimeFormatUtils.LOG_FORMAT;

    /** Pattern used for UTC timestamps. */
    public static final String UTC_FORMAT = TimeFormatUtils.UTC_FORMAT;

    /**
     * Formats a millisecond duration as a time string.
     *
     * <p>NOTE(review): despite its name this delegates to the same
     * {@code TimeFormatUtils.toTimeString} overload as
     * {@link #toTimeString(long, boolean, boolean)}, so hours do not appear to
     * be forced — confirm against the jbasx implementation.
     *
     * @param millis      the duration in milliseconds
     * @param showSeconds whether seconds are included
     * @param showUnits   whether unit suffixes are included
     * @return the formatted duration
     */
    public static String toTimeStringForceShowHours(long millis, boolean showSeconds, boolean showUnits) {
        return TimeFormatUtils.toTimeString(millis, showSeconds, showUnits);
    }

    /**
     * Formats a millisecond duration as a time string.
     *
     * @param millis      the duration in milliseconds
     * @param showSeconds whether seconds are included
     * @param showUnits   whether unit suffixes are included
     * @return the formatted duration
     */
    public static String toTimeString(long millis, boolean showSeconds, boolean showUnits) {
        return TimeFormatUtils.toTimeString(millis, showSeconds, showUnits);
    }

    /**
     * Formats a millisecond duration using caller-supplied unit separators.
     *
     * @param millis         the duration in milliseconds
     * @param showSeconds    whether seconds are included
     * @param separatorHours separator printed after the hours part
     * @param separatorMin   separator printed after the minutes part
     * @param separatorSec   separator printed after the seconds part
     * @return the formatted duration
     */
    public static String toTimeString(long millis, boolean showSeconds, String separatorHours, String separatorMin, String separatorSec) {
        return TimeFormatUtils.toTimeString(millis, showSeconds, separatorHours, separatorMin, separatorSec);
    }

    /**
     * Converts a UTC date to a local-time string in the device locale.
     *
     * @param date the UTC date
     * @return the localized string
     */
    public static String utcToLocal(Date date) {
        return TimeFormatUtils.utcToLocal(date, DeviceUtils.getLocale());
    }

    /**
     * Formats an epoch-millisecond timestamp with the given pattern, localized.
     *
     * @param time   the timestamp in milliseconds
     * @param format the date/time pattern
     * @return the formatted string
     */
    public static String formatTime(long time, String format) {
        return TimeFormatUtils.formatTime(time, format, DeviceUtils.getLocale());
    }

    /**
     * Formats a {@link Date} with the given pattern, localized.
     *
     * @param date   the date to format
     * @param format the date/time pattern
     * @return the formatted string
     */
    public static String formatTime(Date date, String format) {
        return TimeFormatUtils.formatTime(date, format, DeviceUtils.getLocale());
    }

    /**
     * Re-formats a time string from one pattern to another, localized.
     *
     * @param timeStr   the input time string
     * @param srcFormat the pattern the input is in
     * @param dstFormat the pattern to produce
     * @return the re-formatted string
     */
    public static String formatTime(String timeStr, String srcFormat, String dstFormat) {
        return TimeFormatUtils.formatTime(timeStr, srcFormat, dstFormat, DeviceUtils.getLocale());
    }

    /**
     * Parses a time string with the given pattern, localized.
     *
     * @param time   the time string
     * @param format the pattern the string is in
     * @return the parsed timestamp in milliseconds
     */
    public static long formatTime(String time, String format) {
        return TimeFormatUtils.formatTime(time, format, DeviceUtils.getLocale());
    }

    /**
     * Returns the current timestamp in the given predefined format.
     *
     * @param format one of jbasx's {@link TimeFormatUtils.Format} values
     * @return the timestamp string
     */
    public static String getTimestamp(TimeFormatUtils.Format format) {
        return TimeFormatUtils.getTimestamp(format);
    }

    /**
     * Returns a lexically sortable current timestamp, localized.
     *
     * @param showSeconds whether seconds are included
     * @return the timestamp string
     */
    public static String getTimestampLexical(boolean showSeconds) {
        return TimeFormatUtils.getTimestampLexical(showSeconds, DeviceUtils.getLocale());
    }

    /**
     * Returns the current timestamp with millisecond precision, localized.
     *
     * @return the timestamp string
     */
    public static String getTimestampMillis() {
        return TimeFormatUtils.getTimestampMillis(DeviceUtils.getLocale());
    }

    /**
     * Returns jbasx's plain (non-localized) timestamp string.
     *
     * @return the stamp string
     */
    public static String getStamp() {
        return TimeFormatUtils.getStamp();
    }

    /**
     * Returns the current part of the day (e.g. morning/evening) as
     * classified by jbasx.
     *
     * @return the daytime value
     */
    public static TimeFormatUtils.Daytime getDaytime() {
        return TimeFormatUtils.getDaytime();
    }
}
apache-2.0
gxa/atlas
gxa/src/main/javascript/bundles/differential-expression/src/DifferentialRouterLoader.js
3071
import React from 'react' import PropTypes from 'prop-types' import { connect, PromiseState } from 'react-refetch' import URI from 'urijs' import DifferentialRouter from './DifferentialRouter' const transformFacetsResponseToArray = (facetsResponse) => { return Object.keys(facetsResponse).map(facetName => { return { facetName: facetName, facetItems: facetsResponse[facetName].map(facetItem => { return { name: facetItem.name, value: facetItem.value, disabled: false, checked: false } }) } }) } const pruneFacetsTreeBasedOnResultsThatCameIn = (facetsTreeData, results) => { return facetsTreeData.map(facet => ( { facetName: facet.facetName, facetItems: facet.facetItems.filter(facetItem => results.some(result => { if (Array.isArray(result[facet.facetName])) { return result[facet.facetName].includes(facetItem.name) } else { return result[facet.facetName] === facetItem.name } }) ) } )).filter(facet => facet.facetItems.length > 0) } class DifferentialRouterLoader extends React.Component { constructor(props) { super(props) } render () { const { facetsFetch, resultsFetch } = this.props const allFetches = PromiseState.all([facetsFetch, resultsFetch]) if (allFetches.pending) { return ( <div className={`row column`}> <img src={URI(`resources/images/loading.gif`, this.props.atlasUrl).toString()} /> </div> ) } else if (allFetches.fulfilled) { const resultsResponse = resultsFetch.value const facetsResponse = facetsFetch.value const facetsTreeData = transformFacetsResponseToArray(facetsResponse) const prunedFacetsTreeData = pruneFacetsTreeBasedOnResultsThatCameIn(facetsTreeData, resultsResponse.results) const results = resultsResponse.results const legend = { minDownLevel: resultsResponse.minDownLevel, minUpLevel: resultsResponse.minUpLevel, maxDownLevel:resultsResponse.maxDownLevel, maxUpLevel: resultsResponse.maxUpLevel } return ( <DifferentialRouter facetsTreeData={prunedFacetsTreeData} results={results} legend={legend} atlasUrl={this.props.atlasUrl} /> ) } } } 
DifferentialRouterLoader.propTypes = { atlasUrl: PropTypes.string.isRequired, geneQuery: PropTypes.string.isRequired, conditionQuery : PropTypes.string.isRequired, species: PropTypes.string.isRequired } export default connect((props) => { const requestParams = {geneQuery: props.geneQuery, conditionQuery: props.conditionQuery, species: props.species} return { facetsFetch: URI(`json/search/differential_facets`, props.atlasUrl).search(requestParams).toString(), resultsFetch: URI(`json/search/differential_results`, props.atlasUrl).search(requestParams).toString() } })(DifferentialRouterLoader)
apache-2.0
erikmack/lxd
lxd/cluster/raft.go
14646
package cluster

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"log"
	"math"
	"net"
	"net/http"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"github.com/CanonicalLtd/dqlite"
	"github.com/CanonicalLtd/raft-http"
	"github.com/CanonicalLtd/raft-membership"
	"github.com/boltdb/bolt"
	"github.com/hashicorp/raft"
	"github.com/hashicorp/raft-boltdb"
	"github.com/lxc/lxd/lxd/db"
	"github.com/lxc/lxd/lxd/node"
	"github.com/lxc/lxd/shared"
	"github.com/lxc/lxd/shared/log15"
	"github.com/lxc/lxd/shared/logger"
	"github.com/pkg/errors"
)

// Create a raft instance and all its dependencies, to be used as backend for
// the dqlite driver running on this LXD node.
//
// If this node should not serve as dqlite node, nil is returned.
//
// The raft instance will use an in-memory transport if clustering is not
// enabled on this node.
//
// The certInfo parameter should contain the cluster TLS keypair and optional
// CA certificate.
//
// The latency parameter is a coarse grain measure of how fast/reliable network
// links are. This is used to tweak the various timeouts parameters of the raft
// algorithm. See the raft.Config structure for more details. A value of 1.0
// means use the default values from hashicorp's raft package. Values closer to
// 0 reduce the values of the various timeouts (useful when running unit tests
// in-memory).
func newRaft(database *db.Node, cert *shared.CertInfo, latency float64) (*raftInstance, error) {
	if latency <= 0 {
		return nil, fmt.Errorf("latency should be positive")
	}

	// Figure out if we actually need to act as dqlite node.
	var info *db.RaftNode
	err := database.Transaction(func(tx *db.NodeTx) error {
		var err error
		info, err = node.DetermineRaftNode(tx)
		return err
	})
	if err != nil {
		return nil, err
	}

	// If we're not part of the dqlite cluster, there's nothing to do.
	if info == nil {
		return nil, nil
	}
	logger.Info("Start database node", log15.Ctx{"id": info.ID, "address": info.Address})

	// Initialize a raft instance along with all needed dependencies.
	instance, err := raftInstanceInit(database, info, cert, latency)
	if err != nil {
		return nil, err
	}

	return instance, nil
}

// A LXD-specific wrapper around raft.Raft, which also holds a reference to its
// network transport and dqlite FSM.
type raftInstance struct {
	layer             *rafthttp.Layer       // HTTP-based raft transport layer
	handler           http.HandlerFunc      // Handles join/leave/connect requests
	membershipChanger func(*raft.Raft)      // Forwards to raft membership requests from handler
	logs              *raftboltdb.BoltStore // Raft logs store, needs to be closed upon shutdown
	registry          *dqlite.Registry      // The dqlite Registry linked to the FSM and the Driver
	fsm               raft.FSM              // The dqlite FSM linked to the raft instance
	raft              *raft.Raft            // The actual raft instance
}

// Create a new raftFactory, instantiating all needed raft dependencies.
func raftInstanceInit(
	db *db.Node, node *db.RaftNode, cert *shared.CertInfo, latency float64) (*raftInstance, error) {
	// FIXME: should be a parameter
	timeout := 5 * time.Second

	raftLogger := raftLogger()

	// Raft config.
	config := raftConfig(latency)
	config.Logger = raftLogger
	config.LocalID = raft.ServerID(strconv.Itoa(int(node.ID)))

	// Raft transport
	var handler *rafthttp.Handler
	var membershipChanger func(*raft.Raft)
	var layer *rafthttp.Layer
	var transport raft.Transport
	addr := node.Address
	if addr == "" {
		// This should normally be used only for testing as it can
		// cause split-brain, but since we are not exposing raft to the
		// network at all it's safe to do so. When this node gets
		// exposed to the network and assigned an address, we need to
		// restart raft anyways.
		config.StartAsLeader = true
		transport = raftMemoryTransport()
	} else {
		dial, err := raftDial(cert)
		if err != nil {
			return nil, err
		}

		transport, handler, layer, err = raftNetworkTransport(db, addr, raftLogger, timeout, dial)
		if err != nil {
			return nil, err
		}
		membershipChanger = func(raft *raft.Raft) {
			raftmembership.HandleChangeRequests(raft, handler.Requests())
		}
	}

	err := raft.ValidateConfig(config)
	if err != nil {
		return nil, errors.Wrap(err, "invalid raft configuration")
	}

	// Rename legacy data directory if needed.
	dir := filepath.Join(db.Dir(), "global")
	legacyDir := filepath.Join(db.Dir(), "..", "raft")
	if shared.PathExists(legacyDir) {
		if shared.PathExists(dir) {
			return nil, fmt.Errorf("both legacy and new global database directories exist")
		}
		logger.Info("Renaming global database directory from raft/ to database/global/")
		err := os.Rename(legacyDir, dir)
		if err != nil {
			return nil, errors.Wrap(err, "failed to rename legacy global database directory")
		}
	}

	// Data directory
	if !shared.PathExists(dir) {
		err := os.Mkdir(dir, 0750)
		if err != nil {
			return nil, err
		}
	}

	// Raft logs store
	logs, err := raftboltdb.New(raftboltdb.Options{
		Path:        filepath.Join(dir, "logs.db"),
		BoltOptions: &bolt.Options{Timeout: timeout},
	})
	if err != nil {
		return nil, errors.Wrap(err, "failed to create bolt store for raft logs")
	}

	// Raft snapshot store (don't log snapshots since we take them frequently)
	snaps, err := raft.NewFileSnapshotStoreWithLogger(dir, 2, log.New(ioutil.Discard, "", 0))
	if err != nil {
		return nil, errors.Wrap(err, "failed to create file snapshot store")
	}

	// If we are the initial node, we use the last index persisted in the
	// logs store and other checks to determine if we have ever
	// bootstrapped the cluster, and if not we do so (see raft.HasExistingState).
	if node.ID == 1 {
		err := raftMaybeBootstrap(config, logs, snaps, transport)
		if err != nil {
			return nil, errors.Wrap(err, "failed to boostrap cluster")
		}
	}

	// The dqlite registry and FSM.
	registry := dqlite.NewRegistry(dir)
	fsm := dqlite.NewFSM(registry)

	// The actual raft instance.
	raft, err := raft.NewRaft(config, fsm, logs, logs, snaps, transport)
	if err != nil {
		logs.Close()
		return nil, errors.Wrap(err, "failed to start raft")
	}

	if membershipChanger != nil {
		// Process Raft connections over HTTP. This goroutine will
		// terminate when instance.handler.Close() is called, which
		// happens indirectly when the raft instance is shutdown in
		// instance.Shutdown(), and the associated transport is closed.
		go membershipChanger(raft)
	}

	instance := &raftInstance{
		layer:             layer,
		handler:           raftHandler(cert, handler),
		membershipChanger: membershipChanger,
		logs:              logs,
		registry:          registry,
		fsm:               fsm,
		raft:              raft,
	}

	return instance, nil
}

// Registry returns the dqlite Registry associated with the raft instance.
func (i *raftInstance) Registry() *dqlite.Registry {
	return i.registry
}

// FSM returns the dqlite FSM associated with the raft instance.
func (i *raftInstance) FSM() raft.FSM {
	return i.fsm
}

// Raft returns the actual underlying raft instance.
func (i *raftInstance) Raft() *raft.Raft {
	return i.raft
}

// Servers returns the servers that are currently part of the cluster.
//
// If this raft instance is not the leader, an error is returned.
func (i *raftInstance) Servers() ([]raft.Server, error) {
	if i.raft.State() != raft.Leader {
		return nil, raft.ErrNotLeader
	}
	future := i.raft.GetConfiguration()
	err := future.Error()
	if err != nil {
		return nil, err
	}
	configuration := future.Configuration()
	return configuration.Servers, nil
}

// HandlerFunc can be used to handle HTTP requests performed against the LXD
// API RaftEndpoint ("/internal/raft"), in order to join/leave/form the raft
// cluster.
//
// If it returns nil, it means that this node is not supposed to expose a raft
// endpoint over the network, because it's running as a non-clustered single
// node.
func (i *raftInstance) HandlerFunc() http.HandlerFunc {
	if i.handler == nil {
		return nil
	}
	return i.handler.ServeHTTP
}

// MembershipChanger returns the underlying rafthttp.Layer, which can be used
// to change the membership of this node in the cluster.
func (i *raftInstance) MembershipChanger() raftmembership.Changer {
	return i.layer
}

// Shutdown raft and any raft-related resource we have instantiated.
func (i *raftInstance) Shutdown() error {
	logger.Info("Stop raft instance")

	// Invoke raft APIs asynchronously to allow for a timeout.
	timeout := 10 * time.Second

	// FIXME/TODO: We take a snapshot before when shutting down the daemon
	//             so there will be no uncompacted raft logs at the next
	//             startup. This is a workaround for slow log replay when
	//             the LXD daemon starts (see #4485). A more proper fix
	//             should be probably implemented in dqlite.
	errCh := make(chan error)
	timer := time.After(timeout)
	go func() {
		errCh <- i.raft.Snapshot().Error()
	}()

	// In case of error we just log a warning, since this is not really
	// fatal.
	select {
	case err := <-errCh:
		if err != nil && err != raft.ErrNothingNewToSnapshot {
			logger.Warnf("Failed to take raft snapshot: %v", err)
		}
	case <-timer:
		logger.Warnf("Timeout waiting for raft to take a snapshot")
	}

	errCh = make(chan error)
	timer = time.After(timeout)
	go func() {
		errCh <- i.raft.Shutdown().Error()
	}()

	select {
	case err := <-errCh:
		if err != nil {
			return errors.Wrap(err, "failed to shutdown raft")
		}
	case <-timer:
		logger.Debug("Timeout waiting for raft to shutdown")
		return fmt.Errorf("raft did not shutdown within %s", timeout)
	}
	err := i.logs.Close()
	if err != nil {
		return errors.Wrap(err, "failed to close boltdb logs store")
	}
	return nil
}

// Create an in-memory raft transport.
func raftMemoryTransport() raft.Transport {
	_, transport := raft.NewInmemTransport("0")
	return transport
}

// Create a rafthttp.Dial function that connects over TLS using the given
// cluster (and optionally CA) certificate both as client and remote
// certificate.
func raftDial(cert *shared.CertInfo) (rafthttp.Dial, error) {
	config, err := tlsClientConfig(cert)
	if err != nil {
		return nil, err
	}
	dial := rafthttp.NewDialTLS(config)
	return dial, nil
}

// Create a network raft transport that will handle connections using a
// rafthttp.Handler.
func raftNetworkTransport(
	db *db.Node,
	address string,
	logger *log.Logger,
	timeout time.Duration,
	dial rafthttp.Dial) (raft.Transport, *rafthttp.Handler, *rafthttp.Layer, error) {
	handler := rafthttp.NewHandlerWithLogger(logger)
	addr, err := net.ResolveTCPAddr("tcp", address)
	if err != nil {
		return nil, nil, nil, errors.Wrap(err, "invalid node address")
	}

	layer := rafthttp.NewLayer(raftEndpoint, addr, handler, dial)
	config := &raft.NetworkTransportConfig{
		Logger:  logger,
		Stream:  layer,
		MaxPool: 2,
		Timeout: timeout,
		// Addresses are resolved from node IDs via the raft_nodes table.
		ServerAddressProvider: &raftAddressProvider{db: db},
	}
	transport := raft.NewNetworkTransportWithConfig(config)

	return transport, handler, layer, nil
}

// The LXD API endpoint path that gets routed to a rafthttp.Handler for
// joining/leaving the cluster and exchanging raft commands between nodes.
const raftEndpoint = "/internal/raft"

// An address provider that looks up server addresses in the raft_nodes table.
type raftAddressProvider struct {
	db *db.Node
}

// ServerAddr maps a numeric raft server ID to its network address by querying
// the local raft_nodes table.
func (p *raftAddressProvider) ServerAddr(id raft.ServerID) (raft.ServerAddress, error) {
	databaseID, err := strconv.Atoi(string(id))
	if err != nil {
		return "", errors.Wrap(err, "non-numeric server ID")
	}
	var address string
	err = p.db.Transaction(func(tx *db.NodeTx) error {
		var err error
		address, err = tx.RaftNodeAddress(int64(databaseID))
		return err
	})
	if err != nil {
		return "", err
	}
	return raft.ServerAddress(address), nil
}

// Create a base raft configuration tweaked for a network with the given latency measure.
func raftConfig(latency float64) *raft.Config {
	config := raft.DefaultConfig()

	// Scale every timeout by the latency factor (1.0 keeps the defaults).
	scale := func(duration *time.Duration) {
		*duration = time.Duration((math.Ceil(float64(*duration) * latency)))
	}
	durations := []*time.Duration{
		&config.HeartbeatTimeout,
		&config.ElectionTimeout,
		&config.CommitTimeout,
		&config.LeaderLeaseTimeout,
	}
	for _, duration := range durations {
		scale(duration)
	}

	// FIXME/TODO: We increase the frequency of snapshots here to keep the
	//             number of uncompacted raft logs low, and workaround slow
	//             log replay when the LXD daemon starts (see #4485). A more
	//             proper fix should be probably implemented in dqlite.
	config.SnapshotThreshold = 64
	config.TrailingLogs = 128

	return config
}

// Helper to bootstrap the raft cluster if needed.
func raftMaybeBootstrap(
	conf *raft.Config,
	logs *raftboltdb.BoltStore,
	snaps raft.SnapshotStore,
	trans raft.Transport) error {
	// First check if we were already bootstrapped.
	hasExistingState, err := raft.HasExistingState(logs, logs, snaps)
	if err != nil {
		return errors.Wrap(err, "failed to check if raft has existing state")
	}
	if hasExistingState {
		return nil
	}
	// Bootstrap a single-server configuration made of just this node.
	server := raft.Server{
		ID:      conf.LocalID,
		Address: trans.LocalAddr(),
	}
	configuration := raft.Configuration{
		Servers: []raft.Server{server},
	}
	return raft.BootstrapCluster(conf, logs, logs, snaps, trans, configuration)
}

// Wrap the rafthttp.Handler so that only requests presenting a valid cluster
// client certificate are served; returns nil when there is no handler (i.e.
// this node uses the in-memory transport).
func raftHandler(info *shared.CertInfo, handler *rafthttp.Handler) http.HandlerFunc {
	if handler == nil {
		return nil
	}
	return func(w http.ResponseWriter, r *http.Request) {
		if !tlsCheckCert(r, info) {
			http.Error(w, "403 invalid client certificate", http.StatusForbidden)
			return
		}
		handler.ServeHTTP(w, r)
	}
}

// Build a standard-library logger that forwards raft's output into LXD's
// logging system (see raftLogWriter below).
func raftLogger() *log.Logger {
	return log.New(&raftLogWriter{}, "", 0)
}

// Implement io.Writer on top of LXD's logging system.
type raftLogWriter struct {
}

func (o *raftLogWriter) Write(line []byte) (n int, err error) {
	// Parse the log level according to hashicorp's raft pkg conventions,
	// i.e. lines of the form "... [LEVEL] message".
	level := ""
	msg := ""
	x := bytes.IndexByte(line, '[')
	if x >= 0 {
		y := bytes.IndexByte(line[x:], ']')
		if y >= 0 {
			level = string(line[x+1 : x+y])

			// Capitalize the string, to match LXD logging conventions
			first := strings.ToUpper(string(line[x+y+2]))
			rest := string(line[x+y+3 : len(line)-1])
			msg = first + rest
		}
	}

	if level == "" {
		// Ignore log entries that don't stick to the convention.
		return len(line), nil
	}

	switch level {
	case "DEBUG":
		logger.Debug(msg)
	case "INFO":
		logger.Info(msg)
	case "WARN":
		logger.Warn(msg)
	default:
		// Ignore any other log level.
	}
	return len(line), nil
}
apache-2.0
howardliu-cn/cynomys
cynomys-client/cynomys-agent/src/main/java/cn/howardliu/monitor/cynomys/agent/dto/ApplicationInfo.java
7882
/**
 * Project: netty-wfj-base
 * Path: com.wfj.netty.monitor.dto.ApplicationInfo.java
 * Created by Jack on 2015-08-26.
 */
package cn.howardliu.monitor.cynomys.agent.dto;

/**
 * Plain data holder describing a monitored application's identity and its
 * aggregated runtime/request counters, as collected by the monitoring agent.
 * All fields are exposed through conventional getter/setter pairs; the class
 * performs no validation or computation.
 */
public class ApplicationInfo {
    // Identity / deployment
    private String serverTag;      // tag identifying the server instance
    private String name;           // application name
    private String version;        // application version
    private String desc;           // free-form description
    private String status;         // current application status
    private SystemInfo sysInfo;    // nested host/system information
    private String updateTime;     // timestamp of the last update (string-formatted)

    // Request counters (aggregated)
    private Long sumInboundReqCounts;   // total inbound requests
    private Long sumOutboundReqCounts;  // total outbound requests
    private Long sumDealReqCounts;      // total processed requests
    private Long sumDealReqTime;        // total processing time
    private Long peerDealReqTime;       // peak processing time for a single request
                                        // (NOTE(review): "peer" presumably means peak — confirm)
    private Long sumErrDealReqCounts;   // total failed requests
    private Long sumErrDealReqTime;     // total processing time spent on failed requests

    // Runtime environment
    private String serverName;     // hosting server (container) name
    private String serverVersion;  // hosting server (container) version
    private Long transactionCount; // transaction counter
    private String pid;            // OS process id of the JVM
    private String dataBaseVersion;    // database product/version string
    private String dataSourceDetails;  // data source configuration details
    private Long unixOpenFileDescriptorCount; // currently open FDs (Unix only)
    private Long unixMaxFileDescriptorCount;  // FD limit (Unix only)
    private String startupDate;    // application startup timestamp (string-formatted)

    public String getServerTag() { return serverTag; }

    public void setServerTag(String serverTag) { this.serverTag = serverTag; }

    public String getStartupDate() { return startupDate; }

    public void setStartupDate(String startupDate) { this.startupDate = startupDate; }

    public Long getUnixOpenFileDescriptorCount() { return unixOpenFileDescriptorCount; }

    public void setUnixOpenFileDescriptorCount(Long unixOpenFileDescriptorCount) {
        this.unixOpenFileDescriptorCount = unixOpenFileDescriptorCount;
    }

    public Long getUnixMaxFileDescriptorCount() { return unixMaxFileDescriptorCount; }

    public void setUnixMaxFileDescriptorCount(Long unixMaxFileDescriptorCount) {
        this.unixMaxFileDescriptorCount = unixMaxFileDescriptorCount;
    }

    public String getPid() { return pid; }

    public void setPid(String pid) { this.pid = pid; }

    public String getDataBaseVersion() { return dataBaseVersion; }

    public void setDataBaseVersion(String dataBaseVersion) { this.dataBaseVersion = dataBaseVersion; }

    public String getDataSourceDetails() { return dataSourceDetails; }

    public void setDataSourceDetails(String dataSourceDetails) { this.dataSourceDetails = dataSourceDetails; }

    public Long getTransactionCount() { return transactionCount; }

    public void setTransactionCount(Long transactionCount) { this.transactionCount = transactionCount; }

    public String getServerName() { return serverName; }

    public void setServerName(String serverName) { this.serverName = serverName; }

    public String getServerVersion() { return serverVersion; }

    public void setServerVersion(String serverVersion) { this.serverVersion = serverVersion; }

    public Long getSumErrDealReqCounts() { return sumErrDealReqCounts; }

    public void setSumErrDealReqCounts(Long sumErrDealReqCounts) { this.sumErrDealReqCounts = sumErrDealReqCounts; }

    public Long getSumErrDealReqTime() { return sumErrDealReqTime; }

    public void setSumErrDealReqTime(Long sumErrDealReqTime) { this.sumErrDealReqTime = sumErrDealReqTime; }

    public String getUpdateTime() { return updateTime; }

    public void setUpdateTime(String updateTime) { this.updateTime = updateTime; }

    public Long getSumInboundReqCounts() { return sumInboundReqCounts; }

    public void setSumInboundReqCounts(Long sumInboundReqCounts) { this.sumInboundReqCounts = sumInboundReqCounts; }

    public Long getSumOutboundReqCounts() { return sumOutboundReqCounts; }

    public void setSumOutboundReqCounts(Long sumOutboundReqCounts) { this.sumOutboundReqCounts = sumOutboundReqCounts; }

    public Long getSumDealReqCounts() { return sumDealReqCounts; }

    public void setSumDealReqCounts(Long sumDealReqCounts) { this.sumDealReqCounts = sumDealReqCounts; }

    public Long getSumDealReqTime() { return sumDealReqTime; }

    public void setSumDealReqTime(Long sumDealReqTime) { this.sumDealReqTime = sumDealReqTime; }

    public Long getPeerDealReqTime() { return peerDealReqTime; }

    public void setPeerDealReqTime(Long peerDealReqTime) { this.peerDealReqTime = peerDealReqTime; }

    public String getName() { return name; }

    public void setName(String name) { this.name = name; }

    public String getVersion() { return version; }

    public void setVersion(String version) { this.version = version; }

    public String getDesc() { return desc; }

    public void setDesc(String desc) { this.desc = desc; }

    public String getStatus() { return status; }

    public void setStatus(String status) { this.status = status; }

    public SystemInfo getSysInfo() { return sysInfo; }

    public void setSysInfo(SystemInfo sysInfo) { this.sysInfo = sysInfo; }
}
apache-2.0
SmoothSync/smoothsetup
library/src/main/java/com/smoothsync/smoothsetup/restrictions/RestrictionService.java
1577
/*
 * Copyright (c) 2020 dmfs GmbH
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.smoothsync.smoothsetup.restrictions;

import android.os.Bundle;

import com.smoothsync.api.model.Service;

import org.dmfs.jems.optional.elementary.NullSafe;
import org.dmfs.jems.single.combined.Backed;
import org.dmfs.jems2.Optional;

import java.net.URI;
import java.security.KeyStore;

import static org.dmfs.jems2.optional.Absent.absent;


/**
 * A {@link Service} whose attributes are read from an Android app-restrictions
 * {@link Bundle}.
 *
 * <p>Bundle keys read: {@code "name"} (optional, defaults to the empty
 * string), {@code "service-type"} and {@code "uri"} (both assumed present —
 * {@link #serviceType()} may return {@code null} and {@link #uri()} will throw
 * if they are missing). {@link #keyStore()} is always absent.
 *
 * <p>NOTE(review): this class mixes jems ({@code Backed}, {@code NullSafe})
 * and jems2 ({@code Optional}, {@code absent}) types — presumably a
 * transitional state during the jems2 migration; confirm both libraries stay
 * on the classpath.
 */
public final class RestrictionService implements Service
{
    // The restrictions bundle backing all attribute lookups.
    private final Bundle mBundle;


    public RestrictionService(Bundle bundle)
    {
        mBundle = bundle;
    }


    @Override
    public String name()
    {
        // Fall back to "" when the bundle has no "name" entry.
        return new Backed<>(new NullSafe<>(mBundle.getString("name")), "").value();
    }


    @Override
    public String serviceType()
    {
        return mBundle.getString("service-type");
    }


    @Override
    public URI uri()
    {
        // Throws (NPE/IllegalArgumentException) if "uri" is absent or invalid.
        return URI.create(mBundle.getString("uri"));
    }


    @Override
    public Optional<KeyStore> keyStore()
    {
        // Restriction-provided services never carry a custom key store.
        return absent();
    }
}
apache-2.0
MnAnX/Infra
ZmqServer/src/nx/server/zmq/IHandler.java
139
package nx.server.zmq;

/**
 * A named request handler plugged into the ZMQ server. Requests are routed to
 * a handler by its service name; payloads are exchanged as raw strings.
 */
public interface IHandler
{
	/**
	 * Returns the unique service name used to route requests to this handler.
	 */
	String getServiceName();

	/**
	 * Processes a serialized request and returns the serialized response.
	 *
	 * @param request the raw request payload
	 * @return the raw response payload
	 * @throws Exception if the request cannot be handled
	 */
	String process(String request) throws Exception;
}
apache-2.0
JonathanAQ/HospitalProject
AuthenticationFilter.java
1884
package com.hospital.fase.prog3.Filter; import java.io.IOException; import javax.servlet.Filter; import javax.servlet.FilterChain; import javax.servlet.FilterConfig; import javax.servlet.ServletException; import javax.servlet.ServletRequest; import javax.servlet.ServletResponse; import javax.servlet.annotation.WebFilter; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import javax.servlet.http.HttpSession; /** * Servlet Filter implementation class AuthenticationFilter */ @WebFilter("/AuthenticationFilter") public class AuthenticationFilter implements Filter { /** * Default constructor. */ public AuthenticationFilter() { // TODO Auto-generated constructor stub } /** * @see Filter#destroy() */ public void destroy() { // TODO Auto-generated method stub } /** * @see Filter#doFilter(ServletRequest, ServletResponse, FilterChain) */ public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) throws IOException, ServletException { // TODO Auto-generated method stub // place your code here HttpServletRequest req = (HttpServletRequest) request; HttpServletResponse res = (HttpServletResponse) response; String uri = req.getRequestURI(); HttpSession session = req.getSession(false); boolean isURLLoginServlet = uri.endsWith("loginServlet"); boolean isURLEndsHTML = uri.endsWith("html"); if (session == null && !(isURLEndsHTML || isURLLoginServlet)) { res.sendRedirect("login.html"); } else { chain.doFilter(request, response); } // pass the request along the filter chain chain.doFilter(request, response); } /** * @see Filter#init(FilterConfig) */ public void init(FilterConfig fConfig) throws ServletException { // TODO Auto-generated method stub } }
apache-2.0
RobertSzarejko/WebSecurityForBackendDev
web-security-demo/src/main/java/pl/itdonat/demo/wsfbd/encryption/CryptoOneController.java
1211
package pl.itdonat.demo.wsfbd.encryption; import org.springframework.http.MediaType; import org.springframework.stereotype.Controller; import org.springframework.ui.Model; import org.springframework.web.bind.annotation.*; import pl.itdonat.demo.wsfbd.encryption.encode.EncodeData; import pl.itdonat.demo.wsfbd.encryption.encode.EncodeService; import java.util.List; /** * Created by r.szarejko on 2017-03-22. */ @Controller @RequestMapping("/crypto_one") public class CryptoOneController { private final EncodeService encodeService; public CryptoOneController(EncodeService encodeService) { this.encodeService = encodeService; } @GetMapping public String get(Model model){ CryptoData cryptoData = new CryptoData(); model.addAttribute("data", cryptoData); return "crypto_one"; } @PostMapping public String postTheSame(Model model, @ModelAttribute(value = "data") CryptoData data){ List<EncodeData> encodeData = encodeService.prepareEncodedValueByAlgorithm(data.getValue(), data.getAlgorithm(), 6); model.addAttribute("data", data); model.addAttribute("valueList", encodeData); return "crypto_one"; } }
apache-2.0
cherryhill/collectionspace-services
services/report/client/src/test/java/org/collectionspace/services/client/test/ReportServiceTest.java
14833
/** * This document is a part of the source code and related artifacts * for CollectionSpace, an open source collections management system * for museums and related institutions: * * http://www.collectionspace.org * http://wiki.collectionspace.org * * Copyright © 2009 Regents of the University of California * * Licensed under the Educational Community License (ECL), Version 2.0. * You may not use this file except in compliance with this License. * * You may obtain a copy of the ECL 2.0 License at * https://source.collectionspace.org/collection-space/LICENSE.txt * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.collectionspace.services.client.test; import java.math.BigDecimal; import java.util.ArrayList; import java.util.List; import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; import org.collectionspace.services.acquisition.AcquisitionSourceList; import org.collectionspace.services.acquisition.AcquisitionsCommon; import org.collectionspace.services.acquisition.StructuredDateGroup; import org.collectionspace.services.client.AbstractCommonListUtils; import org.collectionspace.services.client.CollectionSpaceClient; import org.collectionspace.services.client.PayloadOutputPart; import org.collectionspace.services.client.PoxPayloadOut; import org.collectionspace.services.client.ReportClient; import org.collectionspace.services.common.invocable.InvocationContext; import org.collectionspace.services.report.ReportsCommon; import org.collectionspace.services.jaxb.AbstractCommonList; import org.collectionspace.services.client.AcquisitionClient; import org.jboss.resteasy.client.ClientResponse; import org.testng.Assert; //import org.testng.annotations.AfterClass; import 
org.testng.annotations.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * FIXME: http://issues.collectionspace.org/browse/CSPACE-1685
 * ReportServiceTest, carries out tests against a
 * deployed and running Report Service.
 *
 * NOTE(review): test order matters here — methods are chained via TestNG
 * dependsOnMethods, and delete() is deliberately deferred to localDelete().
 *
 * $LastChangedRevision: 2261 $
 * $LastChangedDate: 2010-05-28 16:52:22 -0700 (Fri, 28 May 2010) $
 */
public class ReportServiceTest extends AbstractPoxServiceTestImpl<AbstractCommonList, ReportsCommon> {

    /** The logger. */
    private final String CLASS_NAME = ReportServiceTest.class.getName();
    private final Logger logger = LoggerFactory.getLogger(CLASS_NAME);

    final String SERVICE_NAME = "reports";
    final String SERVICE_PATH_COMPONENT = "reports";

    // Instance variables specific to this test.
    // Document type the reports under test are filtered/invoked against.
    private String testDocType = "Acquisition";

    /**
     * Creates one Acquisition record (via the Acquisition service) so that
     * publishReportInstance() has a concrete record to run the report against.
     * The created CSID is recorded in allResourceIdsCreated for later cleanup.
     *
     * @return the CSID of the newly created Acquisition resource
     */
    private String createAquisitionResource() {
        String result = null;

        AcquisitionClient acquisitionClient = new AcquisitionClient();
        AcquisitionsCommon acquisitionsCommon = new AcquisitionsCommon();
        acquisitionsCommon.setAcquisitionReason("It was nice.");
        acquisitionsCommon.setOriginalObjectPurchasePriceValue(new BigDecimal(500));
        acquisitionsCommon.setAcquisitionReferenceNumber("2013.003.0004");

        AcquisitionSourceList asl = new AcquisitionSourceList();
        List<String> sourceList = asl.getAcquisitionSource();
        sourceList.add("The Jim Henson Legacy");
        acquisitionsCommon.setAcquisitionSources(asl);

        StructuredDateGroup sdg = new StructuredDateGroup();
        sdg.setDateDisplayDate("12/12/2012");
        acquisitionsCommon.setAccessionDateGroup(sdg);

        PoxPayloadOut poxPayloadOut = new PoxPayloadOut(AcquisitionClient.SERVICE_PAYLOAD_NAME);
        PayloadOutputPart commonPart = poxPayloadOut.addPart(acquisitionClient.getCommonPartName(), acquisitionsCommon);

        Response res = acquisitionClient.create(poxPayloadOut);
        try {
            // setupCreate() configures the expected status for a "create" request
            // (inherited from the base test class).
            setupCreate();
            int statusCode = res.getStatus();
            if (logger.isDebugEnabled()) {
                logger.debug(this.getClass().getCanonicalName() + ": HTTP status = " + statusCode);
            }
            Assert.assertTrue(testRequestType.isValidStatusCode(statusCode),
                    invalidStatusCodeMessage(testRequestType, statusCode));
            Assert.assertEquals(statusCode, testExpectedStatusCode);
            result = extractId(res);
            // Store the IDs from every resource created by tests,
            // so they can be deleted after tests have been run.
            allResourceIdsCreated.add(result);
        } finally {
            res.close();
        }

        return result;
    }

    /**
     * Publishes (runs) a report instance against a freshly created Acquisition
     * record and asserts a public-item CSID comes back.
     */
    @Test(dataProvider = "testName", dependsOnMethods = {"create"})
    public void publishReportInstance(String testName) throws Exception {
        // Perform setup.
        // Submit the request to the service and store the response.
        ReportClient client = (ReportClient) this.getClientInstance();
        String reportCsid = createResource(testName, this.getKnowResourceIdentifier());
        String acquisitionCsid = createAquisitionResource();

        //
        // Hard coded for now, but need to create this test in the Integration test area where
        // we'll create an Acquisition instance for this test
        //
        InvocationContext invocationContext = new InvocationContext();
        invocationContext.setDocType("Acquisition");
        invocationContext.setMode("single");
        invocationContext.setSingleCSID(acquisitionCsid);

        Response res = client.publishReport(reportCsid, invocationContext);
        try {
            int statusCode = res.getStatus();
            setupCreate();
            // Check the status code of the response: does it match
            // the expected response(s)?
            if (logger.isDebugEnabled()) {
                logger.debug(testName + ": status = " + statusCode);
            }
            Assert.assertTrue(testRequestType.isValidStatusCode(statusCode),
                    invalidStatusCodeMessage(testRequestType, statusCode));
            Assert.assertEquals(statusCode, testExpectedStatusCode);
            String publicItemCsid = extractId(res);
            Assert.assertNotNull(publicItemCsid);
        } finally {
            res.close();
        }
    }

    /* (non-Javadoc)
     * @see org.collectionspace.services.client.test.BaseServiceTest#getClientInstance()
     */
    @Override
    protected CollectionSpaceClient getClientInstance() {
        return new ReportClient();
    }

    /**
     * Exercises the filtered list endpoint: the known report must be returned
     * when filtering by its own docType/mode, and must NOT be returned when
     * filtering by a different docType ("Intake") or mode ("group").
     */
    @Test(dataProvider = "testName", dependsOnMethods = {"CRUDTests"})
    public void readListFiltered(String testName) throws Exception {
        // Perform setup.
        setupReadList();

        // Submit the request to the service and store the response.
        ReportClient client = new ReportClient();
        Response res = client.readListFiltered(testDocType, "single");
        AbstractCommonList list = null;
        try {
            assertStatusCode(res, testName);
            list = res.readEntity(AbstractCommonList.class);
        } finally {
            if (res != null) {
                res.close();
            }
        }
        List<AbstractCommonList.ListItem> items = list.getListItem();
        // We must find the basic one we created
        boolean fFoundBaseItem = false;
        for (AbstractCommonList.ListItem item : items) {
            String itemCsid = AbstractCommonListUtils.ListItemGetCSID(item);
            if (knownResourceId.equalsIgnoreCase(itemCsid)) {
                fFoundBaseItem = true;
                break;
            }
        }
        if (!fFoundBaseItem) {
            Assert.fail("readListFiltered failed to return base item");
        }

        // Now filter for something else, and ensure it is NOT returned
        res = client.readListFiltered("Intake", "single");
        try {
            assertStatusCode(res, testName);
            list = res.readEntity(AbstractCommonList.class);
        } finally {
            if (res != null) {
                res.close();
            }
        }
        items = list.getListItem();
        // We must NOT find the basic one we created
        for (AbstractCommonList.ListItem item : items) {
            Assert.assertNotSame(AbstractCommonListUtils.ListItemGetCSID(item), knownResourceId,
                    "readListFiltered(\"Intake\", \"single\") incorrectly returned base item");
        }

        // Now filter for something else, and ensure it is NOT returned
        res = client.readListFiltered(testDocType, "group");
        try {
            assertStatusCode(res, testName);
            list = res.readEntity(AbstractCommonList.class);
        } finally {
            if (res != null) {
                res.close();
            }
        }
        items = list.getListItem();
        // We must NOT find the basic one we created
        for (AbstractCommonList.ListItem item : items) {
            Assert.assertNotSame(AbstractCommonListUtils.ListItemGetCSID(item), knownResourceId,
                    "readListFiltered(\"" + testDocType + "\", \"group\") incorrectly returned base item");
        }
    }

    /**
     * This method overrides the delete method in the base class which is marked with the TestNG @Test annotation.
     * Since we don't want the actual delete test to happen until later in the dependency test chain, we make this
     * an empty method. Later in the test suite, the method localDelete() will get called and it will call super.delete()
     */
    @Override
    public void delete(String testName) throws Exception {
        //
        // Do nothing for now. The test localDelete() will get called later in the dependency chain.
        //
    }

    /**
     * This test will delete the known resource after the test readListFiltered() is run
     */
    @Test(dataProvider = "testName", dependsOnMethods = {"readListFiltered"})
    public void localDelete(String testName) throws Exception {
        super.delete(testName);
    }

    // ---------------------------------------------------------------
    // Utility methods used by tests above
    // ---------------------------------------------------------------

    @Override
    protected String getServiceName() {
        return SERVICE_NAME;
    }

    /* (non-Javadoc)
     * @see org.collectionspace.services.client.test.BaseServiceTest#getServicePathComponent()
     */
    @Override
    public String getServicePathComponent() {
        return SERVICE_PATH_COMPONENT;
    }

    /**
     * Creates the report instance.
     *
     * @param identifier the identifier
     * @return the multipart output
     */
    private PoxPayloadOut createReportInstance(String identifier) {
        List<String> docTypes = new ArrayList<String>();
        docTypes.add(testDocType);
        // Defaults: single-doc and no-context supported; doc-list and group not.
        return createReportInstance(
                "Acquisition Summary " + identifier,
                docTypes,
                true, false, false, true,
                "acq_basic.jasper", "application/pdf");
    }

    /**
     * Creates the report instance.
     *
     * @param name the report name
     * @param forDocTypeList the document types this report applies to
     * @param supportsSingle whether the report supports a single-document context
     * @param supportsList whether the report supports a document-list context
     * @param supportsGroup whether the report supports a group context
     * @param supportsNoContext whether the report can run without an invocation context
     * @param filename the relative path to the report
     * @param outputMIME the MIME type we will return for this report
     * @return the multipart output
     */
    private PoxPayloadOut createReportInstance(String name,
            List<String> forDocTypeList,
            boolean supportsSingle, boolean supportsList,
            boolean supportsGroup, boolean supportsNoContext,
            String filename,
            String outputMIME) {
        ReportsCommon reportCommon = new ReportsCommon();
        reportCommon.setName(name);
        ReportsCommon.ForDocTypes forDocTypes = new ReportsCommon.ForDocTypes();
        List<String> docTypeList = forDocTypes.getForDocType();
        docTypeList.addAll(forDocTypeList);
        reportCommon.setForDocTypes(forDocTypes);
        reportCommon.setSupportsSingleDoc(supportsSingle);
        reportCommon.setSupportsDocList(supportsList);
        reportCommon.setSupportsGroup(supportsGroup);
        reportCommon.setSupportsNoContext(supportsNoContext);
        reportCommon.setFilename(filename);
        reportCommon.setOutputMIME(outputMIME);
        reportCommon.setNotes(getUTF8DataFragment()); // For UTF-8 tests

        PoxPayloadOut multipart = new PoxPayloadOut(this.getServicePathComponent());
        PayloadOutputPart commonPart = multipart.addPart(new ReportClient().getCommonPartName(), reportCommon);

        if (logger.isDebugEnabled()) {
            logger.debug("to be created, report common");
            logger.debug(objectAsXmlString(reportCommon, ReportsCommon.class));
            logger.debug(multipart.toXML());
        }

        return multipart;
    }

    @Override
    protected PoxPayloadOut createInstance(String commonPartName, String identifier) {
        PoxPayloadOut result = createReportInstance(identifier);
        return result;
    }

    @Override
    protected ReportsCommon updateInstance(ReportsCommon reportsCommon) {
        ReportsCommon result = new ReportsCommon();
        result.setSupportsSingleDoc(true);
        result.setName("updated-" + reportsCommon.getName());
        result.setOutputMIME("updated-" + reportsCommon.getOutputMIME());
        result.setNotes("updated-" + reportsCommon.getNotes());
        return result;
    }

    @Override
    protected void compareUpdatedInstances(ReportsCommon original, ReportsCommon updated) throws Exception {
        // Check selected fields in the updated common part.
        Assert.assertEquals(updated.getName(), original.getName(),
                "Name in updated object did not match submitted data.");
        // Check the values of fields containing Unicode UTF-8 (non-Latin-1) characters.
        if (logger.isDebugEnabled()) {
            logger.debug("UTF-8 data sent=" + original.getNotes() + "\n"
                    + "UTF-8 data received=" + updated.getNotes());
        }
        Assert.assertTrue(updated.getNotes().contains(getUTF8DataFragment()),
                "UTF-8 data retrieved '" + updated.getNotes()
                + "' does not contain expected data '" + getUTF8DataFragment());
        Assert.assertEquals(updated.getNotes(), original.getNotes(),
                "Notes in updated object did not match submitted data.");
    }

    protected void compareReadInstances(ReportsCommon original, ReportsCommon fromRead) throws Exception {
        Assert.assertEquals(fromRead.getNotes(), getUTF8DataFragment(),
                "UTF-8 data retrieved '" + fromRead.getNotes()
                + "' does not match expected data '" + getUTF8DataFragment());
    }

    /*
     * For convenience and terseness, this test method is the base of the test execution dependency chain. Other test methods may
     * refer to this method in their @Test annotation declarations.
     */
    @Override
    @Test(dataProvider = "testName",
            dependsOnMethods = {
                "org.collectionspace.services.client.test.AbstractServiceTestImpl.baseCRUDTests"})
    public void CRUDTests(String testName) {
        // TODO Auto-generated method stub
    }
}
apache-2.0
alibaba/nacos
console/src/main/resources/static/console-ui/public/js/javascript.js
32209
/* * Copyright 1999-2018 Alibaba Group Holding Ltd. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ (function(mod) { if (typeof exports == "object" && typeof module == "object") // CommonJS mod(require("../../lib/codemirror")); else if (typeof define == "function" && define.amd) // AMD define(["../../lib/codemirror"], mod); else // Plain browser env mod(CodeMirror); })(function(CodeMirror) { "use strict"; function expressionAllowed(stream, state, backUp) { return /^(?:operator|sof|keyword c|case|new|export|default|[\[{}\(,;:]|=>)$/.test(state.lastType) || (state.lastType == "quasi" && /\{\s*$/.test(stream.string.slice(0, stream.pos - (backUp || 0)))) } CodeMirror.defineMode("javascript", function(config, parserConfig) { var indentUnit = config.indentUnit; var statementIndent = parserConfig.statementIndent; var jsonldMode = parserConfig.jsonld; var jsonMode = parserConfig.json || jsonldMode; var isTS = parserConfig.typescript; var wordRE = parserConfig.wordCharacters || /[\w$\xa1-\uffff]/; // Tokenizer var keywords = function(){ function kw(type) {return {type: type, style: "keyword"};} var A = kw("keyword a"), B = kw("keyword b"), C = kw("keyword c"); var operator = kw("operator"), atom = {type: "atom", style: "atom"}; var jsKeywords = { "if": kw("if"), "while": A, "with": A, "else": B, "do": B, "try": B, "finally": B, "return": C, "break": C, "continue": C, "new": kw("new"), "delete": C, "throw": C, "debugger": C, "var": kw("var"), "const": kw("var"), "let": kw("var"), 
"function": kw("function"), "catch": kw("catch"), "for": kw("for"), "switch": kw("switch"), "case": kw("case"), "default": kw("default"), "in": operator, "typeof": operator, "instanceof": operator, "true": atom, "false": atom, "null": atom, "undefined": atom, "NaN": atom, "Infinity": atom, "this": kw("this"), "class": kw("class"), "super": kw("atom"), "yield": C, "export": kw("export"), "import": kw("import"), "extends": C, "await": C }; // Extend the 'normal' keywords with the TypeScript language extensions if (isTS) { var type = {type: "variable", style: "type"}; var tsKeywords = { // object-like things "interface": kw("class"), "implements": C, "namespace": C, "module": kw("module"), "enum": kw("module"), // scope modifiers "public": kw("modifier"), "private": kw("modifier"), "protected": kw("modifier"), "abstract": kw("modifier"), // types "string": type, "number": type, "boolean": type, "any": type }; for (var attr in tsKeywords) { jsKeywords[attr] = tsKeywords[attr]; } } return jsKeywords; }(); var isOperatorChar = /[+\-*&%=<>!?|~^@]/; var isJsonldKeyword = /^@(context|id|value|language|type|container|list|set|reverse|index|base|vocab|graph)"/; function readRegexp(stream) { var escaped = false, next, inSet = false; while ((next = stream.next()) != null) { if (!escaped) { if (next == "/" && !inSet) return; if (next == "[") inSet = true; else if (inSet && next == "]") inSet = false; } escaped = !escaped && next == "\\"; } } // Used as scratch variables to communicate multiple values without // consing up tons of objects. var type, content; function ret(tp, style, cont) { type = tp; content = cont; return style; } function tokenBase(stream, state) { var ch = stream.next(); if (ch == '"' || ch == "'") { state.tokenize = tokenString(ch); return state.tokenize(stream, state); } else if (ch == "." && stream.match(/^\d+(?:[eE][+\-]?\d+)?/)) { return ret("number", "number"); } else if (ch == "." 
&& stream.match("..")) { return ret("spread", "meta"); } else if (/[\[\]{}\(\),;\:\.]/.test(ch)) { return ret(ch); } else if (ch == "=" && stream.eat(">")) { return ret("=>", "operator"); } else if (ch == "0" && stream.eat(/x/i)) { stream.eatWhile(/[\da-f]/i); return ret("number", "number"); } else if (ch == "0" && stream.eat(/o/i)) { stream.eatWhile(/[0-7]/i); return ret("number", "number"); } else if (ch == "0" && stream.eat(/b/i)) { stream.eatWhile(/[01]/i); return ret("number", "number"); } else if (/\d/.test(ch)) { stream.match(/^\d*(?:\.\d*)?(?:[eE][+\-]?\d+)?/); return ret("number", "number"); } else if (ch == "/") { if (stream.eat("*")) { state.tokenize = tokenComment; return tokenComment(stream, state); } else if (stream.eat("/")) { stream.skipToEnd(); return ret("comment", "comment"); } else if (expressionAllowed(stream, state, 1)) { readRegexp(stream); stream.match(/^\b(([gimyu])(?![gimyu]*\2))+\b/); return ret("regexp", "string-2"); } else { stream.eatWhile(isOperatorChar); return ret("operator", "operator", stream.current()); } } else if (ch == "`") { state.tokenize = tokenQuasi; return tokenQuasi(stream, state); } else if (ch == "#") { stream.skipToEnd(); return ret("error", "error"); } else if (isOperatorChar.test(ch)) { if (ch != ">" || !state.lexical || state.lexical.type != ">") stream.eatWhile(isOperatorChar); return ret("operator", "operator", stream.current()); } else if (wordRE.test(ch)) { stream.eatWhile(wordRE); var word = stream.current() if (state.lastType != ".") { if (keywords.propertyIsEnumerable(word)) { var kw = keywords[word] return ret(kw.type, kw.style, word) } if (word == "async" && stream.match(/^\s*[\(\w]/, false)) return ret("async", "keyword", word) } return ret("variable", "variable", word) } } function tokenString(quote) { return function(stream, state) { var escaped = false, next; if (jsonldMode && stream.peek() == "@" && stream.match(isJsonldKeyword)){ state.tokenize = tokenBase; return ret("jsonld-keyword", "meta"); } 
while ((next = stream.next()) != null) { if (next == quote && !escaped) break; escaped = !escaped && next == "\\"; } if (!escaped) state.tokenize = tokenBase; return ret("string", "string"); }; } function tokenComment(stream, state) { var maybeEnd = false, ch; while (ch = stream.next()) { if (ch == "/" && maybeEnd) { state.tokenize = tokenBase; break; } maybeEnd = (ch == "*"); } return ret("comment", "comment"); } function tokenQuasi(stream, state) { var escaped = false, next; while ((next = stream.next()) != null) { if (!escaped && (next == "`" || next == "$" && stream.eat("{"))) { state.tokenize = tokenBase; break; } escaped = !escaped && next == "\\"; } return ret("quasi", "string-2", stream.current()); } var brackets = "([{}])"; // This is a crude lookahead trick to try and notice that we're // parsing the argument patterns for a fat-arrow function before we // actually hit the arrow token. It only works if the arrow is on // the same line as the arguments and there's no strange noise // (comments) in between. Fallback is to only notice when we hit the // arrow, and not declare the arguments as locals for the arrow // body. 
function findFatArrow(stream, state) { if (state.fatArrowAt) state.fatArrowAt = null; var arrow = stream.string.indexOf("=>", stream.start); if (arrow < 0) return; if (isTS) { // Try to skip TypeScript return type declarations after the arguments var m = /:\s*(?:\w+(?:<[^>]*>|\[\])?|\{[^}]*\})\s*$/.exec(stream.string.slice(stream.start, arrow)) if (m) arrow = m.index } var depth = 0, sawSomething = false; for (var pos = arrow - 1; pos >= 0; --pos) { var ch = stream.string.charAt(pos); var bracket = brackets.indexOf(ch); if (bracket >= 0 && bracket < 3) { if (!depth) { ++pos; break; } if (--depth == 0) { if (ch == "(") sawSomething = true; break; } } else if (bracket >= 3 && bracket < 6) { ++depth; } else if (wordRE.test(ch)) { sawSomething = true; } else if (/["'\/]/.test(ch)) { return; } else if (sawSomething && !depth) { ++pos; break; } } if (sawSomething && !depth) state.fatArrowAt = pos; } // Parser var atomicTypes = {"atom": true, "number": true, "variable": true, "string": true, "regexp": true, "this": true, "jsonld-keyword": true}; function JSLexical(indented, column, type, align, prev, info) { this.indented = indented; this.column = column; this.type = type; this.prev = prev; this.info = info; if (align != null) this.align = align; } function inScope(state, varname) { for (var v = state.localVars; v; v = v.next) if (v.name == varname) return true; for (var cx = state.context; cx; cx = cx.prev) { for (var v = cx.vars; v; v = v.next) if (v.name == varname) return true; } } function parseJS(state, style, type, content, stream) { var cc = state.cc; // Communicate our context to the combinators. // (Less wasteful than consing up a hundred closures on every call.) cx.state = state; cx.stream = stream; cx.marked = null, cx.cc = cc; cx.style = style; if (!state.lexical.hasOwnProperty("align")) state.lexical.align = true; while(true) { var combinator = cc.length ? cc.pop() : jsonMode ? 
expression : statement; if (combinator(type, content)) { while(cc.length && cc[cc.length - 1].lex) cc.pop()(); if (cx.marked) return cx.marked; if (type == "variable" && inScope(state, content)) return "variable-2"; return style; } } } // Combinator utils var cx = {state: null, column: null, marked: null, cc: null}; function pass() { for (var i = arguments.length - 1; i >= 0; i--) cx.cc.push(arguments[i]); } function cont() { pass.apply(null, arguments); return true; } function register(varname) { function inList(list) { for (var v = list; v; v = v.next) if (v.name == varname) return true; return false; } var state = cx.state; cx.marked = "def"; if (state.context) { if (inList(state.localVars)) return; state.localVars = {name: varname, next: state.localVars}; } else { if (inList(state.globalVars)) return; if (parserConfig.globalVars) state.globalVars = {name: varname, next: state.globalVars}; } } // Combinators var defaultVars = {name: "this", next: {name: "arguments"}}; function pushcontext() { cx.state.context = {prev: cx.state.context, vars: cx.state.localVars}; cx.state.localVars = defaultVars; } function popcontext() { cx.state.localVars = cx.state.context.vars; cx.state.context = cx.state.context.prev; } function pushlex(type, info) { var result = function() { var state = cx.state, indent = state.indented; if (state.lexical.type == "stat") indent = state.lexical.indented; else for (var outer = state.lexical; outer && outer.type == ")" && outer.align; outer = outer.prev) indent = outer.indented; state.lexical = new JSLexical(indent, cx.stream.column(), type, null, state.lexical, info); }; result.lex = true; return result; } function poplex() { var state = cx.state; if (state.lexical.prev) { if (state.lexical.type == ")") state.indented = state.lexical.indented; state.lexical = state.lexical.prev; } } poplex.lex = true; function expect(wanted) { function exp(type) { if (type == wanted) return cont(); else if (wanted == ";") return pass(); else return cont(exp); 
}; return exp; } function statement(type, value) { if (type == "var") return cont(pushlex("vardef", value.length), vardef, expect(";"), poplex); if (type == "keyword a") return cont(pushlex("form"), parenExpr, statement, poplex); if (type == "keyword b") return cont(pushlex("form"), statement, poplex); if (type == "{") return cont(pushlex("}"), block, poplex); if (type == ";") return cont(); if (type == "if") { if (cx.state.lexical.info == "else" && cx.state.cc[cx.state.cc.length - 1] == poplex) cx.state.cc.pop()(); return cont(pushlex("form"), parenExpr, statement, poplex, maybeelse); } if (type == "function") return cont(functiondef); if (type == "for") return cont(pushlex("form"), forspec, statement, poplex); if (type == "variable") { if (isTS && value == "type") { cx.marked = "keyword" return cont(typeexpr, expect("operator"), typeexpr, expect(";")); } else { return cont(pushlex("stat"), maybelabel); } } if (type == "switch") return cont(pushlex("form"), parenExpr, expect("{"), pushlex("}", "switch"), block, poplex, poplex); if (type == "case") return cont(expression, expect(":")); if (type == "default") return cont(expect(":")); if (type == "catch") return cont(pushlex("form"), pushcontext, expect("("), funarg, expect(")"), statement, poplex, popcontext); if (type == "class") return cont(pushlex("form"), className, poplex); if (type == "export") return cont(pushlex("stat"), afterExport, poplex); if (type == "import") return cont(pushlex("stat"), afterImport, poplex); if (type == "module") return cont(pushlex("form"), pattern, expect("{"), pushlex("}"), block, poplex, poplex) if (type == "async") return cont(statement) if (value == "@") return cont(expression, statement) return pass(pushlex("stat"), expression, expect(";"), poplex); } function expression(type) { return expressionInner(type, false); } function expressionNoComma(type) { return expressionInner(type, true); } function parenExpr(type) { if (type != "(") return pass() return cont(pushlex(")"), 
expression, expect(")"), poplex) } function expressionInner(type, noComma) { if (cx.state.fatArrowAt == cx.stream.start) { var body = noComma ? arrowBodyNoComma : arrowBody; if (type == "(") return cont(pushcontext, pushlex(")"), commasep(pattern, ")"), poplex, expect("=>"), body, popcontext); else if (type == "variable") return pass(pushcontext, pattern, expect("=>"), body, popcontext); } var maybeop = noComma ? maybeoperatorNoComma : maybeoperatorComma; if (atomicTypes.hasOwnProperty(type)) return cont(maybeop); if (type == "function") return cont(functiondef, maybeop); if (type == "class") return cont(pushlex("form"), classExpression, poplex); if (type == "keyword c" || type == "async") return cont(noComma ? maybeexpressionNoComma : maybeexpression); if (type == "(") return cont(pushlex(")"), maybeexpression, expect(")"), poplex, maybeop); if (type == "operator" || type == "spread") return cont(noComma ? expressionNoComma : expression); if (type == "[") return cont(pushlex("]"), arrayLiteral, poplex, maybeop); if (type == "{") return contCommasep(objprop, "}", null, maybeop); if (type == "quasi") return pass(quasi, maybeop); if (type == "new") return cont(maybeTarget(noComma)); return cont(); } function maybeexpression(type) { if (type.match(/[;\}\)\],]/)) return pass(); return pass(expression); } function maybeexpressionNoComma(type) { if (type.match(/[;\}\)\],]/)) return pass(); return pass(expressionNoComma); } function maybeoperatorComma(type, value) { if (type == ",") return cont(expression); return maybeoperatorNoComma(type, value, false); } function maybeoperatorNoComma(type, value, noComma) { var me = noComma == false ? maybeoperatorComma : maybeoperatorNoComma; var expr = noComma == false ? expression : expressionNoComma; if (type == "=>") return cont(pushcontext, noComma ? 
arrowBodyNoComma : arrowBody, popcontext); if (type == "operator") { if (/\+\+|--/.test(value)) return cont(me); if (value == "?") return cont(expression, expect(":"), expr); return cont(expr); } if (type == "quasi") { return pass(quasi, me); } if (type == ";") return; if (type == "(") return contCommasep(expressionNoComma, ")", "call", me); if (type == ".") return cont(property, me); if (type == "[") return cont(pushlex("]"), maybeexpression, expect("]"), poplex, me); if (isTS && value == "as") { cx.marked = "keyword"; return cont(typeexpr, me) } } function quasi(type, value) { if (type != "quasi") return pass(); if (value.slice(value.length - 2) != "${") return cont(quasi); return cont(expression, continueQuasi); } function continueQuasi(type) { if (type == "}") { cx.marked = "string-2"; cx.state.tokenize = tokenQuasi; return cont(quasi); } } function arrowBody(type) { findFatArrow(cx.stream, cx.state); return pass(type == "{" ? statement : expression); } function arrowBodyNoComma(type) { findFatArrow(cx.stream, cx.state); return pass(type == "{" ? statement : expressionNoComma); } function maybeTarget(noComma) { return function(type) { if (type == ".") return cont(noComma ? targetNoComma : target); else return pass(noComma ? 
expressionNoComma : expression); }; } function target(_, value) { if (value == "target") { cx.marked = "keyword"; return cont(maybeoperatorComma); } } function targetNoComma(_, value) { if (value == "target") { cx.marked = "keyword"; return cont(maybeoperatorNoComma); } } function maybelabel(type) { if (type == ":") return cont(poplex, statement); return pass(maybeoperatorComma, expect(";"), poplex); } function property(type) { if (type == "variable") {cx.marked = "property"; return cont();} } function objprop(type, value) { if (type == "async") { cx.marked = "property"; return cont(objprop); } else if (type == "variable" || cx.style == "keyword") { cx.marked = "property"; if (value == "get" || value == "set") return cont(getterSetter); return cont(afterprop); } else if (type == "number" || type == "string") { cx.marked = jsonldMode ? "property" : (cx.style + " property"); return cont(afterprop); } else if (type == "jsonld-keyword") { return cont(afterprop); } else if (type == "modifier") { return cont(objprop) } else if (type == "[") { return cont(expression, expect("]"), afterprop); } else if (type == "spread") { return cont(expression, afterprop); } else if (type == ":") { return pass(afterprop) } } function getterSetter(type) { if (type != "variable") return pass(afterprop); cx.marked = "property"; return cont(functiondef); } function afterprop(type) { if (type == ":") return cont(expressionNoComma); if (type == "(") return pass(functiondef); } function commasep(what, end, sep) { function proceed(type, value) { if (sep ? 
sep.indexOf(type) > -1 : type == ",") { var lex = cx.state.lexical; if (lex.info == "call") lex.pos = (lex.pos || 0) + 1; return cont(function(type, value) { if (type == end || value == end) return pass() return pass(what) }, proceed); } if (type == end || value == end) return cont(); return cont(expect(end)); } return function(type, value) { if (type == end || value == end) return cont(); return pass(what, proceed); }; } function contCommasep(what, end, info) { for (var i = 3; i < arguments.length; i++) cx.cc.push(arguments[i]); return cont(pushlex(end, info), commasep(what, end), poplex); } function block(type) { if (type == "}") return cont(); return pass(statement, block); } function maybetype(type, value) { if (isTS) { if (type == ":") return cont(typeexpr); if (value == "?") return cont(maybetype); } } function typeexpr(type) { if (type == "variable") {cx.marked = "type"; return cont(afterType);} if (type == "string" || type == "number" || type == "atom") return cont(afterType); if (type == "{") return cont(pushlex("}"), commasep(typeprop, "}", ",;"), poplex, afterType) if (type == "(") return cont(commasep(typearg, ")"), maybeReturnType) } function maybeReturnType(type) { if (type == "=>") return cont(typeexpr) } function typeprop(type, value) { if (type == "variable" || cx.style == "keyword") { cx.marked = "property" return cont(typeprop) } else if (value == "?") { return cont(typeprop) } else if (type == ":") { return cont(typeexpr) } else if (type == "[") { return cont(expression, maybetype, expect("]"), typeprop) } } function typearg(type) { if (type == "variable") return cont(typearg) else if (type == ":") return cont(typeexpr) } function afterType(type, value) { if (value == "<") return cont(pushlex(">"), commasep(typeexpr, ">"), poplex, afterType) if (value == "|" || type == ".") return cont(typeexpr) if (type == "[") return cont(expect("]"), afterType) if (value == "extends") return cont(typeexpr) } function vardef() { return pass(pattern, maybetype, 
maybeAssign, vardefCont); } function pattern(type, value) { if (type == "modifier") return cont(pattern) if (type == "variable") { register(value); return cont(); } if (type == "spread") return cont(pattern); if (type == "[") return contCommasep(pattern, "]"); if (type == "{") return contCommasep(proppattern, "}"); } function proppattern(type, value) { if (type == "variable" && !cx.stream.match(/^\s*:/, false)) { register(value); return cont(maybeAssign); } if (type == "variable") cx.marked = "property"; if (type == "spread") return cont(pattern); if (type == "}") return pass(); return cont(expect(":"), pattern, maybeAssign); } function maybeAssign(_type, value) { if (value == "=") return cont(expressionNoComma); } function vardefCont(type) { if (type == ",") return cont(vardef); } function maybeelse(type, value) { if (type == "keyword b" && value == "else") return cont(pushlex("form", "else"), statement, poplex); } function forspec(type) { if (type == "(") return cont(pushlex(")"), forspec1, expect(")"), poplex); } function forspec1(type) { if (type == "var") return cont(vardef, expect(";"), forspec2); if (type == ";") return cont(forspec2); if (type == "variable") return cont(formaybeinof); return pass(expression, expect(";"), forspec2); } function formaybeinof(_type, value) { if (value == "in" || value == "of") { cx.marked = "keyword"; return cont(expression); } return cont(maybeoperatorComma, forspec2); } function forspec2(type, value) { if (type == ";") return cont(forspec3); if (value == "in" || value == "of") { cx.marked = "keyword"; return cont(expression); } return pass(expression, expect(";"), forspec3); } function forspec3(type) { if (type != ")") cont(expression); } function functiondef(type, value) { if (value == "*") {cx.marked = "keyword"; return cont(functiondef);} if (type == "variable") {register(value); return cont(functiondef);} if (type == "(") return cont(pushcontext, pushlex(")"), commasep(funarg, ")"), poplex, maybetype, statement, 
popcontext); if (isTS && value == "<") return cont(pushlex(">"), commasep(typeexpr, ">"), poplex, functiondef) } function funarg(type) { if (type == "spread") return cont(funarg); return pass(pattern, maybetype, maybeAssign); } function classExpression(type, value) { // Class expressions may have an optional name. if (type == "variable") return className(type, value); return classNameAfter(type, value); } function className(type, value) { if (type == "variable") {register(value); return cont(classNameAfter);} } function classNameAfter(type, value) { if (value == "<") return cont(pushlex(">"), commasep(typeexpr, ">"), poplex, classNameAfter) if (value == "extends" || value == "implements" || (isTS && type == ",")) return cont(isTS ? typeexpr : expression, classNameAfter); if (type == "{") return cont(pushlex("}"), classBody, poplex); } function classBody(type, value) { if (type == "variable" || cx.style == "keyword") { if ((value == "async" || value == "static" || value == "get" || value == "set" || (isTS && (value == "public" || value == "private" || value == "protected" || value == "readonly" || value == "abstract"))) && cx.stream.match(/^\s+[\w$\xa1-\uffff]/, false)) { cx.marked = "keyword"; return cont(classBody); } cx.marked = "property"; return cont(isTS ? classfield : functiondef, classBody); } if (type == "[") return cont(expression, expect("]"), isTS ? 
classfield : functiondef, classBody) if (value == "*") { cx.marked = "keyword"; return cont(classBody); } if (type == ";") return cont(classBody); if (type == "}") return cont(); if (value == "@") return cont(expression, classBody) } function classfield(type, value) { if (value == "?") return cont(classfield) if (type == ":") return cont(typeexpr, maybeAssign) if (value == "=") return cont(expressionNoComma) return pass(functiondef) } function afterExport(type, value) { if (value == "*") { cx.marked = "keyword"; return cont(maybeFrom, expect(";")); } if (value == "default") { cx.marked = "keyword"; return cont(expression, expect(";")); } if (type == "{") return cont(commasep(exportField, "}"), maybeFrom, expect(";")); return pass(statement); } function exportField(type, value) { if (value == "as") { cx.marked = "keyword"; return cont(expect("variable")); } if (type == "variable") return pass(expressionNoComma, exportField); } function afterImport(type) { if (type == "string") return cont(); return pass(importSpec, maybeMoreImports, maybeFrom); } function importSpec(type, value) { if (type == "{") return contCommasep(importSpec, "}"); if (type == "variable") register(value); if (value == "*") cx.marked = "keyword"; return cont(maybeAs); } function maybeMoreImports(type) { if (type == ",") return cont(importSpec, maybeMoreImports) } function maybeAs(_type, value) { if (value == "as") { cx.marked = "keyword"; return cont(importSpec); } } function maybeFrom(_type, value) { if (value == "from") { cx.marked = "keyword"; return cont(expression); } } function arrayLiteral(type) { if (type == "]") return cont(); return pass(commasep(expressionNoComma, "]")); } function isContinuedStatement(state, textAfter) { return state.lastType == "operator" || state.lastType == "," || isOperatorChar.test(textAfter.charAt(0)) || /[,.]/.test(textAfter.charAt(0)); } // Interface return { startState: function(basecolumn) { var state = { tokenize: tokenBase, lastType: "sof", cc: [], lexical: 
new JSLexical((basecolumn || 0) - indentUnit, 0, "block", false), localVars: parserConfig.localVars, context: parserConfig.localVars && {vars: parserConfig.localVars}, indented: basecolumn || 0 }; if (parserConfig.globalVars && typeof parserConfig.globalVars == "object") state.globalVars = parserConfig.globalVars; return state; }, token: function(stream, state) { if (stream.sol()) { if (!state.lexical.hasOwnProperty("align")) state.lexical.align = false; state.indented = stream.indentation(); findFatArrow(stream, state); } if (state.tokenize != tokenComment && stream.eatSpace()) return null; var style = state.tokenize(stream, state); if (type == "comment") return style; state.lastType = type == "operator" && (content == "++" || content == "--") ? "incdec" : type; return parseJS(state, style, type, content, stream); }, indent: function(state, textAfter) { if (state.tokenize == tokenComment) return CodeMirror.Pass; if (state.tokenize != tokenBase) return 0; var firstChar = textAfter && textAfter.charAt(0), lexical = state.lexical, top // Kludge to prevent 'maybelse' from blocking lexical scope pops if (!/^\s*else\b/.test(textAfter)) for (var i = state.cc.length - 1; i >= 0; --i) { var c = state.cc[i]; if (c == poplex) lexical = lexical.prev; else if (c != maybeelse) break; } while ((lexical.type == "stat" || lexical.type == "form") && (firstChar == "}" || ((top = state.cc[state.cc.length - 1]) && (top == maybeoperatorComma || top == maybeoperatorNoComma) && !/^[,\.=+\-*:?[\(]/.test(textAfter)))) lexical = lexical.prev; if (statementIndent && lexical.type == ")" && lexical.prev.type == "stat") lexical = lexical.prev; var type = lexical.type, closing = firstChar == type; if (type == "vardef") return lexical.indented + (state.lastType == "operator" || state.lastType == "," ? 
lexical.info + 1 : 0); else if (type == "form" && firstChar == "{") return lexical.indented; else if (type == "form") return lexical.indented + indentUnit; else if (type == "stat") return lexical.indented + (isContinuedStatement(state, textAfter) ? statementIndent || indentUnit : 0); else if (lexical.info == "switch" && !closing && parserConfig.doubleIndentSwitch != false) return lexical.indented + (/^(?:case|default)\b/.test(textAfter) ? indentUnit : 2 * indentUnit); else if (lexical.align) return lexical.column + (closing ? 0 : 1); else return lexical.indented + (closing ? 0 : indentUnit); }, electricInput: /^\s*(?:case .*?:|default:|\{|\})$/, blockCommentStart: jsonMode ? null : "/*", blockCommentEnd: jsonMode ? null : "*/", lineComment: jsonMode ? null : "//", fold: "brace", closeBrackets: "()[]{}''\"\"``", helperType: jsonMode ? "json" : "javascript", jsonldMode: jsonldMode, jsonMode: jsonMode, expressionAllowed: expressionAllowed, skipExpression: function(state) { var top = state.cc[state.cc.length - 1] if (top == expression || top == expressionNoComma) state.cc.pop() } }; }); CodeMirror.registerHelper("wordChars", "javascript", /[\w$]/); CodeMirror.defineMIME("text/javascript", "javascript"); CodeMirror.defineMIME("text/ecmascript", "javascript"); CodeMirror.defineMIME("application/javascript", "javascript"); CodeMirror.defineMIME("application/x-javascript", "javascript"); CodeMirror.defineMIME("application/ecmascript", "javascript"); CodeMirror.defineMIME("application/json", {name: "javascript", json: true}); CodeMirror.defineMIME("application/x-json", {name: "javascript", json: true}); CodeMirror.defineMIME("application/ld+json", {name: "javascript", jsonld: true}); CodeMirror.defineMIME("text/typescript", { name: "javascript", typescript: true }); CodeMirror.defineMIME("application/typescript", { name: "javascript", typescript: true }); });
apache-2.0
lei-xia/helix
helix-core/src/test/java/org/apache/helix/manager/zk/TestWtCacheSyncOpSingleThread.java
6774
package org.apache.helix.manager.zk; /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ import java.util.Arrays; import java.util.Collections; import java.util.Date; import java.util.List; import org.apache.helix.AccessOption; import org.apache.helix.PropertyPathBuilder; import org.apache.helix.TestHelper; import org.apache.helix.zookeeper.datamodel.ZNRecord; import org.apache.helix.zookeeper.datamodel.ZNRecordUpdater; import org.apache.helix.ZkUnitTestBase; import org.testng.Assert; import org.testng.annotations.Test; public class TestWtCacheSyncOpSingleThread extends ZkUnitTestBase { @Test public void testHappyPathZkCacheBaseDataAccessor() { String className = TestHelper.getTestClassName(); String methodName = TestHelper.getTestMethodName(); String clusterName = className + "_" + methodName; System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis())); // init zkCacheDataAccessor String curStatePath = PropertyPathBuilder.instanceCurrentState(clusterName, "localhost_8901"); String extViewPath = PropertyPathBuilder.externalView(clusterName); ZkBaseDataAccessor<ZNRecord> baseAccessor = new ZkBaseDataAccessor<>(_gZkClient); baseAccessor.create(curStatePath, null, AccessOption.PERSISTENT); List<String> cachePaths = 
Arrays.asList(curStatePath, extViewPath); ZkCacheBaseDataAccessor<ZNRecord> accessor = new ZkCacheBaseDataAccessor<>(baseAccessor, null, cachePaths, null); boolean ret = TestHelper.verifyZkCache(cachePaths, accessor._wtCache._cache, _gZkClient, true); Assert.assertTrue(ret, "wtCache doesn't match data on Zk"); // create 10 current states for (int i = 0; i < 10; i++) { String path = curStatePath + "/session_0/TestDB" + i; boolean success = accessor.create(path, new ZNRecord("TestDB" + i), AccessOption.PERSISTENT); Assert.assertTrue(success, "Should succeed in create: " + path); } // verify wtCache // TestHelper.printCache(accessor._wtCache); ret = TestHelper.verifyZkCache(cachePaths, accessor._wtCache._cache, _gZkClient, false); Assert.assertTrue(ret, "wtCache doesn't match data on Zk"); // update each current state 10 times, single thread for (int i = 0; i < 10; i++) { String path = curStatePath + "/session_0/TestDB" + i; for (int j = 0; j < 10; j++) { ZNRecord newRecord = new ZNRecord("TestDB" + i); newRecord.setSimpleField("" + j, "" + j); boolean success = accessor.update(path, new ZNRecordUpdater(newRecord), AccessOption.PERSISTENT); Assert.assertTrue(success, "Should succeed in update: " + path); } } // verify cache // TestHelper.printCache(accessor._wtCache._cache); ret = TestHelper.verifyZkCache(cachePaths, accessor._wtCache._cache, _gZkClient, false); Assert.assertTrue(ret, "wtCache doesn't match data on Zk"); // set 10 external views for (int i = 0; i < 10; i++) { String path = PropertyPathBuilder.externalView(clusterName, "TestDB" + i); boolean success = accessor.set(path, new ZNRecord("TestDB" + i), AccessOption.PERSISTENT); Assert.assertTrue(success, "Should succeed in set: " + path); } // verify wtCache // accessor.printWtCache(); ret = TestHelper.verifyZkCache(cachePaths, accessor._wtCache._cache, _gZkClient, false); Assert.assertTrue(ret, "wtCache doesn't match data on Zk"); // get 10 external views for (int i = 0; i < 10; i++) { String path = 
PropertyPathBuilder.externalView(clusterName, "TestDB" + i); ZNRecord record = accessor.get(path, null, 0); Assert.assertEquals(record.getId(), "TestDB" + i); } // getChildNames List<String> childNames = accessor.getChildNames(extViewPath, 0); // System.out.println(childNames); Assert.assertEquals(childNames.size(), 10, "Should contain only: TestDB0-9"); for (int i = 0; i < 10; i++) { Assert.assertTrue(childNames.contains("TestDB" + i)); } // exists for (int i = 0; i < 10; i++) { String path = PropertyPathBuilder.instanceCurrentState(clusterName, "localhost_8901", "session_0", "TestDB" + i); Assert.assertTrue(accessor.exists(path, 0)); } deleteCluster(clusterName); System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis())); } @Test public void testCreateFailZkCacheBaseDataAccessor() { String className = TestHelper.getTestClassName(); String methodName = TestHelper.getTestMethodName(); String clusterName = className + "_" + methodName; System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis())); // init zkCacheDataAccessor String curStatePath = PropertyPathBuilder.instanceCurrentState(clusterName, "localhost_8901"); ZkBaseDataAccessor<ZNRecord> baseAccessor = new ZkBaseDataAccessor<>(_gZkClient); ZkCacheBaseDataAccessor<ZNRecord> accessor = new ZkCacheBaseDataAccessor<>(baseAccessor, null, Collections.singletonList(curStatePath), null); // create 10 current states for (int i = 0; i < 10; i++) { String path = PropertyPathBuilder.instanceCurrentState(clusterName, "localhost_8901", "session_1", "TestDB" + i); boolean success = accessor.create(path, new ZNRecord("TestDB" + i), AccessOption.PERSISTENT); Assert.assertTrue(success, "Should succeed in create: " + path); } // create same 10 current states again, should fail for (int i = 0; i < 10; i++) { String path = PropertyPathBuilder.instanceCurrentState(clusterName, "localhost_8901", "session_1", "TestDB" + i); boolean success = accessor.create(path, new 
ZNRecord("TestDB" + i), AccessOption.PERSISTENT); Assert.assertFalse(success, "Should fail in create due to NodeExists: " + path); } deleteCluster(clusterName); System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis())); } }
apache-2.0
objectos/core
io/io-legacy/src/main/java/br/com/objectos/way/io/SheetXlsApachePOI.java
2084
/* * Copyright 2013 Objectos, Fábrica de Software LTDA. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package br.com.objectos.way.io; import java.io.ByteArrayOutputStream; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; import java.util.List; import br.com.objectos.core.lang.Strings; /** * @author Marcio Endo */ class SheetXlsApachePOI<T> extends AbstractSheetXls implements SheetXls { private final TableWriter<T> delegate; private final List<T> entities; private String name; public SheetXlsApachePOI(TableWriter<T> delegate, List<T> entities) { this.delegate = delegate; this.entities = entities; } @Override public SheetXls named(String name) { this.name = name; return this; } @Override public byte[] toByteArray() throws IOException { POIWorkbook wb = new POIWorkbook(); apachePOI(wb); ByteArrayOutputStream stream = new ByteArrayOutputStream(); wb.write(stream); return stream.toByteArray(); } @Override public void writeTo(File file) throws IOException { POIWorkbook wb = new POIWorkbook(); apachePOI(wb); FileOutputStream stream = new FileOutputStream(file); wb.write(stream); } @Override void apachePOI(POIWorkbook wb) { POISheet sheet = Strings.isNullOrEmpty(name) ? wb.createSheet() : wb.createSheet(name); delegate.apachePOI(sheet); TableHeader header = delegate.getHeader(); header.apachePOI(sheet); TableBody body = delegate.getBody(); body.apachePOI(sheet, header.maxSize, entities); } }
apache-2.0
alefesouza/gdg-sp
Back-end/api/delete_users.php
1086
<?php /* * Copyright (C) 2017 Alefe Souza <contact@alefesouza.com> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ use GDGSP\API\OneSignalAPI; use GDGSP\Database\DB; include("../index.php"); $meetup_ids = array("GDG-SP"); foreach($meetup_ids as $meetup_id) { DB::init($meetup_id); $return = OneSignalAPI::getDevices(); foreach($return->players as $player) { if($player->invalid_identifier) { $member_id = $player->tags->member_id ?: ""; if($member_id != "") { $db->deleteUser($member_id); break; } } } } ?>
apache-2.0
educask/EducaskCore
core/classes/MenuEngine.php
18223
<?php /** * User: Keegan Bailey * Date: 13/05/14 * Time: 9:53 AM */ class MenuEngine { /* Checking to see if the instance variable is holding onto to status engine object * and if it's not create one. */ private static $instance; public static function getInstance() { if (!isset(self::$instance)) { self::$instance = new MenuEngine(); } return self::$instance; } private $foundMenus; private $foundMenuItems; private function __construct() { $this->foundMenus = array(); $this->foundMenuItems = array(); } //region Get public function getMenu($inMenuID) { //get a single menu from the database based off of ID if (!is_numeric($inMenuID)) { return false; } if(isset($this->foundMenus[$inMenuID])) { return $this->foundMenus[$inMenuID]; } $database = Database::getInstance(); if (!$database->isConnected()) { return false; } $inMenuID = $database->escapeString($inMenuID); // get the menu specified $results = $database->getData("*", "menu", "menuID = {$inMenuID}"); if ($results === false) { return false; } if($results === null) { return false; } if (count($results) > 1) { return false; } $menuID = $results[0]['menuID']; $menuName = $results[0]['menuName']; $menuRegion = $results[0]['themeRegion']; $menuEnabled = !!$results[0]['enabled']; // convert to bool // get all top level menu items for that menu $itemResults = $database->getData("*", "menuItem", "menuID = {$menuID} AND parent = 0 ORDER BY weight"); // turn each top level into a menuItem object $menuItems = array(); foreach ($itemResults as $item) { $itemID = $item['menuItemID']; if (!$this->menuItemIsVisible($itemID, PAGE_TYPE, CurrentUser::getUserSession()->getRoleID())) { continue; } $menuItem = $this->getMenuItem($itemID); if($menuItem === false) { continue; } $menuItems[] = $menuItem; } $menu = new Menu($menuID, $menuName, $menuRegion, $menuItems, $menuEnabled); $this->foundMenus[$menuID] = $menu; return $menu; } public function getMenuItem($inMenuItemID) { //get a single menuItem from DB based off of ID if 
(!is_numeric($inMenuItemID)) { return false; } if(! $this->menuItemIsVisible($inMenuItemID, PAGE_TYPE, CurrentUser::getUserSession()->getRoleID())) { return false; } if(isset($this->foundMenuItems[$inMenuItemID])) { return $this->foundMenuItems[$inMenuItemID]; } $database = Database::getInstance(); if(! $database->isConnected()) { return false; } $inMenuItemID = $database->escapeString($inMenuItemID); // get all menu items for this menu $results = $database->getData("*", "menuItem", "menuItemID = {$inMenuItemID} ORDER BY weight"); if ($results === false) { return false; } if ($results === null) { return false; } if(count($results) > 1) { return false; } // are there children? $children = array(); $hasChildren = false; if ((int)$results[0]['hasChildren'] === 1) { $hasChildren = true; $children = $this->getMenuItemChildren($results[0]['menuItemID']); } if ($children === false) { $hasChildren = false; $children = array(); } // make a menuItem Object $menuItem = new MenuItem($results[0]['menuID'], $results[0]['menuItemID'], $results[0]['linkText'], new Link($results[0]['href']), $results[0]['weight'], $hasChildren, !!$results[0]['enabled'], $results[0]['parent'], $children); return $menuItem; } private function getMenuItemChildren($inID) { if (!is_numeric($inID)) { return false; } $database = Database::getInstance(); if(! 
$database->isConnected()) { return false; } $inID = $database->escapeString($inID); $results = $database->getData('*', 'menuItem', "parent = {$inID}"); if ($results === false) { return false; } if($results === null) { return false; } $children = array(); foreach ($results as $row) { $itemID = $row['menuItemID']; if (!$this->menuItemIsVisible($itemID, PAGE_TYPE, CurrentUser::getUserSession()->getRoleID())) { continue; } $children[] = $this->getMenuItem($itemID); } return $children; } public function setMenu(Menu $inMenu) { //takes in a menu object and updates DB $permissionEngine = PermissionEngine::getInstance(); if (!$permissionEngine->checkPermission("userCanEditMenus")) { return false; } $database = Database::getInstance(); if(! $database->isConnected()) { return false; } $menuID = $database->escapeString($inMenu->getID()); $menuName = $database->escapeString($inMenu->getName()); $themeRegion = $database->escapeString($inMenu->getThemeRegion()); $enabled = $inMenu->isEnabled(); if($enabled === true) { $enabled = 1; } else { $enabled = 0; } $results = $database->updateTable("menu", "menuName = '{$menuName}', themeRegion = '{$themeRegion}', enabled = {$enabled}", "menuID = {$menuID}"); if($results === false) { return false; } return true; } public function setMenuItem(MenuItem $inMenuItem) { $permissionEngine = PermissionEngine::getInstance(); if (!$permissionEngine->checkPermission("userCanEditMenuItems")) { return false; } $database = Database::getInstance(); if(! 
$database->isConnected()) { return false; } $menuItemID = $database->escapeString($inMenuItem->getID()); $menuID = $database->escapeString($inMenuItem->getMenuID()); $linkText = $database->escapeString($inMenuItem->getLinkText()); $linkHref = $database->escapeString($inMenuItem->getHref()->getRawHref()); $weight = $database->escapeString($inMenuItem->getWeight()); $parent = $database->escapeString($inMenuItem->getParent()); if($inMenuItem->hasChildren() === true) { $hasChildren = 1; } else { $hasChildren = 0; } if($inMenuItem->isEnabled() === true) { $enabled = 1; } else { $enabled = 0; } $results = $database->updateTable("menuItem", "menuID = {$menuID}, linkText = '{$linkText}', href = '{$linkHref}', weight = {$weight}, parent={$parent}, hasChildren = {$hasChildren}, enabled = {$enabled}", "menuItemID = {$menuItemID}"); if($results === false) { return false; } return true; } public function addMenu(Menu $inMenu) { $permissionEngine = PermissionEngine::getInstance(); if(! $permissionEngine->currentUserCanDo("userCanAddMenus")) { return false; } $database = Database::getInstance(); if(! $database->isConnected()) { return false; } $menuName = $database->escapeString($inMenu->getName()); $themeRegion = $database->escapeString($inMenu->getThemeRegion()); if($inMenu->isEnabled()) { $enabled = 1; } else { $enabled = 0; } $results = $database->insertData("menu", "'menuName', 'themeRegion', 'enabled'", "'{$menuName}', '{$themeRegion}', {$enabled}"); if($results === false) { return false; } return true; } public function addMenuItem(MenuItem $inMenuItem) { $permissionEngine = PermissionEngine::getInstance(); if (!$permissionEngine->currentUserCanDo("userCanAddMenuItems")) { return false; } $database = Database::getInstance(); if(! 
$database->isConnected()) { return false; } $menuID = $database->escapeString($inMenuItem->getMenuID()); $linkText = $database->escapeString($inMenuItem->getLinkText()); $linkHref = $database->escapeString($inMenuItem->getHref()->getRawHref()); $weight = $database->escapeString($inMenuItem->getWeight()); $parent = $database->escapeString($inMenuItem->getParent()); if($inMenuItem->hasChildren()) { $hasChildren = 1; } else { $hasChildren = 0; } if($inMenuItem->isEnabled()) { $enabled = 1; } else { $enabled = 0; } $results = $database->insertData("menuItem", "menuID, linkText, href, weight, hasChildren, enabled, parent", "{$menuID}, '{$linkText}', '{$linkHref}', {$weight}, {$hasChildren}, {$enabled}, {$parent}"); if($results === false) { return false; } return true; } public function deleteMenu($inMenuID) { if (!is_numeric($inMenuID)) { return false; } $permissionEngine = PermissionEngine::getInstance(); if (! $permissionEngine->currentUserCanDo("userCanDeleteMenus")) { return false; } $database = Database::getInstance(); if(! $database->isConnected()) { return false; } $inMenuID = $database->escapeString($inMenuID); $results = $database->removeData("menu", "menuID = {$inMenuID}"); if($results === false) { return false; } return true; } public function deleteMenuItem($inMenuItemID) { if (!is_numeric($inMenuItemID)) { return false; } $permissionEngine = PermissionEngine::getInstance(); if (! $permissionEngine->currentUserCanDo("userCanDeleteMenuItems")) { return false; } $database = Database::getInstance(); if(! 
$database->isConnected()) { return false; } $inMenuItemID = $database->escapeString($inMenuItemID); $results = $database->removeData("menuItem", "menuItemID = {$inMenuItemID}"); if($results === false) { return false; } return true; } private function menuItemIsVisible($menuItemID, $pageType, $roleID) { if (!is_numeric($menuItemID)) { return false; } if ($menuItemID < 1) { return false; } $database = Database::getInstance(); if (!$database->isConnected()) { return false; } $menuItemID = $database->escapeString($menuItemID); // check to see if it's in the visibility table $results = $database->getData('*', 'menuItemVisibility', 'menuItemID = ' . $menuItemID); //Default is to display the menu item unless specified. if ($results === null) { return true; } //Query failed. Play it safe and don't display the menu item. if ($results === false) { return false; } $comparators = array('referenceType' => '', 'referenceValue' => ''); $hookEngine = HookEngine::getInstance(); $comparators = $hookEngine->runFilter('menuItemVisibilityComparator', $comparators); $comparators[] = array('referenceType' => 'pageType', 'referenceValue' => $pageType); $comparators[] = array('referenceType' => 'role', 'referenceValue' => $roleID); $finalComparators = array(); foreach ($comparators as $comparator) { if (!isset($comparator['referenceType'])) { continue; } if (!isset($comparator['referenceValue'])) { continue; } if (isset($finalComparators[$comparator['referenceType']])) { continue; } $finalComparators[$comparator['referenceType']] = $comparator['referenceValue']; } $countOfDoNotDisplays = 0; $countOfDoDisplays = 0; foreach ($results as $rule) { if (!isset($finalComparators[$rule['referenceType']])) { continue; } //If the first character is an !, then negate the operation. 
if ($rule['referenceID'][0] === '!') { $vote = $this->menuItemVisibleNegate($rule, $finalComparators); if ($vote === -1) { $countOfDoNotDisplays += 1; continue; } if ($vote === 1) { $countOfDoDisplays += 1; continue; } //No vote on any other value. continue; } if ($finalComparators[$rule['referenceType']] != $rule['referenceID']) { continue; } if ((int)$rule['visible'] === 0) { $countOfDoNotDisplays += 1; continue; } $countOfDoDisplays += 1; } if ($countOfDoNotDisplays > $countOfDoDisplays) { return false; } return true; } private function menuItemVisibleNegate($rule, $finalComparators) { //Take the ! off $rule['referenceID'] = substr($rule['referenceID'], 1); if (!isset($finalComparators[$rule['referenceType']])) { return 0; } //Negate means vote for anything that does not match. Move on if it matches; don't vote. if ($finalComparators[$rule['referenceType']] == $rule['referenceID']) { return 0; } if ((int)$rule['visible'] === 0) { return -1; } return 1; } public function setMenuItemVisibility($inMenuItemID, $referenceID, $referenceType, $isVisible = false) { if (!is_numeric($inMenuItemID)) { return false; } if ($inMenuItemID < 1) { return false; } if (!is_bool($isVisible)) { return false; } $referenceID = preg_replace('/\s+/', '', strip_tags($referenceID)); $referenceType = preg_replace('/\s+/', '', strip_tags($referenceType)); $database = Database::getInstance(); if (!$database->isConnected()) { return false; } $referenceType = $database->escapeString($referenceType); $referenceID = $database->escapeString($referenceID); $inMenuItemID = $database->escapeString($inMenuItemID); $whereClause = "referenceID='{$referenceID}' AND referenceType='{$referenceType}' AND menuItemID={$inMenuItemID}"; $exists = $database->getData('ruleID', 'menuItemVisibility', $whereClause); if ($exists === false) { return false; } if ($exists != null) { return $this->insertNewVisibilityRule($inMenuItemID, $referenceID, $referenceType, $isVisible); } if ($isVisible === true) { $visible = 1; 
} else { $visible = 0; } $success = $database->updateTable('menuItemVisibility', "visibile={$visible}", $whereClause); if ($success === false) { return false; } if ($success === null) { return false; } return true; } private function insertNewVisibilityRule($inMenuItemID, $referenceID, $referenceType, $isVisible = false) { if (!is_numeric($inMenuItemID)) { return false; } if ($inMenuItemID < 1) { return false; } if (!is_bool($isVisible)) { return false; } $referenceID = preg_replace('/\s+/', '', strip_tags($referenceID)); $referenceType = preg_replace('/\s+/', '', strip_tags($referenceType)); $database = Database::getInstance(); if (!$database->isConnected()) { return false; } $referenceType = $database->escapeString($referenceType); $referenceID = $database->escapeString($referenceID); $inMenuItemID = $database->escapeString($inMenuItemID); if ($isVisible === true) { $visible = 1; } else { $visible = 0; } $success = $database->insertData('menuItemVisibility', 'referenceID, referenceType, visible, menuItemID', "'{$referenceID}', '{$referenceType}', {$visible}, {$inMenuItemID}"); if ($success === false) { return false; } if ($success === null) { return false; } return true; } public function deleteMenuItemVisibilityRule($inMenuItemID, $referenceID, $referenceType) { if (!is_numeric($inMenuItemID)) { return false; } if ($inMenuItemID < 1) { return false; } $referenceID = preg_replace('/\s+/', '', strip_tags($referenceID)); $referenceType = preg_replace('/\s+/', '', strip_tags($referenceType)); $database = Database::getInstance(); if (!$database->isConnected()) { return false; } $referenceType = $database->escapeString($referenceType); $referenceID = $database->escapeString($referenceID); $inMenuItemID = $database->escapeString($inMenuItemID); $success = $database->removeData('menuItemVisibility', "referenceID='{$referenceID}' AND referenceType='{$referenceType}' AND menuItemID={$inMenuItemID}"); if ($success === false) { return false; } if ($success === null) { 
return false; } return true; } }
apache-2.0
chris6k/tiyuzazhi
src/com/tiyuzazhi/app/CommentCenterActivity.java
2095
package com.tiyuzazhi.app; import android.app.Activity; import android.os.Bundle; import android.os.Handler; import android.os.Looper; import android.text.Html; import android.view.View; import android.widget.TextView; import com.tiyuzazhi.api.ArticleApi; import com.tiyuzazhi.beans.ExaminingArticle; import com.tiyuzazhi.utils.TPool; import com.tiyuzazhi.utils.ToastUtils; import java.util.concurrent.atomic.AtomicBoolean; /** * */ public class CommentCenterActivity extends Activity { private AtomicBoolean opLock; private Handler handler; private TextView opName; private TextView comment; private int articleId; @Override protected void onCreate(Bundle savedInstanceState) { setContentView(R.layout.author_center); super.onCreate(savedInstanceState); articleId = getIntent().getIntExtra("articleId", 0); View back = findViewById(R.id.backButton); back.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View view) { finish(); } }); opLock = new AtomicBoolean(false); handler = new Handler(Looper.getMainLooper()); opName = (TextView) findViewById(R.id.opName); comment = (TextView) findViewById(R.id.comment); init(); } private void init() { TPool.post(new Runnable() { @Override public void run() { final ExaminingArticle examiningArticle = ArticleApi.loadArticle(articleId); if (examiningArticle != null) { handler.post(new Runnable() { @Override public void run() { opName.setText(examiningArticle.getOpName() + ":"); comment.setText(Html.fromHtml(examiningArticle.getComment())); } }); } else { ToastUtils.show("未能获取到文章,请稍后再试"); } } }); } }
apache-2.0
NewcastleComputingScience/student-outcome-accelerator
glance-core/shared/src/main/scala/uk/ac/ncl/la/soar/glance/eval/SurveyResponse.scala
4053
/** Default (Template) Project * * Copyright (c) 2017 Hugo Firth * Email: <me@hugofirth.com/> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package uk.ac.ncl.la.soar.glance.eval import java.util.UUID import cats.implicits._ import io.circe.generic.auto._ import io.circe.syntax._ import io.circe.{Decoder, Encoder, Json, _} import uk.ac.ncl.la.soar._ import uk.ac.ncl.la.soar.glance.util.Time import uk.ac.ncl.la.soar.glance.util.Times._ import uk.ac.ncl.la.soar.glance.web.client.component.sortable.IndexChange /** * ADT representing an a survey which is completed by a member of staff. * * Note: Have to use Doubles for times because Javascript ... figure out a better solution to this. Longs better, Instants * preferable. Facade type on one side with implicit conversions? */ sealed trait SurveyResponse { def survey: Survey def simple: Ranking def detailed: Ranking def respondent: String def start: Double def id: UUID } /** * Simple structure representing Ranking info. Reified here because we store two sets of ranking info per survey * response. 
*/ final case class Ranking(ranks: List[StudentNumber], rankHistory: List[(StudentNumber, IndexChange, Time)] = Nil) object SurveyResponse { def apply(survey: Survey, simple: Ranking, detailed: Ranking, respondent: String, start: Double, id: UUID): SurveyResponse = IncompleteResponse(survey, simple, detailed, respondent, start, id) /* Typeclass instances for SurveResponse */ implicit val encodeSurveyResponse: Encoder[SurveyResponse] = new Encoder[SurveyResponse] { final def apply(a: SurveyResponse): Json = Json.obj( "id" -> a.id.toString.asJson, "survey" -> a.survey.asJson, "simple" -> a.simple.asJson, "detailed" -> a.detailed.asJson, "respondent" -> a.respondent.asJson, "start" -> a.start.asJson, ) } implicit val decodeSurveyResponse: Decoder[SurveyResponse] = new Decoder[SurveyResponse] { override def apply(c: HCursor): Decoder.Result[SurveyResponse] = { for { id <- c.downField("id").as[String] d <- decodeIdLessResponse(c) } yield d(UUID.fromString(id)) } } //TODO: Work out why we couldn't get auto/semiauto to work for us here? implicit val decodeIdLessResponse: Decoder[UUID => SurveyResponse] = new Decoder[UUID => SurveyResponse] { override def apply(c: HCursor): Decoder.Result[UUID => SurveyResponse] = { for { survey <- c.downField("survey").as[Survey] simple <- c.downField("simple").as[Ranking] detailed <- c.downField("detailed").as[Ranking] respondent <- c.downField("respondent").as[String] start <- c.downField("start").as[Double] } yield { id: UUID => IncompleteResponse(survey, simple, detailed, respondent, start, id) } } } } case class IncompleteResponse(survey: Survey, simple: Ranking, detailed: Ranking, respondent: String, start: Double, id: UUID) extends SurveyResponse case class CompleteResponse(survey: Survey, simple: Ranking, detailed: Ranking, respondent: String, start: Double, finish: Double, id: UUID) extends SurveyResponse
apache-2.0
vjanmey/EpicMudfia
com/planet_ink/coffee_mud/Abilities/Spells/Spell_Fly.java
4062
package com.planet_ink.coffee_mud.Abilities.Spells; import com.planet_ink.coffee_mud.core.interfaces.*; import com.planet_ink.coffee_mud.core.*; import com.planet_ink.coffee_mud.core.collections.*; import com.planet_ink.coffee_mud.Abilities.interfaces.*; import com.planet_ink.coffee_mud.Areas.interfaces.*; import com.planet_ink.coffee_mud.Behaviors.interfaces.*; import com.planet_ink.coffee_mud.CharClasses.interfaces.*; import com.planet_ink.coffee_mud.Commands.interfaces.*; import com.planet_ink.coffee_mud.Common.interfaces.*; import com.planet_ink.coffee_mud.Exits.interfaces.*; import com.planet_ink.coffee_mud.Items.interfaces.*; import com.planet_ink.coffee_mud.Locales.interfaces.*; import com.planet_ink.coffee_mud.MOBS.interfaces.*; import com.planet_ink.coffee_mud.Races.interfaces.*; import java.util.*; /* Copyright 2000-2014 Bo Zimmerman Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ @SuppressWarnings("rawtypes") public class Spell_Fly extends Spell { @Override public String ID() { return "Spell_Fly"; } private final static String localizedName = CMLib.lang()._("Fly"); @Override public String name() { return localizedName; } private final static String localizedStaticDisplay = CMLib.lang()._("(Fly spell)"); @Override public String displayText() { return localizedStaticDisplay; } @Override public int abstractQuality(){ return Ability.QUALITY_BENEFICIAL_OTHERS;} @Override protected int canAffectCode(){return CAN_MOBS|CAN_ITEMS;} @Override public int classificationCode(){ return Ability.ACODE_SPELL|Ability.DOMAIN_TRANSMUTATION;} @Override public long flags(){return Ability.FLAG_MOVING;} @Override public void affectPhyStats(Physical affected, PhyStats affectableStats) { super.affectPhyStats(affected,affectableStats); affectableStats.setDisposition(affectableStats.disposition()|PhyStats.IS_FLYING); } @Override public void unInvoke() { // undo the affects of this spell if(!(affected instanceof MOB)) { super.unInvoke(); return; } final MOB mob=(MOB)affected; super.unInvoke(); if(canBeUninvoked()) if((mob.location()!=null)&&(!mob.amDead())) mob.location().show(mob,null,CMMsg.MSG_OK_VISUAL,_("<S-NAME> begin(s) to float back down.")); } @Override public boolean invoke(MOB mob, Vector commands, Physical givenTarget, boolean auto, int asLevel) { final MOB target=getTarget(mob,commands,givenTarget); if(target==null) return false; // the invoke method for spells receives as // parameters the invoker, and the REMAINING // command line parameters, divided into words, // and added as String objects to a vector. if(!super.invoke(mob,commands,givenTarget,auto,asLevel)) return false; boolean success=proficiencyCheck(mob,0,auto); if(success) { // it worked, so build a copy of this ability, // and add it to the affects list of the // affected MOB. Then tell everyone else // what happened. 
invoker=mob; final CMMsg msg=CMClass.getMsg(mob,target,this,verbalCastCode(mob,target,auto),auto?"":_("^S<S-NAME> cast(s) a spell on <T-NAMESELF>.^?")); if(mob.location().okMessage(mob,msg)) { mob.location().send(mob,msg); if(target.location()==mob.location()) { target.location().show(target,null,CMMsg.MSG_OK_ACTION,_("<S-NAME> start(s) to fly around!")); success=beneficialAffect(mob,target,asLevel,0); } } } else return beneficialWordsFizzle(mob,target,_("<S-NAME> cast(s) a spell on <T-NAMESELF>, but the magic fizzles.")); // return whether it worked return success; } }
apache-2.0
CargoWebServer/CargoWebServer
Project/src/code.myceliUs.com/CargoWebServer/Cargo/Server/GraphStore.go
74285
package Server import ( "bytes" "encoding/gob" "encoding/json" "errors" "io/ioutil" "log" "os" "reflect" "strings" "regexp" "strconv" "time" "code.myceliUs.com/CargoWebServer/Cargo/Entities/CargoEntities" "code.myceliUs.com/CargoWebServer/Cargo/Entities/Config" "code.myceliUs.com/CargoWebServer/Cargo/JS" "code.myceliUs.com/CargoWebServer/Cargo/QueryParser/ast" "code.myceliUs.com/CargoWebServer/Cargo/QueryParser/lexer" "code.myceliUs.com/CargoWebServer/Cargo/QueryParser/parser" "code.myceliUs.com/Utility" "github.com/xrash/smetrics" // Xapian datastore. base64 "encoding/base64" "sync" "code.myceliUs.com/GoXapian" "code.myceliUs.com/XML_Schemas" ) //////////////////////////////////////////////////////////////////////////////// // DataStore function //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// // Key value Data Store //////////////////////////////////////////////////////////////////////////////// type GraphStore struct { /** The store name **/ m_id string // In case of remote store m_conn *WebSocketConnection m_port int m_ipv4 string m_hostName string m_storeName string m_pwd string m_user string m_prototypes map[string]*EntityPrototype // In case of local sotre /** The store path **/ m_path string // Operation channels m_set_entity_channel chan interface{} m_get_entity_channel chan interface{} m_delete_entity_channel chan interface{} } func getServiceContainerConnection() *WebSocketConnection { var conn *WebSocketConnection var port int port = 9494 // Try to get it from the db... conn = GetServer().getConnectionByIp("127.0.0.1", port) return conn } func NewGraphStore(info *Config.DataStoreConfiguration) (store *GraphStore, err error) { store = new(GraphStore) store.m_id = info.M_id // Connection information. 
store.m_ipv4 = info.M_ipv4 store.m_port = info.M_port store.m_user = info.M_user store.m_pwd = info.M_pwd store.m_hostName = info.M_hostName store.m_storeName = info.M_storeName store.m_prototypes = make(map[string]*EntityPrototype, 0) // if the store is a local store. if store.m_ipv4 == "127.0.0.1" { store.m_path = GetServer().GetConfigurationManager().GetDataPath() + "/" + store.m_id if _, err := os.Stat(store.m_path); os.IsNotExist(err) { os.Mkdir(store.m_path, 0777) } } if err != nil { log.Println("open:", err) } // Here I will register all class in the vm. prototypes, err := store.GetEntityPrototypes() if err == nil { for i := 0; i < len(prototypes); i++ { // The script will be put in global context (CargoWebServer) JS.GetJsRuntimeManager().AppendScript("CargoWebServer/"+prototypes[i].TypeName, prototypes[i].generateConstructor(), false) } } // Open a db channel. store.m_set_entity_channel = make(chan interface{}) store.m_get_entity_channel = make(chan interface{}) store.m_delete_entity_channel = make(chan interface{}) // datastore operation presscessing function. go func(store *GraphStore) { // The readable datastore. wstores := make(map[string]xapian.WritableDatabase) // Keep store in memory. for { select { case op := <-store.m_set_entity_channel: values, _ := op.(*sync.Map).Load("values") if values.(map[string]interface{})["TYPENAME"] != nil { //LogInfo("---> save values ", toJsonStr(values)) typeName := values.(map[string]interface{})["TYPENAME"].(string) uuid := values.(map[string]interface{})["UUID"].(string) path := store.m_path + "/" + typeName + ".glass" if wstores[path] == nil { wstores[path] = xapian.NewWritableDatabase(path, xapian.DB_CREATE_OR_OPEN) } wstores[path].Begin_transaction() // So here I will index the property found in the entity. doc := xapian.NewDocument() // Keep json data... 
var data string data, err = Utility.ToJson(values) if err == nil && len(data) > 0 { store.indexEntity(doc, values.(map[string]interface{})) doc.Set_data(data) doc.Add_boolean_term("Q" + formalize(uuid)) wstores[path].Replace_document("Q"+formalize(uuid), doc) } wstores[path].Commit_transaction() // Release the document memory. xapian.DeleteDocument(doc) } done, _ := op.(*sync.Map).Load("done") done.(chan bool) <- true case op := <-store.m_delete_entity_channel: uuid := op.(map[string]interface{})["uuid"].(string) query := xapian.NewQuery("Q" + formalize(uuid)) path := store.m_path + "/" + uuid[0:strings.Index(uuid, "%")] + ".glass" if wstores[path] == nil { wstores[path] = xapian.NewWritableDatabase(path, xapian.DB_CREATE_OR_OPEN) } enquire := xapian.NewEnquire(wstores[path]) enquire.Set_query(query) mset := enquire.Get_mset(uint(0), uint(10000)) // Now I will process the results. for i := 0; i < mset.Size(); i++ { it := mset.Get_hit(uint(i)) doc := it.Get_document() // Remove the document wstores[path].Delete_document(doc.Get_docid()) xapian.DeleteDocument(doc) xapian.DeleteMSetIterator(it) } xapian.DeleteQuery(query) xapian.DeleteMSet(mset) xapian.DeleteEnquire(enquire) wstores[path].Flush() op.(map[string]interface{})["done"].(chan bool) <- true case op := <-store.m_get_entity_channel: queryString := op.(map[string]interface{})["queryString"].(string) typeName := op.(map[string]interface{})["typeName"].(string) fields := op.(map[string]interface{})["fields"].([]string) path := store.m_path + "/" + typeName + ".glass" // The results. results := make([][]interface{}, 0) if !Utility.Exists(path) { // Here no database was found. 
err = errors.New("Datastore " + path + " dosent exit!") } else { if len(queryString) > 0 { results, err = store.executeSearchQuery(queryString, fields) } else { typeNameIndex := generatePrefix(typeName, "TYPENAME") + formalize(typeName) query := xapian.NewQuery(typeNameIndex) results, err = store.runXapianQuery(typeName, query, fields) } if len(results) == 0 { err = errors.New("No results found!") } } op.(map[string]interface{})["results"].(chan []interface{}) <- []interface{}{results, err} } } }(store) return } ////////////////////////////////////////////////////////////////////////////////// // Synchronized operations. ////////////////////////////////////////////////////////////////////////////////// /** * Create or Save entity in it store. */ func (this *GraphStore) setEntity(entity Entity) { var values map[string]interface{} if reflect.TypeOf(entity).String() == "*Server.DynamicEntity" { values = entity.(*DynamicEntity).getValues() } else { values, _ = Utility.ToMap(entity) } op := new(sync.Map) op.Store("values", values) done := make(chan bool) op.Store("done", done) this.m_set_entity_channel <- op <-done } /** * Remove an entity from the datastore. */ func (this *GraphStore) deleteEntity(uuid string) { op := make(map[string]interface{}) op["uuid"] = uuid op["done"] = make(chan bool) this.m_delete_entity_channel <- op <-op["done"].(chan bool) } /** * Get entity or values from a datastore. */ func (this *GraphStore) getValues(queryString string, typeName string, fields []string) ([][]interface{}, error) { op := make(map[string]interface{}) op["queryString"] = queryString op["typeName"] = typeName op["fields"] = fields op["results"] = make(chan []interface{}) this.m_get_entity_channel <- op results := <-op["results"].(chan []interface{}) if results[1] != nil { return nil, results[1].(error) } return results[0].([][]interface{}), nil } /** * This function is use to create a new entity prototype and save it value. * in db. 
* It must be create once per type */ func (this *GraphStore) CreateEntityPrototype(prototype *EntityPrototype) error { if len(prototype.TypeName) == 0 { return errors.New("Entity prototype type name must contain a value!") } if this.m_ipv4 != "127.0.0.1" { // I will use execute JS function to get the list of entity prototypes. id := Utility.RandomUUID() method := "ExecuteJsFunction" params := make([]*MessageData, 0) to := make([]*WebSocketConnection, 1) to[0] = this.m_conn param0 := new(MessageData) param0.TYPENAME = "Server.MessageData" param0.Name = "functionSrc" param0.Value = `function GetEntityPrototype(storeId, prototype){ return GetServer().GetEntityManager().CreateEntityPrototype(storeId, prototype, sessionId, messageId) }` param1 := new(MessageData) param1.TYPENAME = "Server.MessageData" param1.Name = "storeId" param1.Value = this.m_id param2 := new(MessageData) param2.TYPENAME = "Server.MessageData" param2.Name = "prototype" param2.Value = prototype // Append the params. params = append(params, param0) params = append(params, param1) params = append(params, param2) // The channel will be use to wait for results. resultsChan := make(chan interface{}) // The success callback. successCallback := func(resultsChan chan interface{}) func(*message, interface{}) { return func(rspMsg *message, caller interface{}) { // So here I will marchal the values from a json string and // initialyse the entity values from the values the contain. var results []map[string]interface{} json.Unmarshal(rspMsg.msg.Rsp.Results[0].DataBytes, &results) // Set the TYPENAME property here. results[0]["TYPENAME"] = "Server.EntityPrototype" value, err := Utility.InitializeStructure(results[0], setEntityFct) if err != nil { resultsChan <- err } else { resultsChan <- value.Interface().(*EntityPrototype) } } }(resultsChan) // The error callback. 
errorCallback := func(resultsChan chan interface{}) func(*message, interface{}) { return func(errMsg *message, caller interface{}) { resultsChan <- errMsg.msg.Err.Message } }(resultsChan) rqst, _ := NewRequestMessage(id, method, params, to, successCallback, nil, errorCallback, nil) go func(rqst *message) { GetServer().getProcessor().m_sendRequest <- rqst }(rqst) // wait for result here. results := <-resultsChan if reflect.TypeOf(results).String() == "*Server.EntityPrototype" { return nil } return results.(error) // return an error message instead. } // Here i will append super type fields... prototype.setSuperTypeFields() // Register it to the vm... JS.GetJsRuntimeManager().AppendScript("CargoWebServer", prototype.generateConstructor(), true) // Send event message... var eventDatas []*MessageData evtData := new(MessageData) evtData.TYPENAME = "Server.MessageData" evtData.Name = "prototype" evtData.Value = prototype eventDatas = append(eventDatas, evtData) evt, _ := NewEvent(NewPrototypeEvent, PrototypeEvent, eventDatas) GetServer().GetEventManager().BroadcastEvent(evt) if len(prototype.TypeName) == 0 { return errors.New("Entity prototype type name must contain a value!") } // I will serialyse the prototype. m := new(bytes.Buffer) enc := gob.NewEncoder(m) err := enc.Encode(prototype) if err != nil { log.Println("Prototype encode:", err) return err } if len(prototype.TypeName) == 0 { // The typeName cant be nil! panic(prototype) } // I will save the entity prototype in a file. if strings.HasPrefix(prototype.TypeName, this.GetId()) { file, err := os.Create(this.m_path + "/" + prototype.TypeName + ".gob") defer file.Close() if err == nil { encoder := gob.NewEncoder(file) encoder.Encode(prototype) } else { return err } this.m_prototypes[prototype.TypeName] = prototype } return nil } /** * Save an entity prototype. 
*/ func (this *GraphStore) SaveEntityPrototype(prototype *EntityPrototype) error { if len(prototype.TypeName) == 0 { return errors.New("Entity prototype type name must contain a value!") } if this.m_ipv4 != "127.0.0.1" { // I will use execute JS function to get the list of entity prototypes. id := Utility.RandomUUID() method := "ExecuteJsFunction" params := make([]*MessageData, 0) to := make([]*WebSocketConnection, 1) to[0] = this.m_conn param0 := new(MessageData) param0.TYPENAME = "Server.MessageData" param0.Name = "functionSrc" param0.Value = `function SaveEntityPrototype(storeId, prototype){ return GetServer().GetEntityManager().SaveEntityPrototype(storeId, prototype, sessionId, messageId) }` param1 := new(MessageData) param1.TYPENAME = "Server.MessageData" param1.Name = "storeId" param1.Value = this.m_id param2 := new(MessageData) param2.TYPENAME = "Server.MessageData" param2.Name = "prototype" param2.Value = prototype // Append the params. params = append(params, param0) params = append(params, param1) params = append(params, param2) // The channel will be use to wait for results. resultsChan := make(chan interface{}) // The success callback. successCallback := func(resultsChan chan interface{}) func(*message, interface{}) { return func(rspMsg *message, caller interface{}) { // So here I will marchal the values from a json string and // initialyse the entity values from the values the contain. var results []map[string]interface{} json.Unmarshal(rspMsg.msg.Rsp.Results[0].DataBytes, &results) // Set the TYPENAME property here. results[0]["TYPENAME"] = "Server.EntityPrototype" value, err := Utility.InitializeStructure(results[0], setEntityFct) if err != nil { resultsChan <- err } else { resultsChan <- value.Interface().(*EntityPrototype) } } }(resultsChan) // The error callback. 
errorCallback := func(resultsChan chan interface{}) func(*message, interface{}) { return func(errMsg *message, caller interface{}) { resultsChan <- errMsg.msg.Err.Message } }(resultsChan) rqst, _ := NewRequestMessage(id, method, params, to, successCallback, nil, errorCallback, nil) go func(rqst *message) { GetServer().getProcessor().m_sendRequest <- rqst }(rqst) // wait for result here. results := <-resultsChan if reflect.TypeOf(results).String() == "*Server.EntityPrototype" { return nil } return results.(error) // return an error message instead. } // Get the current entity prototype. prototype_, err := GetServer().GetEntityManager().getEntityPrototype(prototype.TypeName, this.m_id) if err != nil { return err } // I will serialyse the prototype. prototype.setSuperTypeFields() // I will remove it from substitution group as neeeded... for i := 0; i < len(prototype_.SuperTypeNames); i++ { if !Utility.Contains(prototype.SuperTypeNames, prototype_.SuperTypeNames[i]) { // Here I will remove the prototype from superType substitution group. superTypeName := prototype_.SuperTypeNames[i] superType, err := GetServer().GetEntityManager().getEntityPrototype(superTypeName, superTypeName[0:strings.Index(superTypeName, ".")]) if err != nil { return err } substitutionGroup := make([]string, 0) for j := 0; j < len(superType.SubstitutionGroup); j++ { if superType.SubstitutionGroup[j] != prototype_.TypeName { substitutionGroup = append(substitutionGroup, superType.SubstitutionGroup[j]) } } superType.SubstitutionGroup = substitutionGroup store := GetServer().GetDataManager().getDataStore(superTypeName[0:strings.Index(superTypeName, ".")]) err = store.SaveEntityPrototype(superType) if err != nil { return err } } } // Register it to the vm... 
JS.GetJsRuntimeManager().AppendScript("CargoWebServer/"+prototype.TypeName, prototype.generateConstructor(), true) file, err := os.Create(this.m_path + "/" + prototype.TypeName + ".gob") defer file.Close() if err == nil { encoder := gob.NewEncoder(file) encoder.Encode(prototype) } else { return err } this.m_prototypes[prototype.TypeName] = prototype var eventDatas []*MessageData evtData := new(MessageData) evtData.TYPENAME = "Server.MessageData" evtData.Name = "prototype" evtData.Value = prototype eventDatas = append(eventDatas, evtData) evt, _ := NewEvent(UpdatePrototypeEvent, PrototypeEvent, eventDatas) GetServer().GetEventManager().BroadcastEvent(evt) return nil } /** * Remove an entity prototype and all it releated values. */ func (this *GraphStore) DeleteEntityPrototype(typeName string) error { // In case of remote data store. if this.m_ipv4 != "127.0.0.1" { // I will use execute JS function to get the list of entity prototypes. id := Utility.RandomUUID() method := "ExecuteJsFunction" params := make([]*MessageData, 0) to := make([]*WebSocketConnection, 1) to[0] = this.m_conn param0 := new(MessageData) param0.TYPENAME = "Server.MessageData" param0.Name = "functionSrc" param0.Value = `function DeleteEntityPrototype(typeName, storeId){ GetServer().GetEntityManager().DeleteEntityPrototype(typeName, storeId, sessionId, messageId) }` param1 := new(MessageData) param1.TYPENAME = "Server.MessageData" param1.Name = "typeName" param1.Value = typeName param2 := new(MessageData) param2.TYPENAME = "Server.MessageData" param2.Name = "storeId" param2.Value = this.m_id // Append the params. params = append(params, param0) params = append(params, param1) params = append(params, param2) // The channel will be use to wait for results. resultsChan := make(chan interface{}) // The success callback. 
successCallback := func(resultsChan chan interface{}) func(*message, interface{}) { return func(rspMsg *message, caller interface{}) { LogInfo("---> entity protoype deleted!") // update success resultsChan <- nil } }(resultsChan) // The error callback. errorCallback := func(resultsChan chan interface{}) func(*message, interface{}) { return func(errMsg *message, caller interface{}) { resultsChan <- errMsg.msg.Err.Message } }(resultsChan) rqst, _ := NewRequestMessage(id, method, params, to, successCallback, nil, errorCallback, nil) go func(rqst *message) { GetServer().getProcessor().m_sendRequest <- rqst }(rqst) // wait for result here. results := <-resultsChan // in case of error if results != nil { if reflect.TypeOf(results).String() == "*string" { return errors.New(*results.(*string)) } } return nil } prototype := this.m_prototypes[typeName] // The prototype does not exist. if prototype == nil { // not exist so no need to be removed... return nil } // Remove substitution group from it parent. for i := 0; i < len(prototype.SuperTypeNames); i++ { storeId := prototype.SuperTypeNames[i][0:strings.Index(prototype.SuperTypeNames[i], ".")] superPrototype, err := GetServer().GetEntityManager().getEntityPrototype(prototype.SuperTypeNames[i], storeId) if err == nil { substitutionGroup := make([]string, 0) for j := 0; j < len(superPrototype.SubstitutionGroup); j++ { if superPrototype.SubstitutionGroup[j] != typeName { substitutionGroup = append(substitutionGroup, superPrototype.SubstitutionGroup[j]) } } // Save the prototype. superPrototype.SubstitutionGroup = substitutionGroup store := GetServer().GetDataManager().getDataStore(storeId) store.SaveEntityPrototype(superPrototype) } } // I will delete all entity... entities, _ := GetServer().GetEntityManager().getEntities(prototype.TypeName, this.m_id, nil) for i := 0; i < len(entities); i++ { entity := entities[i] // remove it... 
GetServer().GetEntityManager().deleteEntity(entity) } delete(this.m_prototypes, typeName) err := os.Remove(this.m_path + "/" + prototype.TypeName + ".gob") return err } /** * Remove all prototypes. */ func (this *GraphStore) DeleteEntityPrototypes() error { if this.m_ipv4 == "127.0.0.1" { for typeName, prototype := range this.m_prototypes { // Remove substitution group from it parent. for i := 0; i < len(prototype.SuperTypeNames); i++ { storeId := prototype.SuperTypeNames[i][0:strings.Index(prototype.SuperTypeNames[i], ".")] if storeId != this.m_id { superPrototype, err := GetServer().GetEntityManager().getEntityPrototype(prototype.SuperTypeNames[i], storeId) if err == nil { substitutionGroup := make([]string, 0) for j := 0; j < len(superPrototype.SubstitutionGroup); j++ { if superPrototype.SubstitutionGroup[j] != typeName { substitutionGroup = append(substitutionGroup, superPrototype.SubstitutionGroup[j]) } } // Save the prototype. superPrototype.SubstitutionGroup = substitutionGroup store := GetServer().GetDataManager().getDataStore(storeId) store.SaveEntityPrototype(superPrototype) } } } // Remove the entity from the cache and send delete event. entities, _ := GetServer().GetEntityManager().getEntities(typeName, this.m_id, nil) for i := 0; i < len(entities); i++ { entity := entities[i] // remove it from the cache... if len(entity.GetParentUuid()) > 0 { if !strings.HasPrefix(entity.GetParentUuid(), this.m_id) { // I will get the parent uuid link. parent, err := GetServer().GetEntityManager().getEntityByUuid(entity.GetParentUuid()) if err != nil { return errors.New(err.GetBody()) } // Here I will remove it from it parent... // Get values as map[string]interface{} and also set the entity in it parent. 
if reflect.TypeOf(entity).String() == "*Server.DynamicEntity" { parent.(*DynamicEntity).removeValue(entity.GetParentLnk(), entity.GetUuid()) } else { removeMethode := strings.Replace(entity.GetParentLnk(), "M_", "", -1) removeMethode = "Remove" + strings.ToUpper(removeMethode[0:1]) + removeMethode[1:] params := make([]interface{}, 1) params[0] = entity _, err_ := Utility.CallMethod(parent, removeMethode, params) if err_ != nil { cargoError := NewError(Utility.FileLine(), ATTRIBUTE_NAME_DOESNT_EXIST_ERROR, SERVER_ERROR_CODE, err_.(error)) return errors.New(cargoError.GetBody()) } } // Update the parent here. var eventDatas []*MessageData evtData := new(MessageData) evtData.TYPENAME = "Server.MessageData" evtData.Name = "entity" if reflect.TypeOf(parent).String() == "*Server.DynamicEntity" { evtData.Value = parent.(*DynamicEntity).getValues() } else { evtData.Value = parent } eventDatas = append(eventDatas, evtData) evt, _ := NewEvent(UpdateEntityEvent, EntityEvent, eventDatas) GetServer().GetEventManager().BroadcastEvent(evt) } } GetServer().GetEntityManager().removeEntity(entity) // Send event message... var eventDatas []*MessageData evtData := new(MessageData) evtData.TYPENAME = "Server.MessageData" evtData.Name = "entity" if reflect.TypeOf(entity).String() == "*Server.DynamicEntity" { evtData.Value = entity.(*DynamicEntity).getValues() } else { evtData.Value = entity } eventDatas = append(eventDatas, evtData) evt, _ := NewEvent(DeleteEntityEvent, EntityEvent, eventDatas) GetServer().GetEventManager().BroadcastEvent(evt) } } // Remove all prototypes from the map. for typeName, _ := range this.m_prototypes { delete(this.m_prototypes, typeName) } } return nil } //////////////////////////////////////////////////////////////////////////////// // DataStore function //////////////////////////////////////////////////////////////////////////////// /** * This function is use to retreive an existing entity prototype... 
*/ func (this *GraphStore) GetEntityPrototype(typeName string) (*EntityPrototype, error) { if len(typeName) == 0 { return nil, errors.New("Entity prototype type name must contain a value!") } // Here the store is not a local, so I will use a remote call to get the // list of it entity prototypes. if this.m_ipv4 != "127.0.0.1" { // I will use execute JS function to get the list of entity prototypes. id := Utility.RandomUUID() method := "ExecuteJsFunction" params := make([]*MessageData, 0) to := make([]*WebSocketConnection, 1) to[0] = this.m_conn param0 := new(MessageData) param0.TYPENAME = "Server.MessageData" param0.Name = "functionSrc" param0.Value = `function GetEntityPrototype(typeName, storeId){ return GetServer().GetEntityManager().GetEntityPrototype(typeName, storeId, sessionId, messageId) }` param1 := new(MessageData) param1.TYPENAME = "Server.MessageData" param1.Name = "typeName" param1.Value = typeName param2 := new(MessageData) param2.TYPENAME = "Server.MessageData" param2.Name = "storeId" param2.Value = this.m_id // Append the params. params = append(params, param0) params = append(params, param1) params = append(params, param2) // The channel will be use to wait for results. resultsChan := make(chan interface{}) // The success callback. successCallback := func(resultsChan chan interface{}) func(*message, interface{}) { return func(rspMsg *message, caller interface{}) { // So here I will marchal the values from a json string and // initialyse the entity values from the values the contain. var results []map[string]interface{} json.Unmarshal(rspMsg.msg.Rsp.Results[0].DataBytes, &results) // Set the TYPENAME property here. results[0]["TYPENAME"] = "Server.EntityPrototype" value, err := Utility.InitializeStructure(results[0], setEntityFct) if err != nil { resultsChan <- err } else { resultsChan <- value.Interface().(*EntityPrototype) } } }(resultsChan) // The error callback. 
errorCallback := func(resultsChan chan interface{}) func(*message, interface{}) { return func(errMsg *message, caller interface{}) { resultsChan <- errMsg.msg.Err.Message } }(resultsChan) rqst, _ := NewRequestMessage(id, method, params, to, successCallback, nil, errorCallback, nil) go func(rqst *message) { GetServer().getProcessor().m_sendRequest <- rqst }(rqst) // wait for result here. results := <-resultsChan if reflect.TypeOf(results).String() == "*Server.EntityPrototype" { return results.(*EntityPrototype), nil } return nil, results.(error) // return an error message instead. } if this.m_prototypes[typeName] != nil { return this.m_prototypes[typeName], nil } else { // Local store stuff... var prototype *EntityPrototype prototype = new(EntityPrototype) file, err := os.Open(this.m_path + "/" + typeName + ".gob") defer file.Close() if err == nil { decoder := gob.NewDecoder(file) err = decoder.Decode(prototype) } else { file, err = os.Open(this.m_path + "/" + typeName + "_impl.gob") if err == nil { decoder := gob.NewDecoder(file) err = decoder.Decode(prototype) } } if err != nil { //log.Panicln("---> ", typeName, err) return nil, err } this.m_prototypes[typeName] = prototype return prototype, err } } /** * Retreive the list of all entity prototype in a given store. */ func (this *GraphStore) GetEntityPrototypes() ([]*EntityPrototype, error) { var prototypes []*EntityPrototype // Here the store is not a local, so I will use a remote call to get the // list of it entity prototypes. if this.m_ipv4 == "" { this.m_ipv4 = "127.0.0.1" } if this.m_ipv4 != "127.0.0.1" { if !this.m_conn.IsOpen() { err := this.Connect() if err != nil { return nil, err } } // I will use execute JS function to get the list of entity prototypes. 
id := Utility.RandomUUID() method := "ExecuteJsFunction" params := make([]*MessageData, 0) to := make([]*WebSocketConnection, 1) to[0] = this.m_conn param0 := new(MessageData) param0.TYPENAME = "Server.MessageData" param0.Name = "functionSrc" param0.Value = `function GetEntityPrototypes(storeId){ return GetServer().GetEntityManager().GetEntityPrototypes(storeId, sessionId, messageId) }` param1 := new(MessageData) param1.TYPENAME = "Server.MessageData" param1.Name = "storeId" param1.Value = this.m_id // Append the params. params = append(params, param0) params = append(params, param1) // The channel will be use to wait for results. resultsChan := make(chan interface{}) // The success callback. successCallback := func(resultsChan chan interface{}) func(*message, interface{}) { return func(rspMsg *message, caller interface{}) { // So here I will marchal the values from a json string and // initialyse the entity values from the values the contain. var results [][]map[string]interface{} var prototypes []*EntityPrototype json.Unmarshal(rspMsg.msg.Rsp.Results[0].DataBytes, &results) for i := 0; i < len(results[0]); i++ { // Set the TYPENAME property here. results[0][i]["TYPENAME"] = "Server.EntityPrototype" values, err := Utility.InitializeStructure(results[0][i], setEntityFct) if err == nil { prototypes = append(prototypes, values.Interface().(*EntityPrototype)) } } resultsChan <- prototypes } }(resultsChan) // The error callback. errorCallback := func(resultsChan chan interface{}) func(*message, interface{}) { return func(errMsg *message, caller interface{}) { resultsChan <- errMsg.msg.Err.Message } }(resultsChan) rqst, _ := NewRequestMessage(id, method, params, to, successCallback, nil, errorCallback, nil) go func(rqst *message) { GetServer().getProcessor().m_sendRequest <- rqst }(rqst) // wait for result here. 
results := <-resultsChan if reflect.TypeOf(results).String() == "[]*Server.EntityPrototype" { return results.([]*EntityPrototype), nil } return prototypes, errors.New(*results.(*string)) // return an error message instead. } // Get prototypes from files. files, err := ioutil.ReadDir(this.m_path) if err != nil { return nil, err } for _, info := range files { if strings.HasSuffix(info.Name(), ".gob") { if err == nil { prototype, err := this.GetEntityPrototype(strings.Split(info.Name(), ".gob")[0]) if err == nil { prototypes = append(prototypes, prototype) } } } } return prototypes, nil } /** * Return the name of a store. */ func (this *GraphStore) GetId() string { return this.m_id } // TODO validate the user and password here... func (this *GraphStore) Connect() error { if this.m_ipv4 != "127.0.0.1" { // I will not try to connect if a connection already exist. if this.m_conn != nil { if this.m_conn.IsOpen() { return nil } } // Here I will connect to a remote server. var err error this.m_conn, err = GetServer().connect(this.m_ipv4, this.m_port) if err != nil { return err } // Here I will use the user and password in the connection to validate // that the user can get data from the store. // I will use execute JS function to get the list of entity prototypes. 
id := Utility.RandomUUID() method := "ExecuteJsFunction" params := make([]*MessageData, 0) to := make([]*WebSocketConnection, 1) to[0] = this.m_conn param0 := new(MessageData) param0.TYPENAME = "Server.MessageData" param0.Name = "functionSrc" param0.Value = `function Login(accountName, psswd, serverId){ return GetServer().GetSessionManager().Login(accountName, psswd, serverId, sessionId, messageId) }` param1 := new(MessageData) param1.TYPENAME = "Server.MessageData" param1.Name = "accountName" param1.Value = this.m_user param2 := new(MessageData) param2.TYPENAME = "Server.MessageData" param2.Name = "psswd" param2.Value = this.m_pwd param3 := new(MessageData) param3.TYPENAME = "Server.MessageData" param3.Name = "serverId" param3.Value = this.m_hostName // Append the params. params = append(params, param0) params = append(params, param1) params = append(params, param2) params = append(params, param3) // The channel will be use to wait for results. resultsChan := make(chan interface{}) // The success callback. successCallback := func(resultsChan chan interface{}) func(*message, interface{}) { return func(rspMsg *message, caller interface{}) { // So here I will marchal the values from a json string and // initialyse the entity values from the values the contain. var results []map[string]interface{} json.Unmarshal(rspMsg.msg.Rsp.Results[0].DataBytes, &results) if results[0] == nil { resultsChan <- "Fail to open session!" return } results[0]["TYPENAME"] = "CargoEntities.Session" values, err := Utility.InitializeStructure(results[0], setEntityFct) if err == nil { resultsChan <- values.Interface().(*CargoEntities.Session) } else { resultsChan <- err.Error() // send the error instead... } } }(resultsChan) // The error callback. 
		// The error callback: forwards the remote error message to the waiter.
		errorCallback := func(resultsChan chan interface{}) func(*message, interface{}) {
			return func(errMsg *message, caller interface{}) {
				resultsChan <- errMsg.msg.Err.Message
			}
		}(resultsChan)

		rqst, _ := NewRequestMessage(id, method, params, to, successCallback, nil, errorCallback, nil)

		go func(rqst *message) {
			GetServer().getProcessor().m_sendRequest <- rqst
		}(rqst)

		// wait for result here.
		results := <-resultsChan
		// Anything other than a session object means the login failed.
		if reflect.TypeOf(results).String() != "*CargoEntities.Session" {
			return errors.New(*results.(*string)) // return an error message instead.
		}
	}
	return nil
}

/**
 * Help to know if a store is connect or existing...
 * For a remote store this sends a Ping request to the distant server
 * (reconnecting first if needed); for a local store it checks that the
 * store's data directory exists on disk.
 */
func (this *GraphStore) Ping() error {
	if this.m_ipv4 != "127.0.0.1" {
		if this.m_conn != nil {
			if !this.m_conn.IsOpen() {
				err := this.Connect()
				if err != nil {
					return err
				}
			}
		}
		// Call ping on the distant server.
		id := Utility.RandomUUID()
		method := "Ping"
		params := make([]*MessageData, 0)
		to := make([]*WebSocketConnection, 1)
		to[0] = this.m_conn

		// The channel will be use to wait for results.
		resultsChan := make(chan interface{})

		// The success callback: the ping reply payload is forwarded as a string.
		successCallback := func(resultsChan chan interface{}) func(*message, interface{}) {
			return func(rspMsg *message, caller interface{}) {
				resultsChan <- string(rspMsg.msg.Rsp.Results[0].DataBytes)
			}
		}(resultsChan)

		// The error callback.
		errorCallback := func(resultsChan chan interface{}) func(*message, interface{}) {
			return func(errMsg *message, caller interface{}) {
				resultsChan <- errMsg.msg.Err.Message
			}
		}(resultsChan)

		rqst, _ := NewRequestMessage(id, method, params, to, successCallback, nil, errorCallback, nil)
		go func(rqst *message) {
			GetServer().getProcessor().m_sendRequest <- rqst
		}(rqst)

		// wait for result here.
		results := <-resultsChan
		// A non-string result is the remote error message.
		// NOTE(review): this asserts *string — presumably Err.Message is a
		// *string; confirm against the message type.
		if reflect.TypeOf(results).String() != "string" {
			return errors.New(*results.(*string)) // return an error message instead.
		}
		return nil
	}

	// Local store ping...
	path := GetServer().GetConfigurationManager().GetDataPath() + "/" + this.GetId()
	_, err := os.Stat(path)
	return err
}

/**
 * Create a new entry in the database.
 * For a remote store the query and values are forwarded to the remote
 * DataManager; for a local store each value that is an entity is persisted
 * through setEntity.
 */
func (this *GraphStore) Create(queryStr string, values []interface{}) (lastId interface{}, err error) {
	if this.m_ipv4 != "127.0.0.1" {
		if this.m_conn != nil {
			if !this.m_conn.IsOpen() {
				err := this.Connect()
				if err != nil {
					return nil, err
				}
			}
		}
		// I will use execute JS function to get the list of entity prototypes.
		id := Utility.RandomUUID()
		method := "ExecuteJsFunction"
		params := make([]*MessageData, 0)
		to := make([]*WebSocketConnection, 1)
		to[0] = this.m_conn

		param0 := new(MessageData)
		param0.TYPENAME = "Server.MessageData"
		param0.Name = "functionSrc"
		param0.Value = `function CreateData(storeId, query, data){ return GetServer().GetDataManager().Create(storeId, query, data, sessionId, messageId) }`

		param1 := new(MessageData)
		param1.TYPENAME = "Server.MessageData"
		param1.Name = "storeId"
		param1.Value = this.m_id

		param2 := new(MessageData)
		param2.TYPENAME = "Server.MessageData"
		param2.Name = "query"
		param2.Value = queryStr

		param3 := new(MessageData)
		param3.TYPENAME = "Server.MessageData"
		param3.Name = "data"
		param3.Value = values

		// Append the params.
		params = append(params, param0)
		params = append(params, param1)
		params = append(params, param2)
		params = append(params, param3)

		// The channel will be use to wait for results.
		resultsChan := make(chan interface{})

		// The success callback.
		successCallback := func(resultsChan chan interface{}) func(*message, interface{}) {
			return func(rspMsg *message, caller interface{}) {
				// So here I will marchal the values from a json string and
				resultsChan <- string(rspMsg.msg.Rsp.Results[0].DataBytes) // Return the last created id if there is some.
			}
		}(resultsChan)

		// The error callback.
errorCallback := func(resultsChan chan interface{}) func(*message, interface{}) { return func(errMsg *message, caller interface{}) { resultsChan <- errMsg.msg.Err.Message } }(resultsChan) rqst, _ := NewRequestMessage(id, method, params, to, successCallback, nil, errorCallback, nil) go func(rqst *message) { GetServer().getProcessor().m_sendRequest <- rqst }(rqst) // wait for result here. results := <-resultsChan // in case of error if reflect.TypeOf(results).String() == "*string" { return -1, errors.New(*results.(*string)) } return results, nil } // Creation of entity for i := 0; i < len(values); i++ { v := values[i] // If the value is a dynamic entity... if reflect.TypeOf(v).Kind() == reflect.Ptr || reflect.TypeOf(v).Kind() == reflect.Struct || reflect.TypeOf(v).Kind() == reflect.Map { this.setEntity(v.(Entity)) } } return } /** * Get the value list... */ func (this *GraphStore) Read(queryStr string, fieldsType []interface{}, params []interface{}) (results [][]interface{}, err error) { if this.m_ipv4 != "127.0.0.1" { if this.m_conn != nil { if !this.m_conn.IsOpen() { err := this.Connect() if err != nil { return nil, err } } } // I will use execute JS function to get the list of entity prototypes. 
id := Utility.RandomUUID() method := "ExecuteJsFunction" params := make([]*MessageData, 0) to := make([]*WebSocketConnection, 1) to[0] = this.m_conn param0 := new(MessageData) param0.TYPENAME = "Server.MessageData" param0.Name = "functionSrc" param0.Value = `function ReadData(storeId, query, fieldsType, parameters){ return GetServer().GetDataManager().Read(storeId, query, fieldsType, parameters, sessionId, messageId) }` param1 := new(MessageData) param1.TYPENAME = "Server.MessageData" param1.Name = "storeId" param1.Value = this.m_id param2 := new(MessageData) param2.TYPENAME = "Server.MessageData" param2.Name = "query" param2.Value = queryStr param3 := new(MessageData) param3.TYPENAME = "Server.MessageData" param3.Name = "fieldsType" param3.Value = fieldsType param4 := new(MessageData) param4.TYPENAME = "Server.MessageData" param4.Name = "parameters" param4.Value = params // Append the params. params = append(params, param0) params = append(params, param1) params = append(params, param2) params = append(params, param3) params = append(params, param4) // The channel will be use to wait for results. resultsChan := make(chan interface{}) // The success callback. successCallback := func(resultsChan chan interface{}) func(*message, interface{}) { return func(rspMsg *message, caller interface{}) { // So here I will marchal the values from a json string and // initialyse the entity values from the values the contain. var results [][][]interface{} // Tree dimension array of values err := json.Unmarshal(rspMsg.msg.Rsp.Results[0].DataBytes, &results) if err != nil { resultsChan <- err return } resultsChan <- results[0] // the first element contain the results. } }(resultsChan) // The error callback. 
errorCallback := func(resultsChan chan interface{}) func(*message, interface{}) { return func(errMsg *message, caller interface{}) { resultsChan <- errMsg.msg.Err.Message } }(resultsChan) rqst, _ := NewRequestMessage(id, method, params, to, successCallback, nil, errorCallback, nil) go func(rqst *message) { GetServer().getProcessor().m_sendRequest <- rqst }(rqst) // wait for result here. results := <-resultsChan // in case of error if reflect.TypeOf(results).String() == "error" { return nil, results.(error) // return an error message instead. } else if reflect.TypeOf(results).String() == "*string" { return nil, errors.New(*results.(*string)) } return results.([][]interface{}), nil } // First of all i will init the query... var query EntityQuery err = json.Unmarshal([]byte(queryStr), &query) if err != nil { return nil, err } results, err = this.getValues(query.Query, query.TypeName, query.Fields) return } /** * Update a entity value. * TODO think about a cute way to modify part of the entity and not the whole thing... */ func (this *GraphStore) Update(queryStr string, values []interface{}, params []interface{}) (err error) { // Remote server. if this.m_ipv4 != "127.0.0.1" { if this.m_conn != nil { if !this.m_conn.IsOpen() { err := this.Connect() if err != nil { return err } } } // I will use execute JS function to get the list of entity prototypes. 
id := Utility.RandomUUID() method := "ExecuteJsFunction" params := make([]*MessageData, 0) to := make([]*WebSocketConnection, 1) to[0] = this.m_conn param0 := new(MessageData) param0.TYPENAME = "Server.MessageData" param0.Name = "functionSrc" param0.Value = `function UpdateData(storeId, query, fields, parameters){ return GetServer().GetDataManager().Update(storeId, query, fields, parameters, sessionId, messageId) }` param1 := new(MessageData) param1.TYPENAME = "Server.MessageData" param1.Name = "storeId" param1.Value = this.m_id param2 := new(MessageData) param2.TYPENAME = "Server.MessageData" param2.Name = "query" param2.Value = queryStr param3 := new(MessageData) param3.TYPENAME = "Server.MessageData" param3.Name = "fields" param3.Value = values param4 := new(MessageData) param4.TYPENAME = "Server.MessageData" param4.Name = "parameters" param4.Value = params // Append the params. params = append(params, param0) params = append(params, param1) params = append(params, param2) params = append(params, param3) params = append(params, param4) // The channel will be use to wait for results. resultsChan := make(chan interface{}) // The success callback. successCallback := func(resultsChan chan interface{}) func(*message, interface{}) { return func(rspMsg *message, caller interface{}) { // update success resultsChan <- nil } }(resultsChan) // The error callback. errorCallback := func(resultsChan chan interface{}) func(*message, interface{}) { return func(errMsg *message, caller interface{}) { resultsChan <- errMsg.msg.Err.Message } }(resultsChan) rqst, _ := NewRequestMessage(id, method, params, to, successCallback, nil, errorCallback, nil) go func(rqst *message) { GetServer().getProcessor().m_sendRequest <- rqst }(rqst) // wait for result here. results := <-resultsChan // in case of error if results != nil { if reflect.TypeOf(results).String() == "*string" { return errors.New(*results.(*string)) } } return nil } // The value to be save. 
for i := 0; i < len(values); i++ { v := values[i] // If the value is an entity... if reflect.TypeOf(v).Kind() == reflect.Ptr || reflect.TypeOf(v).Kind() == reflect.Struct || reflect.TypeOf(v).Kind() == reflect.Map { this.setEntity(v.(Entity)) } } return } /** * Delete entity from the store... */ func (this *GraphStore) Delete(queryStr string, values []interface{}) (err error) { // Remote server. if this.m_ipv4 != "127.0.0.1" { if this.m_conn != nil { if !this.m_conn.IsOpen() { err := this.Connect() if err != nil { return err } } } // I will use execute JS function to get the list of entity prototypes. id := Utility.RandomUUID() method := "ExecuteJsFunction" params := make([]*MessageData, 0) to := make([]*WebSocketConnection, 1) to[0] = this.m_conn param0 := new(MessageData) param0.TYPENAME = "Server.MessageData" param0.Name = "functionSrc" param0.Value = `function UpdateData(storeId, query, parameters){ return GetServer().GetDataManager().Delete(storeId, query, parameters, sessionId, messageId) }` param1 := new(MessageData) param1.TYPENAME = "Server.MessageData" param1.Name = "storeId" param1.Value = this.m_id param2 := new(MessageData) param2.TYPENAME = "Server.MessageData" param2.Name = "query" param2.Value = queryStr param3 := new(MessageData) param3.TYPENAME = "Server.MessageData" param3.Name = "parameters" param3.Value = params // Append the params. params = append(params, param0) params = append(params, param1) params = append(params, param2) params = append(params, param3) // The channel will be use to wait for results. resultsChan := make(chan interface{}) // The success callback. successCallback := func(resultsChan chan interface{}) func(*message, interface{}) { return func(rspMsg *message, caller interface{}) { // update success resultsChan <- nil } }(resultsChan) // The error callback. 
errorCallback := func(resultsChan chan interface{}) func(*message, interface{}) { return func(errMsg *message, caller interface{}) { resultsChan <- errMsg.msg.Err.Message } }(resultsChan) rqst, _ := NewRequestMessage(id, method, params, to, successCallback, nil, errorCallback, nil) go func(rqst *message) { GetServer().getProcessor().m_sendRequest <- rqst }(rqst) // wait for result here. results := <-resultsChan // in case of error if results != nil { if reflect.TypeOf(results).String() == "*string" { return errors.New(*results.(*string)) } } return nil } // Remove the list of obsolete triples from the datastore. for i := 0; i < len(values); i++ { toDelete := values[i] if reflect.TypeOf(toDelete).Kind() == reflect.String { if Utility.IsValidEntityReferenceName(toDelete.(string)) { this.deleteEntity(toDelete.(string)) } } } return } /** * Close the backend store. */ func (this *GraphStore) Close() error { // Remote server. if this.m_ipv4 != "127.0.0.1" { // Close the connection. if this.m_conn != nil { this.m_conn.Close() } return nil } return nil } //////////////////////////////////////////////////////////////////////////////// // Search functionality. //////////////////////////////////////////////////////////////////////////////// // Generate prefix is use to create a indexation key for a given document. // the field must be in the index or id's. func generatePrefix(typeName string, field string) string { // remove the M_ part of the field name prefix := typeName if len(field) > 0 { prefix += "." + field } // replace unwanted character's prefix = strings.Replace(prefix, ".", "_", -1) + "%" prefix = "X" + strings.ToLower(prefix) return prefix } // Remove ambiquous query symbols % - . and replace it with _ func formalize(uuid string) string { return strings.TrimSpace(strings.ToLower(Utility.ToString(strings.Replace(strings.Replace(strings.Replace(uuid, "-", "_", -1), ".", "_", -1), "%", "_", -1)))) } // Index entity string field. 
func (this *GraphStore) indexStringField(data string, field string, typeName string, termGenerator xapian.TermGenerator) { // I will index all string field to be able to found it back latter. termGenerator.Index_text(strings.ToLower(data), uint(1), strings.ToUpper(field)) if Utility.IsUriBase64(data) { data_, err := base64.StdEncoding.DecodeString(data) if err == nil { if strings.Index(data, ":text/") > -1 || strings.Index(data, ":application/") > -1 { termGenerator.Index_text(strings.ToLower(string(data_))) } } } else if Utility.IsStdBase64(data) { data_, err := base64.StdEncoding.DecodeString(data) if err == nil { termGenerator.Index_text(strings.ToLower(string(data_))) if field != "M_data" { termGenerator.Index_text(strings.ToLower(string(data))) } } } else { termGenerator.Index_text(strings.ToLower(data)) } } // Index entity field func (this *GraphStore) indexField(data interface{}, field string, fieldType string, typeName string, termGenerator xapian.TermGenerator, doc xapian.Document, index int) { // This will give possibility to search for given fields. if data != nil { if reflect.TypeOf(data).Kind() == reflect.Slice { str_, err := Utility.ToJson(data) if err == nil { s := reflect.ValueOf(data) for i := 0; i < s.Len(); i++ { // I will remove nil values. 
item := s.Index(i) if item.IsValid() { zeroValue := reflect.Zero(item.Type()) if zeroValue != item { this.indexField(s.Index(i).Interface(), field, fieldType, typeName, termGenerator, doc, -1) } else { this.indexField(nil, field, fieldType, typeName, termGenerator, doc, -1) } } } doc.Add_value(uint(index), Utility.ToString(str_)) } else { doc.Add_value(uint(index), "null") } } else { if index != -1 { doc.Add_value(uint(index), Utility.ToString(data)) } if (XML_Schemas.IsXsNumeric(fieldType) || XML_Schemas.IsXsInt(fieldType)) && index != -1 { value := Utility.ToNumeric(data) doc.Add_value(uint(index), xapian.Sortable_serialise(value)) } else if reflect.TypeOf(data).Kind() == reflect.String { str := Utility.ToString(data) // If the the value is a valid entity reference i I will use boolean term. if Utility.IsValidEntityReferenceName(str) { term := generatePrefix(typeName, field) + formalize(str) doc.Add_boolean_term(term) } else { this.indexStringField(str, field, typeName, termGenerator) } } } } else { doc.Add_value(uint(index), "null") } } // index entity information. func (this *GraphStore) indexEntity(doc xapian.Document, values map[string]interface{}) { // The term generator termGenerator := xapian.NewTermGenerator() // set english by default. stemmer := xapian.NewStem("en") termGenerator.Set_stemmer(stemmer) termGenerator.Set_document(doc) // Regular text indexation... termGenerator.Index_text(values["TYPENAME"].(string), uint(1), "TYPENAME") // Boolean term indexation exact match. typeNameIndex := generatePrefix(values["TYPENAME"].(string), "TYPENAME") + formalize(values["TYPENAME"].(string)) doc.Add_boolean_term(typeNameIndex) prototype, _ := this.GetEntityPrototype(values["TYPENAME"].(string)) // also index value supertype... 
for i := 0; i < len(prototype.SuperTypeNames); i++ { termGenerator.Index_text(prototype.SuperTypeNames[i], uint(1), "TYPENAME") typeNameIndex := generatePrefix(prototype.SuperTypeNames[i], "TYPENAME") + formalize(prototype.SuperTypeNames[i]) doc.Add_boolean_term(typeNameIndex) } // Here I will append boolean term. for i := 0; i < len(prototype.Fields); i++ { // Index the value. var value interface{} value = values[prototype.Fields[i]] if Utility.Contains(prototype.Ids, prototype.Fields[i]) || Utility.Contains(prototype.Indexs, prototype.Fields[i]) { // Index the unique value index for the typeName and this field. if value != nil { term := generatePrefix(prototype.TypeName, prototype.Fields[i]) + formalize(Utility.ToString(value)) doc.Add_boolean_term(term) } } index := prototype.getFieldIndex(prototype.Fields[i]) this.indexField(value, prototype.Fields[i], prototype.FieldsType[i], prototype.TypeName, termGenerator, doc, index) } xapian.DeleteStem(stemmer) xapian.DeleteTermGenerator(termGenerator) } //////////////////////////////////////////////////////////////////////////////// // Search functionality. //////////////////////////////////////////////////////////////////////////////// /** * Merge tow results in one... */ func (this *GraphStore) merge(r1 map[string]map[string]interface{}, r2 map[string]map[string]interface{}) map[string]map[string]interface{} { for k, v := range r1 { r2[k] = v } return r2 } /** * Evaluate an expression. */ func (this *GraphStore) evaluate(typeName string, fieldName string, comparator string, expected interface{}, value interface{}) (bool, error) { isMatch := false // if the value is nil i will automatically return if value == nil { return isMatch, nil } prototype, err := this.GetEntityPrototype(typeName) if err != nil { return false, err } // The type name. fieldType := prototype.FieldsType[prototype.getFieldIndex(fieldName)] fieldType = strings.Replace(fieldType, "[]", "", -1) // here for the date I will get it unix time value... 
if fieldType == "xs.date" || fieldType == "xs.dateTime" { expectedDateValue, err := Utility.MatchISO8601_Date(expected.(string)) if err == nil { dateValue, _ := Utility.MatchISO8601_Date(value.(string)) if fieldType == "xs.dateTime" { expected = expectedDateValue.Unix() // get the unix resultstime for calcul value = dateValue.Unix() // get the unix time for calcul } else { expected = expectedDateValue.Truncate(24 * time.Hour).Unix() // get the unix time for calcul value = dateValue.Truncate(24 * time.Hour).Unix() // get the unix time for calcul } } else { // I will try with data time instead. expectedDateValue, err := Utility.MatchISO8601_DateTime(expected.(string)) if err == nil { dateValue, _ := Utility.MatchISO8601_DateTime(value.(string)) if fieldType == "xs.dateTime" { expected = expectedDateValue.Unix() // get the unix time for calcul value = dateValue.Unix() // get the unix time for calcul } else { expected = expectedDateValue.Truncate(24 * time.Hour).Unix() // get the unix time for calcul value = dateValue.Truncate(24 * time.Hour).Unix() // get the unix time for calcul } } else { return false, err } } } if comparator == "==" { // Equality comparator. // Case of string type. if reflect.TypeOf(expected).Kind() == reflect.String && reflect.TypeOf(value).Kind() == reflect.String { isRegex := strings.HasPrefix(expected.(string), "/") && strings.HasSuffix(expected.(string), "/") if isRegex { // here I will try to match the regular expression. 
var err error isMatch, err = regexp.MatchString(expected.(string)[1:len(expected.(string))-1], value.(string)) if err != nil { return false, err } } else { isMatch = Utility.RemoveAccent(expected.(string)) == Utility.RemoveAccent(value.(string)) } } else if reflect.TypeOf(expected).Kind() == reflect.Bool && reflect.TypeOf(value).Kind() == reflect.Bool { return expected.(bool) == value.(bool), nil } else if reflect.TypeOf(expected).Kind() == reflect.Int64 && reflect.TypeOf(value).Kind() == reflect.Int64 { return expected.(int64) == value.(int64), nil } else if reflect.TypeOf(expected).Kind() == reflect.Float64 && reflect.TypeOf(value).Kind() == reflect.Float64 { return expected.(float64) == value.(float64), nil } } else if comparator == "~=" { // Approximation comparator, string only... // Case of string types. if reflect.TypeOf(expected).Kind() == reflect.String && reflect.TypeOf(value).Kind() == reflect.String { distance := smetrics.JaroWinkler(Utility.RemoveAccent(expected.(string)), Utility.RemoveAccent(value.(string)), 0.7, 4) isMatch = distance >= .85 } else { return false, errors.New("Operator ~= can be only used with strings.") } } else if comparator == "!=" { // Equality comparator. // Case of string type. 
if reflect.TypeOf(expected).Kind() == reflect.String && reflect.TypeOf(value).Kind() == reflect.String { isMatch = Utility.RemoveAccent(expected.(string)) != Utility.RemoveAccent(value.(string)) } else if reflect.TypeOf(expected).Kind() == reflect.Bool && reflect.TypeOf(value).Kind() == reflect.Bool { return expected.(bool) != value.(bool), nil } else if reflect.TypeOf(expected).Kind() == reflect.Int64 && reflect.TypeOf(value).Kind() == reflect.Int64 { return expected.(int64) != value.(int64), nil } else if reflect.TypeOf(expected).Kind() == reflect.Float64 && reflect.TypeOf(value).Kind() == reflect.Float64 { return expected.(float64) != value.(float64), nil } } else if comparator == "^=" { if reflect.TypeOf(expected).Kind() == reflect.String && reflect.TypeOf(value).Kind() == reflect.String { return strings.HasPrefix(value.(string), expected.(string)), nil } else { return false, nil } } else if comparator == "$=" { if reflect.TypeOf(expected).Kind() == reflect.String && reflect.TypeOf(value).Kind() == reflect.String { return strings.HasSuffix(value.(string), expected.(string)), nil } else { return false, nil } } else if comparator == "<" { // Number operator only... 
if reflect.TypeOf(expected).Kind() == reflect.Int64 && reflect.TypeOf(value).Kind() == reflect.Int64 { return value.(int64) < expected.(int64), nil } else if reflect.TypeOf(expected).Kind() == reflect.Float64 && reflect.TypeOf(value).Kind() == reflect.Float64 { return value.(float64) < expected.(float64), nil } } else if comparator == "<=" { if reflect.TypeOf(expected).Kind() == reflect.Int64 && reflect.TypeOf(value).Kind() == reflect.Int64 { return value.(int64) <= expected.(int64), nil } else if reflect.TypeOf(expected).Kind() == reflect.Float64 && reflect.TypeOf(value).Kind() == reflect.Float64 { return value.(float64) <= expected.(float64), nil } } else if comparator == ">" { if reflect.TypeOf(expected).Kind() == reflect.Int64 && reflect.TypeOf(value).Kind() == reflect.Int64 { return value.(int64) > expected.(int64), nil } else if reflect.TypeOf(expected).Kind() == reflect.Float64 && reflect.TypeOf(value).Kind() == reflect.Float64 { return value.(float64) > expected.(float64), nil } } else if comparator == ">=" { if reflect.TypeOf(expected).Kind() == reflect.Int64 && reflect.TypeOf(value).Kind() == reflect.Int64 { return value.(int64) >= expected.(int64), nil } else if reflect.TypeOf(expected).Kind() == reflect.Float64 && reflect.TypeOf(value).Kind() == reflect.Float64 { return value.(float64) >= expected.(float64), nil } } return isMatch, nil } /** * That function test if a given value match all expressions of a given ast... */ func (this *GraphStore) match(ast *ast.QueryAst, values map[string]interface{}) (bool, error) { // test if the value is composite. if ast.IsComposite() { ast1, _, ast2 := ast.GetSubQueries() // both side of the tree must match. isMatch, err := this.match(ast1, values) if err != nil { return false, err } if isMatch == false { return false, nil } isMatch, err = this.match(ast2, values) if err != nil { return false, err } if isMatch == false { return false, nil } } else { // I will evaluate the expression... 
typeName, fieldName, comparator, expected := ast.GetExpression() return this.evaluate(typeName, fieldName, comparator, expected, values[fieldName]) } return true, nil } func (this *GraphStore) getIndexationValues(uuid string, fields []string) (map[string]interface{}, error) { object := make(map[string]interface{}, 0) // So here I will retreive values for given object fields... typeName := strings.Split(uuid, "%")[0] prefix := generatePrefix(typeName, "UUID") + formalize(uuid) query := xapian.NewQuery(prefix) results, err := this.runXapianQuery(typeName, query, fields) object["TYPENAME"] = typeName if len(fields) > 0 { for i := 0; i < len(fields); i++ { object[fields[i]] = results[0][i] } } else { object = results[0][0].(map[string]interface{}) } return object, err } /** * Run a query and return it result. */ func (this *GraphStore) runXapianQuery(typeName string, query xapian.Query, fields []string) ([][]interface{}, error) { var err error var results [][]interface{} var prototype *EntityPrototype prototype, err = this.GetEntityPrototype(typeName) if err != nil { return results, err } path := this.m_path + "/" + typeName + ".glass" db := xapian.NewDatabase(path) enquire := xapian.NewEnquire(db) enquire.Set_query(query) mset := enquire.Get_mset(uint(0), uint(10000)) // Now I will process the results. for i := 0; i < mset.Size(); i++ { it := mset.Get_hit(uint(i)) doc := it.Get_document() if len(fields) > 0 { values := make([]interface{}, 0) for j := 0; j < len(fields); j++ { // Get the field index. 
fieldIndex := prototype.getFieldIndex(fields[j]) fieldType := prototype.FieldsType[fieldIndex] value := doc.Get_value(uint(fieldIndex)) if XML_Schemas.IsXsNumeric(fieldType) { values = append(values, xapian.Sortable_unserialise(value)) } else if XML_Schemas.IsXsString(fieldType) { values = append(values, value) } else if XML_Schemas.IsXsId(fieldType) { values = append(values, value) } else if XML_Schemas.IsXsDate(fieldType) { values = append(values, xapian.Sortable_unserialise(value)) } else { var v map[string]interface{} err := json.Unmarshal([]byte(doc.Get_data()), &v) if err == nil { values = append(values, v[fields[j]]) } else { values = append(values, nil) } } } results = append(results, values) } else { // In that case the data contain in the document are return. var v map[string]interface{} err := json.Unmarshal([]byte(doc.Get_data()), &v) if err == nil { results = append(results, []interface{}{v}) } } xapian.DeleteDocument(doc) xapian.DeleteMSetIterator(it) } if len(results) == 0 { err = errors.New("No results found!") } db.Close() xapian.DeleteMSet(mset) xapian.DeleteEnquire(enquire) xapian.DeleteQuery(query) xapian.DeleteDatabase(db) return results, err } // Return the UUID for a given func (this *GraphStore) getIndexation(typeName string, fieldName string, expected interface{}) ([]interface{}, error) { // Indexations contain array of string var ids []interface{} var prototype, err = this.GetEntityPrototype(typeName) if err != nil { return ids, err } // I will retreive the value... if len(fieldName) == 0 { // Indexation by typeName... 
typeNameIndex := generatePrefix(typeName, "TYPENAME") + formalize(typeName) query := xapian.NewQuery(typeNameIndex) results, err := this.runXapianQuery(typeName, query, []string{"UUID"}) if err != nil { return ids, err } for i := 0; i < len(results); i++ { ids = append(ids, results[i][0].(string)) } } else { fieldIndex := prototype.getFieldIndex(fieldName) if fieldIndex != -1 { fieldType := prototype.FieldsType[fieldIndex] fieldType_ := strings.Replace(fieldType, "[]", "", -1) fieldType_ = strings.Replace(fieldType_, ":Ref", "", -1) var query xapian.Query if fieldName == "UUID" || fieldName == "TYPENAME" || fieldName == "ParentUuid" || fieldName == "ParentLnk" { prefix := generatePrefix(typeName, fieldName) + formalize(Utility.ToString(expected)) query = xapian.NewQuery(prefix) } else if strings.HasPrefix(fieldName, "M_") { if XML_Schemas.IsXsString(fieldType) || XML_Schemas.IsXsId(fieldType) { p := xapian.NewQueryParser() // I ill add the prefix to the query parser. p.Add_prefix(fieldName[2:], strings.ToUpper(fieldName)) query = p.Parse_query(fieldName[2:] + ":" + expected.(string)) xapian.DeleteQueryParser(p) } } results, err := this.runXapianQuery(typeName, query, []string{"UUID"}) if err != nil { return ids, err } // Now I will get the results... for i := 0; i < len(results); i++ { ids = append(ids, results[i][0].(string)) } } } return ids, nil } /** * Here i will walk the tree and generate the query. */ func (this *GraphStore) runQuery(ast *ast.QueryAst, fields []string) (map[string]map[string]interface{}, error) { // I will create the array if it dosent exist. 
results := make(map[string]map[string]interface{}, 0) if ast.IsComposite() { // Get the sub-queries ast1, operator, ast2 := ast.GetSubQueries() r1, err := this.runQuery(ast1, fields) if err != nil { return nil, err } r2, err := this.runQuery(ast2, fields) if err != nil { return nil, err } if operator == "&&" { // conjonction for k, v := range r2 { isMatch, err := this.match(ast1, v) if err != nil { return nil, err } if isMatch { results[k] = v } } for k, v := range r1 { isMatch, err := this.match(ast2, v) if err != nil { return nil, err } if isMatch { results[k] = v } } } else if operator == "||" { // disjonction results = this.merge(r1, r2) } } else { typeName, fieldName, comparator, expected := ast.GetExpression() values := make(map[string]map[string]interface{}, 0) // Need the prototype here. prototype, err := this.GetEntityPrototype(typeName) if err != nil { return nil, err } if fieldName == "TYPENAME" { indexations, err := this.getIndexation(typeName, "", "") if err != nil { return nil, err } for i := 0; i < len(indexations); i++ { values_, err := this.getIndexationValues(indexations[i].(string), fields) if err == nil { results[indexations[i].(string)] = values_ } } } else { fieldType := prototype.FieldsType[prototype.getFieldIndex(fieldName)] isArray := strings.HasPrefix(fieldType, "[]") isRef := strings.HasSuffix(fieldType, ":Ref") fieldType = strings.Replace(fieldType, "[]", "", -1) isString := fieldType == "xs.string" || fieldType == "xs.token" || fieldType == "xs.anyURI" || fieldType == "xs.anyURI" || fieldType == "xs.IDREF" || fieldType == "xs.QName" || fieldType == "xs.NOTATION" || fieldType == "xs.normalizedString" || fieldType == "xs.Name" || fieldType == "xs.NCName" || fieldType == "xs.ID" || fieldType == "xs.language" // Integers types. 
isInt := fieldType == "xs.int" || fieldType == "xs.integer" || fieldType == "xs.long" || fieldType == "xs.unsignedInt" || fieldType == "xs.short" || fieldType == "xs.unsignedLong" // decimal value isDecimal := fieldType == "xs.float" || fieldType == "xs.decimal" || fieldType == "xs.double" // Date time isDate := fieldType == "xs.date" || fieldType == "xs.dateTime" fields = prototype.Fields // all field must be search... // Strings or references... if isString || isRef { // The string expected value... if expected != nil { expectedStr := expected.(string) isRegex := strings.HasPrefix(expectedStr, "/") && strings.HasSuffix(expectedStr, "/") if comparator == "==" && !isRegex { // Now i will get the value from the indexation. if len(expectedStr) > 0 { indexations, err := this.getIndexation(typeName, fieldName, expectedStr) if err == nil { for i := 0; i < len(indexations); i++ { values[indexations[i].(string)], err = this.getIndexationValues(indexations[i].(string), []string{}) if err != nil { return nil, err } var isMatch bool if isArray { // Here I have an array of values to test. var strValues []string err = json.Unmarshal([]byte(values[indexations[i].(string)][fieldName].(string)), &strValues) if err != nil { return nil, err } for j := 0; j < len(strValues); j++ { isMatch, err = this.evaluate(typeName, fieldName, comparator, expected, strValues[j]) } } else { isMatch, err = this.evaluate(typeName, fieldName, comparator, expected, values[indexations[i].(string)][fieldName]) } if err != nil { return nil, err } if isMatch { // if the result match I put it inside the map result. results[indexations[i].(string)] = values[indexations[i].(string)] } } } } } else if comparator == "~=" || comparator == "!=" || comparator == "^=" || comparator == "$=" || (isRegex && comparator == "==") { // Here I will use the typename as indexation key... 
indexations, err := this.getIndexation(typeName, "", "") if err == nil { for i := 0; i < len(indexations); i++ { values[indexations[i].(string)], err = this.getIndexationValues(indexations[i].(string), fields) if err != nil { return nil, err } isMatch, err := this.evaluate(typeName, fieldName, comparator, expected, values[indexations[i].(string)][fieldName]) if err != nil { return nil, err } if isMatch { // if the result match I put it inside the map result. results[indexations[i].(string)] = values[indexations[i].(string)] } } } else { LogInfo("---> 2230 ", err) } } else { if !isRegex { return nil, errors.New("Unexpexted comparator " + comparator + " for type \"string\".") } else { return nil, errors.New("Unexpexted comparator " + comparator + " for regex, use \"==\" insted") } } } else if isRef { // In that case the only the operato == and != are define. if comparator == "==" || comparator == "!=" { typeNameIndex := generatePrefix(typeName, "TYPENAME") + formalize(typeName) query := xapian.NewQuery(typeNameIndex) uuids, err := this.runXapianQuery(typeName, query, []string{"UUID"}) if err != nil { return nil, err } if comparator == "==" { for i := 0; i < len(uuids); i++ { values, err := this.getIndexationValues(uuids[i][0].(string), fields) if err == nil { results[uuids[i][0].(string)] = values } } } else if comparator == "!=" { for i := 0; i < len(uuids); i++ { values, err := this.getIndexationValues(uuids[i][0].(string), fields) if err == nil { results[uuids[i][0].(string)] = values } } } } else { return nil, errors.New("Unexpexted comparator " + comparator + " for regex, use \"==\" insted") } } } else if fieldType == "xs.boolean" { if !(comparator == "==" || comparator == "!=") { return nil, errors.New("Unexpexted comparator " + comparator + " for bool values, use \"==\" or \"!=\"") } // Get the boolean value. 
indexations, err := this.getIndexation(typeName, fieldName, strconv.FormatBool(expected.(bool))) if err == nil { for i := 0; i < len(indexations); i++ { values[indexations[i].(string)], err = this.getIndexationValues(indexations[i].(string), fields) if err != nil { return nil, err } isMatch, err := this.evaluate(typeName, fieldName, comparator, expected, values[indexations[i].(string)][fieldName]) if err != nil { return nil, err } if isMatch { // if the result match I put it inside the map result. results[indexations[i].(string)] = values[indexations[i].(string)] } } } } else if isInt || isDecimal || isDate { // Numeric values or date that are covert at evaluation time as integer. if comparator == "~=" { return nil, errors.New("Unexpexted comparator " + comparator + " for type numeric value.") } // Get the boolean value. if comparator == "==" { indexations, err := this.getIndexation(typeName, fieldName, expected) if err == nil { for i := 0; i < len(indexations); i++ { values[indexations[i].(string)], err = this.getIndexationValues(indexations[i].(string), fields) if err != nil { return nil, err } isMatch, err := this.evaluate(typeName, fieldName, comparator, expected, values[indexations[i].(string)][fieldName]) if err != nil { return nil, err } if isMatch { // if the result match I put it inside the map result. results[indexations[i].(string)] = values[indexations[i].(string)] } } } } else { // for the other comparator I will get all the entities of the given type and test each of those. indexations, err := this.getIndexation(typeName, "", "") if err == nil { for i := 0; i < len(indexations); i++ { values[indexations[i].(string)], err = this.getIndexationValues(indexations[i].(string), fields) if err != nil { return nil, err } isMatch, err := this.evaluate(typeName, fieldName, comparator, expected, values[indexations[i].(string)][fieldName]) if err != nil { return nil, err } if isMatch { // if the result match I put it inside the map result. 
results[indexations[i].(string)] = values[indexations[i].(string)] } } } } } } } return results, nil } /** * Execute a search query. */ func (this *GraphStore) executeSearchQuery(query string, fields []string) ([][]interface{}, error) { s := lexer.NewLexer([]byte(query)) p := parser.NewParser() a, err := p.Parse(s) if err == nil { astree := a.(*ast.QueryAst) fieldLength := len(fields) r, err := this.runQuery(astree, fields) if err != nil { return nil, err } // Here I will keep the result part... results := make([][]interface{}, 0) for _, object := range r { if fieldLength == 0 { // In that case the whole object will be set in the result. results = append(results, []interface{}{object}) } else { results_ := make([]interface{}, 0) for i := 0; i < fieldLength; i++ { results_ = append(results_, object[fields[i]]) } results = append(results, results_) } } return results, err } else { LogInfo("-------> query: ", query, fields) LogInfo("--> search error ", err) } return nil, err }
apache-2.0
Blazebit/blaze-utils
blaze-common-utils/src/main/java/com/blazebit/text/URLFormat.java
563
/* * Copyright 2011 Blazebit */ package com.blazebit.text; import java.net.MalformedURLException; import java.net.URL; /** * @author Christian Beikov * @since 0.1.9 */ public class URLFormat extends AbstractFormat<URL> { private static final long serialVersionUID = 1L; public URLFormat() { super(URL.class); } public URL parse(String value, ParserContext context) { try { return new URL(value); } catch (MalformedURLException e) { throw new IllegalArgumentException(e); } } }
apache-2.0
DariusX/camel
components/camel-saxon/src/test/java/org/apache/camel/component/xslt/SaxonInvalidXsltFileTest.java
2029
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.camel.component.xslt; import javax.xml.transform.TransformerException; import org.apache.camel.CamelContext; import org.apache.camel.builder.RouteBuilder; import org.apache.camel.impl.DefaultCamelContext; import org.apache.camel.test.junit4.TestSupport; import org.junit.Test; public class SaxonInvalidXsltFileTest extends TestSupport { @Test public void testInvalidStylesheet() throws Exception { try { RouteBuilder builder = createRouteBuilder(); CamelContext context = new DefaultCamelContext(); context.addRoutes(builder); context.start(); fail("Should have thrown an exception due XSL compilation error"); } catch (Exception e) { // expected assertIsInstanceOf(TransformerException.class, e.getCause().getCause().getCause()); } } protected RouteBuilder createRouteBuilder() throws Exception { return new RouteBuilder() { public void configure() throws Exception { from("seda:a").to("xslt-saxon:org/apache/camel/component/xslt/notfound.xsl?transformerFactoryClass=net.sf.saxon.TransformerFactoryImpl"); } }; } }
apache-2.0
mmeany/mvm-link-checker
src/main/java/com/mvmlabs/springboot/web/AdminController.java
1376
package com.mvmlabs.springboot.web; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.security.core.userdetails.UserDetails; import org.springframework.security.web.bind.annotation.AuthenticationPrincipal; import org.springframework.stereotype.Controller; import org.springframework.ui.Model; import org.springframework.web.bind.annotation.PathVariable; import org.springframework.web.bind.annotation.RequestMapping; import org.springframework.web.bind.annotation.RequestMethod; /** * Only here to test access restrictions for Spring Security. * * @author Mark Meany * */ @Controller @RequestMapping(value = "/admin") public class AdminController { /** Logger implementation. */ private final Logger logger = LoggerFactory.getLogger(this.getClass()); @RequestMapping(value = "/greet/{name}", method = RequestMethod.GET) public String greet(@AuthenticationPrincipal final UserDetails user, @PathVariable(value = "name") final String name, final Model model) { if (user == null) { throw new RuntimeException("Authentication error"); } model.addAttribute("username", user.getUsername()); model.addAttribute("name", name); logger.info("The authenticated user '" + user.getUsername() + "' is masquarading as '" + name + "'."); return "site.admin.greet"; } }
apache-2.0
WIgor/hadoop
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
9756
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.namenode; import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports; import static org.mockito.Mockito.spy; import java.io.File; import java.io.IOException; import java.util.concurrent.locks.ReentrantReadWriteLock; import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.MkdirOp; import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease; import org.apache.hadoop.hdfs.server.namenode.ha.EditLogTailer; import 
org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse; import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand; import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration; import org.apache.hadoop.hdfs.server.protocol.SlowPeerReports; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.ipc.StandbyException; import org.apache.hadoop.security.AccessControlException; import org.mockito.Mockito; import org.mockito.internal.util.reflection.Whitebox; /** * This is a utility class to expose NameNode functionality for unit tests. */ public class NameNodeAdapter { /** * Get the namesystem from the namenode */ public static FSNamesystem getNamesystem(NameNode namenode) { return namenode.getNamesystem(); } /** * Get block locations within the specified range. */ public static LocatedBlocks getBlockLocations(NameNode namenode, String src, long offset, long length) throws IOException { return namenode.getNamesystem().getBlockLocations("foo", src, offset, length); } public static HdfsFileStatus getFileInfo(NameNode namenode, String src, boolean resolveLink) throws AccessControlException, UnresolvedLinkException, StandbyException, IOException { namenode.getNamesystem().readLock(); try { return FSDirStatAndListingOp.getFileInfo(namenode.getNamesystem() .getFSDirectory(), src, resolveLink); } finally { namenode.getNamesystem().readUnlock(); } } public static boolean mkdirs(NameNode namenode, String src, PermissionStatus permissions, boolean createParent) throws UnresolvedLinkException, IOException { return namenode.getNamesystem().mkdirs(src, permissions, createParent); } public static void saveNamespace(NameNode namenode) throws AccessControlException, IOException { namenode.getNamesystem().saveNamespace(0, 0); } public static void enterSafeMode(NameNode namenode, boolean resourcesLow) throws IOException { namenode.getNamesystem().enterSafeMode(resourcesLow); } public static void 
leaveSafeMode(NameNode namenode) { namenode.getNamesystem().leaveSafeMode(false); } public static void abortEditLogs(NameNode nn) { FSEditLog el = nn.getFSImage().getEditLog(); el.abortCurrentLogSegment(); } /** * Get the internal RPC server instance. * @return rpc server */ public static Server getRpcServer(NameNode namenode) { return ((NameNodeRpcServer)namenode.getRpcServer()).clientRpcServer; } public static DelegationTokenSecretManager getDtSecretManager( final FSNamesystem ns) { return ns.getDelegationTokenSecretManager(); } public static HeartbeatResponse sendHeartBeat(DatanodeRegistration nodeReg, DatanodeDescriptor dd, FSNamesystem namesystem) throws IOException { return namesystem.handleHeartbeat(nodeReg, BlockManagerTestUtil.getStorageReportsForDatanode(dd), dd.getCacheCapacity(), dd.getCacheRemaining(), 0, 0, 0, null, true, SlowPeerReports.EMPTY_REPORT, SlowDiskReports.EMPTY_REPORT); } public static boolean setReplication(final FSNamesystem ns, final String src, final short replication) throws IOException { return ns.setReplication(src, replication); } public static LeaseManager getLeaseManager(final FSNamesystem ns) { return ns.leaseManager; } /** Set the softLimit and hardLimit of client lease periods. */ public static void setLeasePeriod(final FSNamesystem namesystem, long soft, long hard) { getLeaseManager(namesystem).setLeasePeriod(soft, hard); namesystem.leaseManager.triggerMonitorCheckNow(); } public static Lease getLeaseForPath(NameNode nn, String path) { final FSNamesystem fsn = nn.getNamesystem(); INode inode; try { inode = fsn.getFSDirectory().getINode(path, DirOp.READ); } catch (UnresolvedLinkException e) { throw new RuntimeException("Lease manager should not support symlinks"); } catch (IOException ioe) { return null; // unresolvable path, ex. parent dir is a file } return inode == null ? 
null : fsn.leaseManager.getLease((INodeFile) inode); } public static String getLeaseHolderForPath(NameNode namenode, String path) { Lease l = getLeaseForPath(namenode, path); return l == null? null: l.getHolder(); } /** * @return the timestamp of the last renewal of the given lease, * or -1 in the case that the lease doesn't exist. */ public static long getLeaseRenewalTime(NameNode nn, String path) { Lease l = getLeaseForPath(nn, path); return l == null ? -1 : l.getLastUpdate(); } /** * Return the datanode descriptor for the given datanode. */ public static DatanodeDescriptor getDatanode(final FSNamesystem ns, DatanodeID id) throws IOException { ns.readLock(); try { return ns.getBlockManager().getDatanodeManager().getDatanode(id); } finally { ns.readUnlock(); } } /** * Return the FSNamesystem stats */ public static long[] getStats(final FSNamesystem fsn) { return fsn.getStats(); } public static ReentrantReadWriteLock spyOnFsLock(FSNamesystem fsn) { ReentrantReadWriteLock spy = Mockito.spy(fsn.getFsLockForTests()); fsn.setFsLockForTests(spy); return spy; } public static FSImage spyOnFsImage(NameNode nn1) { FSNamesystem fsn = nn1.getNamesystem(); FSImage spy = Mockito.spy(fsn.getFSImage()); Whitebox.setInternalState(fsn, "fsImage", spy); return spy; } public static FSEditLog spyOnEditLog(NameNode nn) { FSEditLog spyEditLog = spy(nn.getNamesystem().getFSImage().getEditLog()); DFSTestUtil.setEditLogForTesting(nn.getNamesystem(), spyEditLog); EditLogTailer tailer = nn.getNamesystem().getEditLogTailer(); if (tailer != null) { tailer.setEditLog(spyEditLog); } return spyEditLog; } public static JournalSet spyOnJournalSet(NameNode nn) { FSEditLog editLog = nn.getFSImage().getEditLog(); JournalSet js = Mockito.spy(editLog.getJournalSet()); editLog.setJournalSetForTesting(js); return js; } public static String getMkdirOpPath(FSEditLogOp op) { if (op.opCode == FSEditLogOpCodes.OP_MKDIR) { return ((MkdirOp) op).path; } else { return null; } } public static FSEditLogOp 
createMkdirOp(String path) { MkdirOp op = MkdirOp.getInstance(new FSEditLogOp.OpInstanceCache()) .setPath(path) .setTimestamp(0) .setPermissionStatus(new PermissionStatus( "testuser", "testgroup", FsPermission.getDefault())); return op; } /** * @return the number of blocks marked safe by safemode, or -1 * if safemode is not running. */ public static long getSafeModeSafeBlocks(NameNode nn) { if (!nn.getNamesystem().isInSafeMode()) { return -1; } Object bmSafeMode = Whitebox.getInternalState( nn.getNamesystem().getBlockManager(), "bmSafeMode"); return (long)Whitebox.getInternalState(bmSafeMode, "blockSafe"); } /** * @return Replication queue initialization status */ public static boolean safeModeInitializedReplQueues(NameNode nn) { return nn.getNamesystem().getBlockManager().isPopulatingReplQueues(); } public static File getInProgressEditsFile(StorageDirectory sd, long startTxId) { return NNStorage.getInProgressEditsFile(sd, startTxId); } public static NamenodeCommand startCheckpoint(NameNode nn, NamenodeRegistration backupNode, NamenodeRegistration activeNamenode) throws IOException { return nn.getNamesystem().startCheckpoint(backupNode, activeNamenode); } }
apache-2.0
leafclick/intellij-community
jps/model-serialization/src/com/intellij/openapi/components/PathMacroMap.java
3156
// Copyright 2000-2018 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file. package com.intellij.openapi.components; import com.intellij.openapi.application.PathMacroFilter; import com.intellij.openapi.diagnostic.Logger; import org.jdom.*; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; /** * @author Eugene Zhuravlev */ public abstract class PathMacroMap { private static final Logger LOG = Logger.getInstance(PathMacroMap.class); @NotNull public abstract String substitute(@NotNull String text, boolean caseSensitive); @NotNull public final String substitute(@NotNull String text, boolean caseSensitive, boolean recursively) { return recursively ? substituteRecursively(text, caseSensitive) : substitute(text, caseSensitive); } public final void substitute(@NotNull Element e, boolean caseSensitive) { substitute(e, caseSensitive, false); } public final void substitute(@NotNull Element element, boolean caseSensitive, boolean recursively, @Nullable PathMacroFilter filter) { if (filter != null && filter.skipPathMacros(element)) { return; } for (Content child : element.getContent()) { if (child instanceof Element) { substitute((Element)child, caseSensitive, recursively, filter); } else if (child instanceof Text) { Text t = (Text)child; String oldText = t.getText(); String newText = recursively ? 
substituteRecursively(oldText, caseSensitive) : substitute(oldText, caseSensitive); if (oldText != newText) { // it is faster to call 'setText' right away than perform additional 'equals' check t.setText(newText); } } else if (!(child instanceof Comment)) { LOG.error("Wrong content: " + child.getClass()); } } if (!element.hasAttributes()) { return; } for (Attribute attribute : element.getAttributes()) { if (filter == null || !filter.skipPathMacros(attribute)) { String newValue = getAttributeValue(attribute, filter, caseSensitive, recursively); if (attribute.getValue() != newValue) { // it is faster to call 'setValue' right away than perform additional 'equals' check attribute.setValue(newValue); } } } } @NotNull public String getAttributeValue(@NotNull Attribute attribute, @Nullable PathMacroFilter filter, boolean caseSensitive, boolean recursively) { String oldValue = attribute.getValue(); if (recursively || (filter != null && filter.recursePathMacros(attribute))) { return substituteRecursively(oldValue, caseSensitive); } else { return substitute(oldValue, caseSensitive); } } public final void substitute(@NotNull Element e, boolean caseSensitive, final boolean recursively) { substitute(e, caseSensitive, recursively, null); } @NotNull public String substituteRecursively(@NotNull String text, boolean caseSensitive) { return substitute(text, caseSensitive); } public abstract int hashCode(); }
apache-2.0
praveev/druid
benchmarks/src/main/java/io/druid/benchmark/TopNTypeInterfaceBenchmark.java
22188
/* * Licensed to Metamarkets Group Inc. (Metamarkets) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. Metamarkets licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package io.druid.benchmark; import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.collect.Lists; import com.google.common.collect.Maps; import com.google.common.io.Files; import io.druid.benchmark.datagen.BenchmarkDataGenerator; import io.druid.benchmark.datagen.BenchmarkSchemaInfo; import io.druid.benchmark.datagen.BenchmarkSchemas; import io.druid.benchmark.query.QueryBenchmarkUtil; import io.druid.collections.StupidPool; import io.druid.concurrent.Execs; import io.druid.data.input.InputRow; import io.druid.hll.HyperLogLogHash; import io.druid.jackson.DefaultObjectMapper; import io.druid.java.util.common.granularity.Granularities; import io.druid.java.util.common.guava.Sequence; import io.druid.java.util.common.guava.Sequences; import io.druid.java.util.common.logger.Logger; import io.druid.offheap.OffheapBufferGenerator; import io.druid.query.FinalizeResultsQueryRunner; import io.druid.query.Query; import io.druid.query.QueryPlus; import io.druid.query.QueryRunner; import io.druid.query.QueryRunnerFactory; import io.druid.query.QueryToolChest; import io.druid.query.Result; import io.druid.query.aggregation.AggregatorFactory; import 
io.druid.query.aggregation.DoubleMinAggregatorFactory; import io.druid.query.aggregation.DoubleSumAggregatorFactory; import io.druid.query.aggregation.LongMaxAggregatorFactory; import io.druid.query.aggregation.LongSumAggregatorFactory; import io.druid.query.aggregation.hyperloglog.HyperUniquesAggregatorFactory; import io.druid.query.aggregation.hyperloglog.HyperUniquesSerde; import io.druid.query.dimension.ExtractionDimensionSpec; import io.druid.query.extraction.IdentityExtractionFn; import io.druid.query.ordering.StringComparators; import io.druid.query.spec.MultipleIntervalSegmentSpec; import io.druid.query.spec.QuerySegmentSpec; import io.druid.query.topn.DimensionTopNMetricSpec; import io.druid.query.topn.TopNQuery; import io.druid.query.topn.TopNQueryBuilder; import io.druid.query.topn.TopNQueryConfig; import io.druid.query.topn.TopNQueryQueryToolChest; import io.druid.query.topn.TopNQueryRunnerFactory; import io.druid.query.topn.TopNResultValue; import io.druid.segment.IndexIO; import io.druid.segment.IndexMergerV9; import io.druid.segment.IndexSpec; import io.druid.segment.QueryableIndex; import io.druid.segment.QueryableIndexSegment; import io.druid.segment.column.ColumnConfig; import io.druid.segment.incremental.IncrementalIndex; import io.druid.segment.serde.ComplexMetrics; import org.openjdk.jmh.annotations.Benchmark; import org.openjdk.jmh.annotations.BenchmarkMode; import org.openjdk.jmh.annotations.Fork; import org.openjdk.jmh.annotations.Measurement; import org.openjdk.jmh.annotations.Mode; import org.openjdk.jmh.annotations.OutputTimeUnit; import org.openjdk.jmh.annotations.Param; import org.openjdk.jmh.annotations.Scope; import org.openjdk.jmh.annotations.Setup; import org.openjdk.jmh.annotations.State; import org.openjdk.jmh.annotations.Warmup; import org.openjdk.jmh.infra.Blackhole; import java.io.File; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.LinkedHashMap; import java.util.List; 
import java.util.Map; import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; // Benchmark for determining the interface overhead of TopN with multiple type implementations @State(Scope.Benchmark) @Fork(value = 1) @Warmup(iterations = 10) @Measurement(iterations = 25) public class TopNTypeInterfaceBenchmark { @Param({"1"}) private int numSegments; @Param({"750000"}) private int rowsPerSegment; @Param({"basic.A"}) private String schemaAndQuery; @Param({"10"}) private int threshold; private static final Logger log = new Logger(TopNTypeInterfaceBenchmark.class); private static final int RNG_SEED = 9999; private static final IndexMergerV9 INDEX_MERGER_V9; private static final IndexIO INDEX_IO; public static final ObjectMapper JSON_MAPPER; private List<IncrementalIndex> incIndexes; private List<QueryableIndex> qIndexes; private QueryRunnerFactory factory; private BenchmarkSchemaInfo schemaInfo; private TopNQueryBuilder queryBuilder; private TopNQuery stringQuery; private TopNQuery longQuery; private TopNQuery floatQuery; private ExecutorService executorService; static { JSON_MAPPER = new DefaultObjectMapper(); INDEX_IO = new IndexIO( JSON_MAPPER, new ColumnConfig() { @Override public int columnCacheSizeBytes() { return 0; } } ); INDEX_MERGER_V9 = new IndexMergerV9(JSON_MAPPER, INDEX_IO); } private static final Map<String, Map<String, TopNQueryBuilder>> SCHEMA_QUERY_MAP = new LinkedHashMap<>(); private void setupQueries() { // queries for the basic schema Map<String, TopNQueryBuilder> basicQueries = new LinkedHashMap<>(); BenchmarkSchemaInfo basicSchema = BenchmarkSchemas.SCHEMA_MAP.get("basic"); { // basic.A QuerySegmentSpec intervalSpec = new MultipleIntervalSegmentSpec(Collections.singletonList(basicSchema.getDataInterval())); List<AggregatorFactory> queryAggs = new ArrayList<>(); queryAggs.add(new LongSumAggregatorFactory("sumLongSequential", "sumLongSequential")); queryAggs.add(new LongMaxAggregatorFactory("maxLongUniform", 
"maxLongUniform")); queryAggs.add(new DoubleSumAggregatorFactory("sumFloatNormal", "sumFloatNormal")); queryAggs.add(new DoubleMinAggregatorFactory("minFloatZipf", "minFloatZipf")); queryAggs.add(new HyperUniquesAggregatorFactory("hyperUniquesMet", "hyper")); // Use an IdentityExtractionFn to force usage of DimExtractionTopNAlgorithm TopNQueryBuilder queryBuilderString = new TopNQueryBuilder() .dataSource("blah") .granularity(Granularities.ALL) .dimension(new ExtractionDimensionSpec("dimSequential", "dimSequential", IdentityExtractionFn.getInstance())) .metric("sumFloatNormal") .intervals(intervalSpec) .aggregators(queryAggs); // DimExtractionTopNAlgorithm is always used for numeric columns TopNQueryBuilder queryBuilderLong = new TopNQueryBuilder() .dataSource("blah") .granularity(Granularities.ALL) .dimension("metLongUniform") .metric("sumFloatNormal") .intervals(intervalSpec) .aggregators(queryAggs); TopNQueryBuilder queryBuilderFloat = new TopNQueryBuilder() .dataSource("blah") .granularity(Granularities.ALL) .dimension("metFloatNormal") .metric("sumFloatNormal") .intervals(intervalSpec) .aggregators(queryAggs); basicQueries.put("string", queryBuilderString); basicQueries.put("long", queryBuilderLong); basicQueries.put("float", queryBuilderFloat); } { // basic.numericSort QuerySegmentSpec intervalSpec = new MultipleIntervalSegmentSpec(Collections.singletonList(basicSchema.getDataInterval())); List<AggregatorFactory> queryAggs = new ArrayList<>(); queryAggs.add(new LongSumAggregatorFactory("sumLongSequential", "sumLongSequential")); TopNQueryBuilder queryBuilderA = new TopNQueryBuilder() .dataSource("blah") .granularity(Granularities.ALL) .dimension("dimUniform") .metric(new DimensionTopNMetricSpec(null, StringComparators.NUMERIC)) .intervals(intervalSpec) .aggregators(queryAggs); basicQueries.put("numericSort", queryBuilderA); } { // basic.alphanumericSort QuerySegmentSpec intervalSpec = new 
MultipleIntervalSegmentSpec(Collections.singletonList(basicSchema.getDataInterval())); List<AggregatorFactory> queryAggs = new ArrayList<>(); queryAggs.add(new LongSumAggregatorFactory("sumLongSequential", "sumLongSequential")); TopNQueryBuilder queryBuilderA = new TopNQueryBuilder() .dataSource("blah") .granularity(Granularities.ALL) .dimension("dimUniform") .metric(new DimensionTopNMetricSpec(null, StringComparators.ALPHANUMERIC)) .intervals(intervalSpec) .aggregators(queryAggs); basicQueries.put("alphanumericSort", queryBuilderA); } SCHEMA_QUERY_MAP.put("basic", basicQueries); } @Setup public void setup() throws IOException { log.info("SETUP CALLED AT " + System.currentTimeMillis()); if (ComplexMetrics.getSerdeForType("hyperUnique") == null) { ComplexMetrics.registerSerde("hyperUnique", new HyperUniquesSerde(HyperLogLogHash.getDefault())); } executorService = Execs.multiThreaded(numSegments, "TopNThreadPool"); setupQueries(); schemaInfo = BenchmarkSchemas.SCHEMA_MAP.get("basic"); queryBuilder = SCHEMA_QUERY_MAP.get("basic").get("string"); queryBuilder.threshold(threshold); stringQuery = queryBuilder.build(); TopNQueryBuilder longBuilder = SCHEMA_QUERY_MAP.get("basic").get("long"); longBuilder.threshold(threshold); longQuery = longBuilder.build(); TopNQueryBuilder floatBuilder = SCHEMA_QUERY_MAP.get("basic").get("float"); floatBuilder.threshold(threshold); floatQuery = floatBuilder.build(); incIndexes = new ArrayList<>(); for (int i = 0; i < numSegments; i++) { log.info("Generating rows for segment " + i); BenchmarkDataGenerator gen = new BenchmarkDataGenerator( schemaInfo.getColumnSchemas(), RNG_SEED + i, schemaInfo.getDataInterval(), rowsPerSegment ); IncrementalIndex incIndex = makeIncIndex(); for (int j = 0; j < rowsPerSegment; j++) { InputRow row = gen.nextRow(); if (j % 10000 == 0) { log.info(j + " rows generated."); } incIndex.add(row); } incIndexes.add(incIndex); } File tmpFile = Files.createTempDir(); log.info("Using temp dir: " + 
tmpFile.getAbsolutePath()); tmpFile.deleteOnExit(); qIndexes = new ArrayList<>(); for (int i = 0; i < numSegments; i++) { File indexFile = INDEX_MERGER_V9.persist( incIndexes.get(i), tmpFile, new IndexSpec() ); QueryableIndex qIndex = INDEX_IO.loadIndex(indexFile); qIndexes.add(qIndex); } factory = new TopNQueryRunnerFactory( new StupidPool<>( "TopNBenchmark-compute-bufferPool", new OffheapBufferGenerator("compute", 250000000), 0, Integer.MAX_VALUE ), new TopNQueryQueryToolChest(new TopNQueryConfig(), QueryBenchmarkUtil.NoopIntervalChunkingQueryRunnerDecorator()), QueryBenchmarkUtil.NOOP_QUERYWATCHER ); } private IncrementalIndex makeIncIndex() { return new IncrementalIndex.Builder() .setSimpleTestingIndexSchema(schemaInfo.getAggsArray()) .setReportParseExceptions(false) .setMaxRowCount(rowsPerSegment) .buildOnheap(); } private static <T> List<T> runQuery(QueryRunnerFactory factory, QueryRunner runner, Query<T> query) { QueryToolChest toolChest = factory.getToolchest(); QueryRunner<T> theRunner = new FinalizeResultsQueryRunner<>( toolChest.mergeResults(toolChest.preMergeQueryDecoration(runner)), toolChest ); Sequence<T> queryResult = theRunner.run(QueryPlus.wrap(query), Maps.newHashMap()); return Sequences.toList(queryResult, Lists.<T>newArrayList()); } @Benchmark @BenchmarkMode(Mode.AverageTime) @OutputTimeUnit(TimeUnit.MICROSECONDS) public void querySingleQueryableIndexStringOnly(Blackhole blackhole) throws Exception { QueryRunner<Result<TopNResultValue>> runner = QueryBenchmarkUtil.makeQueryRunner( factory, "qIndex", new QueryableIndexSegment("qIndex", qIndexes.get(0)) ); List<Result<TopNResultValue>> results = TopNTypeInterfaceBenchmark.runQuery(factory, runner, stringQuery); for (Result<TopNResultValue> result : results) { blackhole.consume(result); } } @Benchmark @BenchmarkMode(Mode.AverageTime) @OutputTimeUnit(TimeUnit.MICROSECONDS) public void querySingleQueryableIndexStringTwice(Blackhole blackhole) throws Exception { QueryRunner<Result<TopNResultValue>> 
runner = QueryBenchmarkUtil.makeQueryRunner( factory, "qIndex", new QueryableIndexSegment("qIndex", qIndexes.get(0)) ); List<Result<TopNResultValue>> results = TopNTypeInterfaceBenchmark.runQuery(factory, runner, stringQuery); for (Result<TopNResultValue> result : results) { blackhole.consume(result); } runner = QueryBenchmarkUtil.makeQueryRunner( factory, "qIndex", new QueryableIndexSegment("qIndex", qIndexes.get(0)) ); results = TopNTypeInterfaceBenchmark.runQuery(factory, runner, stringQuery); for (Result<TopNResultValue> result : results) { blackhole.consume(result); } } @Benchmark @BenchmarkMode(Mode.AverageTime) @OutputTimeUnit(TimeUnit.MICROSECONDS) public void querySingleQueryableIndexStringThenLong(Blackhole blackhole) throws Exception { QueryRunner<Result<TopNResultValue>> runner = QueryBenchmarkUtil.makeQueryRunner( factory, "qIndex", new QueryableIndexSegment("qIndex", qIndexes.get(0)) ); List<Result<TopNResultValue>> results = TopNTypeInterfaceBenchmark.runQuery(factory, runner, stringQuery); for (Result<TopNResultValue> result : results) { blackhole.consume(result); } runner = QueryBenchmarkUtil.makeQueryRunner( factory, "qIndex", new QueryableIndexSegment("qIndex", qIndexes.get(0)) ); results = TopNTypeInterfaceBenchmark.runQuery(factory, runner, longQuery); for (Result<TopNResultValue> result : results) { blackhole.consume(result); } } @Benchmark @BenchmarkMode(Mode.AverageTime) @OutputTimeUnit(TimeUnit.MICROSECONDS) public void querySingleQueryableIndexStringThenFloat(Blackhole blackhole) throws Exception { QueryRunner<Result<TopNResultValue>> runner = QueryBenchmarkUtil.makeQueryRunner( factory, "qIndex", new QueryableIndexSegment("qIndex", qIndexes.get(0)) ); List<Result<TopNResultValue>> results = TopNTypeInterfaceBenchmark.runQuery(factory, runner, stringQuery); for (Result<TopNResultValue> result : results) { blackhole.consume(result); } runner = QueryBenchmarkUtil.makeQueryRunner( factory, "qIndex", new QueryableIndexSegment("qIndex", 
qIndexes.get(0)) ); results = TopNTypeInterfaceBenchmark.runQuery(factory, runner, floatQuery); for (Result<TopNResultValue> result : results) { blackhole.consume(result); } } @Benchmark @BenchmarkMode(Mode.AverageTime) @OutputTimeUnit(TimeUnit.MICROSECONDS) public void querySingleQueryableIndexLongOnly(Blackhole blackhole) throws Exception { QueryRunner<Result<TopNResultValue>> runner = QueryBenchmarkUtil.makeQueryRunner( factory, "qIndex", new QueryableIndexSegment("qIndex", qIndexes.get(0)) ); List<Result<TopNResultValue>> results = TopNTypeInterfaceBenchmark.runQuery(factory, runner, longQuery); for (Result<TopNResultValue> result : results) { blackhole.consume(result); } } @Benchmark @BenchmarkMode(Mode.AverageTime) @OutputTimeUnit(TimeUnit.MICROSECONDS) public void querySingleQueryableIndexLongTwice(Blackhole blackhole) throws Exception { QueryRunner<Result<TopNResultValue>> runner = QueryBenchmarkUtil.makeQueryRunner( factory, "qIndex", new QueryableIndexSegment("qIndex", qIndexes.get(0)) ); List<Result<TopNResultValue>> results = TopNTypeInterfaceBenchmark.runQuery(factory, runner, longQuery); for (Result<TopNResultValue> result : results) { blackhole.consume(result); } runner = QueryBenchmarkUtil.makeQueryRunner( factory, "qIndex", new QueryableIndexSegment("qIndex", qIndexes.get(0)) ); results = TopNTypeInterfaceBenchmark.runQuery(factory, runner, longQuery); for (Result<TopNResultValue> result : results) { blackhole.consume(result); } } @Benchmark @BenchmarkMode(Mode.AverageTime) @OutputTimeUnit(TimeUnit.MICROSECONDS) public void querySingleQueryableIndexLongThenString(Blackhole blackhole) throws Exception { QueryRunner<Result<TopNResultValue>> runner = QueryBenchmarkUtil.makeQueryRunner( factory, "qIndex", new QueryableIndexSegment("qIndex", qIndexes.get(0)) ); List<Result<TopNResultValue>> results = TopNTypeInterfaceBenchmark.runQuery(factory, runner, longQuery); for (Result<TopNResultValue> result : results) { blackhole.consume(result); } runner = 
QueryBenchmarkUtil.makeQueryRunner( factory, "qIndex", new QueryableIndexSegment("qIndex", qIndexes.get(0)) ); results = TopNTypeInterfaceBenchmark.runQuery(factory, runner, stringQuery); for (Result<TopNResultValue> result : results) { blackhole.consume(result); } } @Benchmark @BenchmarkMode(Mode.AverageTime) @OutputTimeUnit(TimeUnit.MICROSECONDS) public void querySingleQueryableIndexLongThenFloat(Blackhole blackhole) throws Exception { QueryRunner<Result<TopNResultValue>> runner = QueryBenchmarkUtil.makeQueryRunner( factory, "qIndex", new QueryableIndexSegment("qIndex", qIndexes.get(0)) ); List<Result<TopNResultValue>> results = TopNTypeInterfaceBenchmark.runQuery(factory, runner, longQuery); for (Result<TopNResultValue> result : results) { blackhole.consume(result); } runner = QueryBenchmarkUtil.makeQueryRunner( factory, "qIndex", new QueryableIndexSegment("qIndex", qIndexes.get(0)) ); results = TopNTypeInterfaceBenchmark.runQuery(factory, runner, floatQuery); for (Result<TopNResultValue> result : results) { blackhole.consume(result); } } @Benchmark @BenchmarkMode(Mode.AverageTime) @OutputTimeUnit(TimeUnit.MICROSECONDS) public void querySingleQueryableIndexFloatOnly(Blackhole blackhole) throws Exception { QueryRunner<Result<TopNResultValue>> runner = QueryBenchmarkUtil.makeQueryRunner( factory, "qIndex", new QueryableIndexSegment("qIndex", qIndexes.get(0)) ); List<Result<TopNResultValue>> results = TopNTypeInterfaceBenchmark.runQuery(factory, runner, floatQuery); for (Result<TopNResultValue> result : results) { blackhole.consume(result); } } @Benchmark @BenchmarkMode(Mode.AverageTime) @OutputTimeUnit(TimeUnit.MICROSECONDS) public void querySingleQueryableIndexFloatTwice(Blackhole blackhole) throws Exception { QueryRunner<Result<TopNResultValue>> runner = QueryBenchmarkUtil.makeQueryRunner( factory, "qIndex", new QueryableIndexSegment("qIndex", qIndexes.get(0)) ); List<Result<TopNResultValue>> results = TopNTypeInterfaceBenchmark.runQuery(factory, runner, 
floatQuery); for (Result<TopNResultValue> result : results) { blackhole.consume(result); } runner = QueryBenchmarkUtil.makeQueryRunner( factory, "qIndex", new QueryableIndexSegment("qIndex", qIndexes.get(0)) ); results = TopNTypeInterfaceBenchmark.runQuery(factory, runner, floatQuery); for (Result<TopNResultValue> result : results) { blackhole.consume(result); } } @Benchmark @BenchmarkMode(Mode.AverageTime) @OutputTimeUnit(TimeUnit.MICROSECONDS) public void querySingleQueryableIndexFloatThenString(Blackhole blackhole) throws Exception { QueryRunner<Result<TopNResultValue>> runner = QueryBenchmarkUtil.makeQueryRunner( factory, "qIndex", new QueryableIndexSegment("qIndex", qIndexes.get(0)) ); List<Result<TopNResultValue>> results = TopNTypeInterfaceBenchmark.runQuery(factory, runner, floatQuery); for (Result<TopNResultValue> result : results) { blackhole.consume(result); } runner = QueryBenchmarkUtil.makeQueryRunner( factory, "qIndex", new QueryableIndexSegment("qIndex", qIndexes.get(0)) ); results = TopNTypeInterfaceBenchmark.runQuery(factory, runner, stringQuery); for (Result<TopNResultValue> result : results) { blackhole.consume(result); } } @Benchmark @BenchmarkMode(Mode.AverageTime) @OutputTimeUnit(TimeUnit.MICROSECONDS) public void querySingleQueryableIndexFloatThenLong(Blackhole blackhole) throws Exception { QueryRunner<Result<TopNResultValue>> runner = QueryBenchmarkUtil.makeQueryRunner( factory, "qIndex", new QueryableIndexSegment("qIndex", qIndexes.get(0)) ); List<Result<TopNResultValue>> results = TopNTypeInterfaceBenchmark.runQuery(factory, runner, floatQuery); for (Result<TopNResultValue> result : results) { blackhole.consume(result); } runner = QueryBenchmarkUtil.makeQueryRunner( factory, "qIndex", new QueryableIndexSegment("qIndex", qIndexes.get(0)) ); results = TopNTypeInterfaceBenchmark.runQuery(factory, runner, longQuery); for (Result<TopNResultValue> result : results) { blackhole.consume(result); } } }
apache-2.0
googleapis/google-api-php-client-services
src/CertificateAuthorityService/EcKeyType.php
1198
<?php
/*
 * Copyright 2014 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

namespace Google\Service\CertificateAuthorityService;

/**
 * Generated API model for an EC (elliptic curve) key type in the Certificate
 * Authority Service. The model's only field is the signature algorithm name;
 * the set of valid values is defined by the remote API, not constrained here.
 */
class EcKeyType extends \Google\Model
{
  /**
   * Signature algorithm name, as a plain string supplied by / sent to the API.
   *
   * @var string
   */
  public $signatureAlgorithm;

  /**
   * Sets the signature algorithm name.
   *
   * @param string $signatureAlgorithm
   */
  public function setSignatureAlgorithm($signatureAlgorithm)
  {
    $this->signatureAlgorithm = $signatureAlgorithm;
  }

  /**
   * Returns the signature algorithm name.
   *
   * @return string
   */
  public function getSignatureAlgorithm()
  {
    return $this->signatureAlgorithm;
  }
}

// Adding a class alias for backwards compatibility with the previous class name.
class_alias(EcKeyType::class, 'Google_Service_CertificateAuthorityService_EcKeyType');
apache-2.0
xushaomin/apple-jms
apple-jms-springkafka/src/main/java/com/appleframework/jms/kafka/consumer/multithread/thread/ObjectMessageConsumer.java
369
package com.appleframework.jms.kafka.consumer.multithread.thread;

import com.appleframework.jms.core.consumer.IMessageConusmer;

/**
 * Consumer for raw {@link Object} message payloads: adapts the generic
 * {@code BaseMessageConsumer} callback to the {@code IMessageConusmer}
 * contract by forwarding every received message to {@code onMessage(Object)}.
 * (NOTE(review): {@code onMessage} is presumably declared by
 * {@code IMessageConusmer} or {@code BaseMessageConsumer} — neither is visible
 * here; confirm against those types.)
 *
 * @author Cruise.Xu
 */
public abstract class ObjectMessageConsumer extends BaseMessageConsumer<Object> implements IMessageConusmer<Object> {

	@Override
	public void processMessage(Object message) {
		// Delegate straight to the subclass-supplied handler.
		onMessage(message);
	}

}
apache-2.0
debop/debop4s
debop4s-core/src/main/scala/debop4s/core/spring/Profiles.scala
362
package debop4s.core.spring

/**
 * Spring runtime profile enumeration: LOCAL / DEVELOP / TEST / PRODUCTION,
 * with explicit ordinal ids and names.
 *
 * NOTE(review): this extends `Enumeration` as a `class`, so every
 * `new Profiles` instance gets its own independent value set; the idiomatic
 * Scala pattern is an `object`. Left unchanged because converting class to
 * object would break any caller that instantiates it — confirm usage first.
 *
 * debop4s.core.spring.Profiles
 *
 * @author 배성혁 sunghyouk.bae@gmail.com
 * @since 2013. 12. 12. 오전 10:54
 */
class Profiles extends Enumeration {

  type Profiles = Value

  val LOCAL = Value(0, "LOCAL")
  val DEVELOP = Value(1, "DEVELOP")
  val TEST = Value(2, "TEST")
  val PRODUCTION = Value(3, "PRODUCTION")
}
apache-2.0
ideaworld/BioDesigner
system/gene.py
15970
""" gene.py realize the methods that are related to system recommendation. @author: Bowen """ from system.models import gene, reaction, compound, reaction_compound, compound_gene, pathway, pathway_compound, organism from system.fasta_reader import parse_fasta_str from elasticsearch import Elasticsearch import traceback import urllib2 import json from django.db.models import Q def search_compound(keyword): """ search compound based on the keyword @param keyword: the keyword that the user typed. Which would be used in search @type keyword: str @return: return a list that contains searched compounds @rtype: list """ es = Elasticsearch() result = format_fuzzy_result(fuzzy_search_compound(es, keyword)) return result def fuzzy_search_compound(es, keyword): """ fuzzy search compound based on the keyword with elasticsearch @param es: the elasticsearch object @param keyword: the search keyword @type es: Elasticsearch @type keyword: str @return a dict generated by the elasticsearch, which contains the search result @rtype: dict """ query_body = { "from" : 0, "size" : 20, "query" : { "fuzzy_like_this" : { "fields" : ["name"], "like_text" : keyword, "max_query_terms" : 20 } } } result = es.search(index="biodesigners", doc_type="compounds", body=query_body) return result def format_fuzzy_result(es_result): """ format the es search result to front end processable format @param es_result: the es search result @type es_result: dict @return: the front end processable format, while will be like this:: [{'compound_id': id, 'name': name},...] 
@rtype: list """ compound_result = es_result['hits']['hits'] result = list() if len(compound_result) != 0: for compound_item in compound_result: info = compound_item['_source'] compound_info = { 'compound_id': info["compound_id"], 'name': info['name'], } result.append(compound_info) return result def get_gene_info(gid): """ get gene information from the database @param gid: the gene id @ytpe gid: str @return: gene information dict @rtype: dict """ base_gene_url = 'http://www.ncbi.nlm.nih.gov/gene/' try: gene_obj = gene.objects.get(gene_id=gid) result = { 'gene_id': gene_obj.gene_id, 'name': gene_obj.name, 'definition': gene_obj.definition, 'organism_short': gene_obj.organism_short, 'organism': gene_obj.organism, 'gene_url' : base_gene_url + gene_obj.gene_id } return True, result except: traceback.print_exc() return False, None def get_compound_info(cid): """ get a specific compound's information @param cid: compound id @type cid: str @return: a tunple that contains is compound can be retrived and the information @rtype: dict """ try: compound_obj = compound.objects.get(compound_id=cid) result = { 'compound_id' : compound_obj.compound_id, 'name': compound_obj.name, 'nicknames' : compound_obj.nicknames.replace('_', '\n'), 'formula' : compound_obj.formula, 'exact_mass' : compound_obj.exact_mass, 'mol_weight' : compound_obj.mol_mass } return True, result except: traceback.print_exc() return False, None class gene_graph: """ gene graph, including calculation and generate of gene & protein relation graph """ def __init__(self, cid_list, ogm): """ constructor for gene_graph class @param cid_list: compound id list @type cid_list: str @param ogm: organisms @type ogm:str """ if cid_list.startswith('_'): cid_list = cid_list[1:] if cid_list.endswith('_'): cid_list = cid_list[:-1] self.cid_list = cid_list.split('_') self.nodes = list() self.edges = list() self.index_dict = dict() self.index = 0 if ogm != None: if ogm.startswith('_'): ogm = ogm[1:] if ogm.endswith('_'): ogm = 
ogm[:-1] self.organisms = ogm.split('_') else: self.organisms = None def get_compound_object(self, cid): """ get compound object by compound id @param cid: compound id @type cid: str @return: compound object or none if not found @rtype: compound """ try: compound_obj = compound.objects.get(compound_id=cid) return compound_obj except: return None def retrive_gene_detain(self, gid): """ get gene data from ncib @param gid: gene id @type gid: str @return: gene information in dict or none @rtype: dict """ #get information from ncbi baseUrl = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esummary.fcgi?db=gene&retmode=json&version=2.0&id=' try: req = urllib2.Request(baseUrl + gid) response = urllib2.urlopen(req) resStr = response.read() result = json.loads(resStr) infos = result['result'][gid] detail_info = dict() detail_info['name'] = infos['name'] detail_info['definition'] = infos['description'] detail_info['organism'] = infos['organism']['scientificname'] return detail_info except: traceback.print_exc() return None def related_compound(self, cid): """ find a compound's related compound @param cid: compound id @type cid: str @return: list of related compound @rtype: list """ compound_obj = self.get_compound_object(cid) if self.organisms != None: organism_pathway_id_list = pathway.objects.filter(organism_id__in=self.organisms).values_list('pathway_id', flat=True) else: organism_pathway_id_list = pathway.objects.all() valued_pathway_id_list = pathway_compound.objects.filter(pathway_id__in=organism_pathway_id_list, compound=compound_obj) valued_compound_list = pathway_compound.objects.filter(Q(pathway_id__in=valued_pathway_id_list), ~Q(compound=compound_obj)).values_list('compound', flat=True) compound_list = compound.objects.filter(compound_id__in=valued_compound_list) return compound_list def create_node(self, name, id): """ create a node (gene or compound) in the graph @param name: name for the node @param id: id for the node @type name : str @type id : str """ 
node_info = { 'name': name, 'id': id } self.nodes.append(node_info) if id in self.index_dict.keys(): return True self.index_dict[id] = self.index self.index += 1 return True def create_n_link(self, center_node, compound_obj): """ create nodes and link them @param center_node: source node @type center_node:compound @param compound_obj: compound object @type compound_obj: compound """ gene_list = self.search_gene(compound_obj) for gene_id in gene_list: try: gene_obj = gene.objects.get(gene_id=gene_id) if self.create_node(gene_obj.name, gene_obj.gene_id): edge_info = { 'source' : self.index_dict[center_node], 'target' : self.index_dict[gene_obj.gene_id], 'relation' : compound_obj.name } self.edges.append(edge_info) except: traceback.print_exc() pass return gene_list[0] def get_or_create_gene(self, gid): """ find gene in database, if found, return gene, or search in ncbi @param gid: gene id @type gid: str @return gene object @rtype: gene """ #get in database try: gene_obj = gene.objects.get(gene_id=gid) return gene_obj except: #get from ncbi baseUrl = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=nuccore&rettype=fasta&id=' req = urllib2.Request(baseUrl + gid) response = urllib2.urlopen(req) resStr = response.read() gene_dict = parse_fasta_str(resStr) for gn in gene_dict.keys(): gid = gn.split('|')[1] #get detail information new_gene_obj = gene(gene_id=gid) detail_info = self.retrive_gene_detain(gid) if detail_info == None: continue new_gene_obj.name = detail_info['name'] new_gene_obj.definition = detail_info['definition'] new_gene_obj.organism = detail_info['organism'] new_gene_obj.ntseq = gene_dict[gn] new_gene_obj.ntseq_length = len(gene_dict[gn]) try: new_gene_obj.save() return new_gene_obj except: pass return None def save_relation_to_db(self, geneIdList, compound_obj): """ save relation between compound_obj and gene to database @param geneIdList: gene id in a list @type geneIdList: list @param compound_obj: compound object @type compound_obj: 
compound """ #create new obj for gid in geneIdList: new_rela_obj = compound_gene(compound=compound_obj) gene_obj = self.get_or_create_gene(gid) if gene_obj == None: continue new_rela_obj.gene = gene_obj try: new_rela_obj.save() except: pass def search_gene(self, compound_obj): """ find gene realted to a compound @param compound_obj: the compound object @type compound_obj: compound @return related genes @rtype: list """ #search in database obj_list = compound_gene.objects.filter(compound=compound_obj) if len(obj_list) != 0: geneIdList = list() for obj in obj_list: geneIdList.append(obj.gene.gene_id) return geneIdList[:2] else: baseGeneFindUrl = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=gene&retmode=json&term=' try: req = urllib2.Request(baseGeneFindUrl + compound_obj.name) response = urllib2.urlopen(req) resStr = response.read() except: traceback.print_exc() return None if len(resStr) == 0: return None result = json.loads(resStr) geneIdList = result['esearchresult']['idlist'] self.save_relation_to_db(geneIdList, compound_obj) return geneIdList[:2] def cal_graph(self): """ calculate the relation graph """ for cid in self.cid_list: center_compound_obj = self.get_compound_object(cid) if center_compound_obj == None: continue self.create_node(center_compound_obj.name, center_compound_obj.compound_id) related_list = self.related_compound(center_compound_obj.compound_id)[:5] for compound_obj in related_list: new_center = self.create_n_link(center_compound_obj.compound_id, compound_obj) self.create_node(compound_obj.name, compound_obj.compound_id) edge_info = { 'source': self.index_dict[center_compound_obj.compound_id], 'target': self.index_dict[compound_obj.compound_id], 'relation': compound_obj.name, } deep_related_list = self.related_compound(compound_obj.compound_id)[:2] for deep_compound_obj in deep_related_list: self.create_n_link(compound_obj.compound_id, deep_compound_obj) def get_graph(self): """ get the graph @return: th graph @rtype: dict """ 
result = { 'nodes': self.nodes, 'edges' : self.edges } return result ''' def find_related_compound(cid_str): """ find the compound that are related to current compound in reaction @param cid: list of compound id @type cid: list @return: dict of compound that are related to the compound, empty list will be returned if there is no related compound @rtype: dict """ result = dict() nodes = list() edges = list() all_genes = list() index_dict = dict() index = 0 if cid_str.endswith('_'): cid_str = cid_str[:-1] cid_list = cid_str.split('_') for cid in cid_list: try: compound_obj = compound.objects.get(compound_id=cid) #get first gene and create new node cen_gene_id = None try: cen_gene_id = search_gene_in_ncbi(compound_obj.name,)[0] if not cen_gene_id in all_genes: all_genes.append(cen_gene_id) gene_obj = gene.objects.get(gene_id=cen_gene_id) node_info = { 'name': gene_obj.name, 'id': gene_obj.gene_id } nodes.append(node_info) index_dict[cen_gene_id] = index index += 1 except: pass # find related reactions rid_list = reaction_compound.objects.filter(compound=compound_obj, isReactant=True).values_list('reaction_id', flat=True) cname_list = list() for rid in rid_list: rs = reaction_compound.objects.filter(Q(reaction_id=rid), ~Q(compound=compound_obj))[:5] for r in rs: cname_list.append(r.compound.name) for cname in cname_list: # find genes gene_list = search_gene_in_ncbi(cname, expect=cen_gene_id, index=1) for gene_id in gene_list: if gene_id in all_genes: continue try: gene_obj = gene.objects.get(gene_id=gene_id) #create new node all_genes.append(gene_id) node_info = { 'name' : gene_obj.name, 'id': gene_obj.gene_id } nodes.append(node_info) index_dict[gene_obj.gene_id] = index index += 1 # add edge edge_info = { 'source': index_dict[cen_gene_id], 'target': index_dict[gene_obj.gene_id], 'relation': cname } edges.append(edge_info) except: traceback.print_exc() pass except: traceback.print_exc() pass result = { 'nodes': nodes, 'edges': edges } return result '''
apache-2.0
CiNC0/Cartier
cartier-redis/src/test/java/com/lambdaworks/redis/cluster/NodeSelectionAsyncTest.java
10040
/*
 * Copyright 2011-2016 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.lambdaworks.redis.cluster;

import static com.lambdaworks.redis.ScriptOutputType.STATUS;
import static org.assertj.core.api.Assertions.assertThat;

import java.util.*;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;

import com.google.code.tempusfugit.temporal.WaitFor;
import com.lambdaworks.redis.internal.LettuceSets;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

import com.lambdaworks.Wait;
import com.lambdaworks.redis.api.async.RedisAsyncCommands;
import com.lambdaworks.redis.cluster.api.StatefulRedisClusterConnection;
import com.lambdaworks.redis.cluster.api.async.AsyncExecutions;
import com.lambdaworks.redis.cluster.api.async.AsyncNodeSelection;
import com.lambdaworks.redis.cluster.api.async.RedisAdvancedClusterAsyncCommands;
import com.lambdaworks.redis.cluster.api.async.RedisClusterAsyncCommands;
import com.lambdaworks.redis.cluster.api.sync.RedisAdvancedClusterCommands;
import com.lambdaworks.redis.cluster.models.partitions.Partitions;
import com.lambdaworks.redis.cluster.models.partitions.RedisClusterNode;

/**
 * Integration tests for asynchronous node selections against a live Redis
 * cluster (provided by {@code AbstractClusterTest}). The fixture assumes a
 * 4-node cluster: 2 masters and 2 slaves.
 *
 * @author Mark Paluch
 */
public class NodeSelectionAsyncTest extends AbstractClusterTest {

    private RedisAdvancedClusterAsyncCommands<String, String> commands;
    // NOTE(review): syncCommands is assigned in before() but never used by any
    // test in this class.
    private RedisAdvancedClusterCommands<String, String> syncCommands;
    private StatefulRedisClusterConnection<String, String> clusterConnection;

    @Before
    public void before() throws Exception {
        clusterClient.reloadPartitions();
        clusterConnection = clusterClient.connect();
        commands = clusterConnection.async();
        syncCommands = clusterConnection.sync();
    }

    @After
    public void after() throws Exception {
        commands.close();
    }

    @Test
    public void testMultiNodeOperations() throws Exception {

        // Seed keys "aaa".."yyy" across the cluster.
        List<String> expectation = new ArrayList<>();
        for (char c = 'a'; c < 'z'; c++) {
            String key = new String(new char[] { c, c, c });
            expectation.add(key);
            commands.set(key, value).get();
        }

        // Vector: the keys callback may be invoked from more than one thread.
        List<String> result = new Vector<>();

        CompletableFuture.allOf(commands.masters().commands().keys(result::add, "*").futures()).get();

        assertThat(result).hasSize(expectation.size());

        Collections.sort(expectation);
        Collections.sort(result);

        assertThat(result).isEqualTo(expectation);
    }

    @Test
    public void testNodeSelectionCount() throws Exception {
        // Expected topology: 4 nodes total, 2 masters, 2 slaves, 1 "myself".
        assertThat(commands.all().size()).isEqualTo(4);
        assertThat(commands.slaves().size()).isEqualTo(2);
        assertThat(commands.masters().size()).isEqualTo(2);

        assertThat(commands.nodes(redisClusterNode -> redisClusterNode.is(RedisClusterNode.NodeFlag.MYSELF)).size()).isEqualTo(
                1);
    }

    @Test
    public void testNodeSelection() throws Exception {

        AsyncNodeSelection<String, String> onlyMe = commands.nodes(redisClusterNode -> redisClusterNode.getFlags().contains(
                RedisClusterNode.NodeFlag.MYSELF));
        Map<RedisClusterNode, RedisAsyncCommands<String, String>> map = onlyMe.asMap();

        assertThat(map).hasSize(1);

        RedisClusterAsyncCommands<String, String> node = onlyMe.commands(0);
        assertThat(node).isNotNull();

        RedisClusterNode redisClusterNode = onlyMe.node(0);
        assertThat(redisClusterNode.getFlags()).contains(RedisClusterNode.NodeFlag.MYSELF);

        assertThat(onlyMe.asMap()).hasSize(1);
    }

    @Test
    public void testDynamicNodeSelection() throws Exception {

        Partitions partitions = commands.getStatefulConnection().getPartitions();
        // Reset all nodes to MASTER only, so the MYSELF-based selection starts empty.
        partitions.forEach(redisClusterNode -> redisClusterNode.setFlags(Collections.singleton(RedisClusterNode.NodeFlag.MASTER)));

        // dynamic = true: the selection re-evaluates its predicate on each access.
        AsyncNodeSelection<String, String> selection = commands.nodes(
                redisClusterNode -> redisClusterNode.getFlags().contains(RedisClusterNode.NodeFlag.MYSELF), true);
        assertThat(selection.asMap()).hasSize(0);

        partitions.getPartition(0)
                .setFlags(LettuceSets.unmodifiableSet(RedisClusterNode.NodeFlag.MYSELF, RedisClusterNode.NodeFlag.MASTER));
        assertThat(selection.asMap()).hasSize(1);

        partitions.getPartition(1)
                .setFlags(LettuceSets.unmodifiableSet(RedisClusterNode.NodeFlag.MYSELF, RedisClusterNode.NodeFlag.MASTER));
        assertThat(selection.asMap()).hasSize(2);
    }

    @Test
    public void testNodeSelectionAsyncPing() throws Exception {

        AsyncNodeSelection<String, String> onlyMe = commands.nodes(redisClusterNode -> redisClusterNode.getFlags().contains(
                RedisClusterNode.NodeFlag.MYSELF));
        Map<RedisClusterNode, RedisAsyncCommands<String, String>> map = onlyMe.asMap();

        assertThat(map).hasSize(1);

        AsyncExecutions<String> ping = onlyMe.commands().ping();
        CompletionStage<String> completionStage = ping.get(onlyMe.node(0));

        assertThat(completionStage.toCompletableFuture().get()).isEqualTo("PONG");
    }

    @Test
    public void testStaticNodeSelection() throws Exception {

        // dynamic = false: selection membership is frozen at creation time.
        AsyncNodeSelection<String, String> selection = commands.nodes(
                redisClusterNode -> redisClusterNode.getFlags().contains(RedisClusterNode.NodeFlag.MYSELF), false);

        assertThat(selection.asMap()).hasSize(1);

        // Changing flags afterwards must not change the static selection.
        commands.getStatefulConnection().getPartitions().getPartition(2)
                .setFlags(Collections.singleton(RedisClusterNode.NodeFlag.MYSELF));

        assertThat(selection.asMap()).hasSize(1);
    }

    @Test
    public void testAsynchronicityOfMultiNodeExecution() throws Exception {

        RedisAdvancedClusterAsyncCommands<String, String> connection2 = clusterClient.connectClusterAsync();

        AsyncNodeSelection<String, String> masters = connection2.masters();
        CompletableFuture.allOf(masters.commands().configSet("lua-time-limit", "10").futures()).get();
        // Infinite Lua loop blocks every master; the eval futures must stay pending.
        AsyncExecutions<Object> eval = masters.commands().eval("while true do end", STATUS, new String[0]);

        for (CompletableFuture<Object> future : eval.futures()) {
            assertThat(future.isDone()).isFalse();
            assertThat(future.isCancelled()).isFalse();
        }
        Thread.sleep(200);

        // SCRIPT KILL (issued on the other connection) unblocks the masters.
        AsyncExecutions<String> kill = commands.masters().commands().scriptKill();
        CompletableFuture.allOf(kill.futures()).get();

        for (CompletionStage<String> execution : kill) {
            assertThat(execution.toCompletableFuture().get()).isEqualTo("OK");
        }

        // The killed evals complete (exceptionally); swallow the failures and
        // only check completion.
        CompletableFuture.allOf(eval.futures()).exceptionally(throwable -> null).get();
        for (CompletableFuture<Object> future : eval.futures()) {
            assertThat(future.isDone()).isTrue();
        }
    }

    @Test
    public void testSlavesReadWrite() throws Exception {

        AsyncNodeSelection<String, String> nodes = commands.nodes(redisClusterNode -> redisClusterNode.getFlags().contains(
                RedisClusterNode.NodeFlag.SLAVE));

        assertThat(nodes.size()).isEqualTo(2);

        commands.set(key, value).get();
        waitForReplication(key, port4);

        // Reading from slaves without READONLY: at least one GET must fail.
        List<Throwable> t = new ArrayList<>();
        AsyncExecutions<String> keys = nodes.commands().get(key);
        keys.stream().forEach(lcs -> {
            lcs.toCompletableFuture().exceptionally(throwable -> {
                t.add(throwable);
                return null;
            });
        });

        CompletableFuture.allOf(keys.futures()).exceptionally(throwable -> null).get();

        assertThat(t.size()).isGreaterThan(0);
    }

    @Test
    public void testSlavesWithReadOnly() throws Exception {

        AsyncNodeSelection<String, String> nodes = commands.slaves(redisClusterNode -> redisClusterNode
                .is(RedisClusterNode.NodeFlag.SLAVE));

        assertThat(nodes.size()).isEqualTo(2);

        commands.set(key, value).get();
        waitForReplication(key, port4);

        // With the slaves() (read-only) selection, exactly one GET succeeds
        // with the value and exactly one fails (per the cluster layout).
        List<Throwable> t = new ArrayList<>();
        List<String> strings = new ArrayList<>();
        AsyncExecutions<String> keys = nodes.commands().get(key);
        keys.stream().forEach(lcs -> {
            lcs.toCompletableFuture().exceptionally(throwable -> {
                t.add(throwable);
                return null;
            });
            lcs.thenAccept(strings::add);
        });

        CompletableFuture.allOf(keys.futures()).exceptionally(throwable -> null).get();
        Wait.untilEquals(1, () -> t.size()).waitOrTimeout();

        assertThat(t).hasSize(1);
        assertThat(strings).hasSize(1).contains(value);
    }

    protected void waitForReplication(String key, int port) throws Exception {
        waitForReplication(commands, key, port);
    }

    // Polls the slave listening on the given port until it serves the key,
    // i.e. replication of the preceding SET has caught up.
    protected static void waitForReplication(RedisAdvancedClusterAsyncCommands<String, String> commands, String key, int port)
            throws Exception {

        AsyncNodeSelection<String, String> selection = commands
                .slaves(redisClusterNode -> redisClusterNode.getUri().getPort() == port);
        Wait.untilNotEquals(null, () -> {
            for (CompletableFuture<String> future : selection.commands().get(key).futures()) {
                if (future.get() != null) {
                    return future.get();
                }
            }
            return null;
        }).waitOrTimeout();
    }
}
apache-2.0
william-taylor/world-cup-manager
app/app/src/main/java/application/events/StateEvent.java
1394
package application.events; import java.util.TimerTask; import java.util.Timer; import framework.IEvent; import framework.core.*; import framework.graphics.Button; public class StateEvent implements IEvent { private Boolean goToNextScene; private Button button; private Integer stateID; private Timer timerTwo; private Timer timerOne; private class TextureChange extends TimerTask { private final String filename = "sprites/button2.png"; @Override public void run() { button.setTexture(filename); timerOne.cancel(); } } private class TimedStateChange extends TimerTask { @Override public void run() { goToNextScene = true; timerTwo.cancel(); } } public StateEvent(Integer state, Button sprite) { goToNextScene = false; button = sprite; stateID = state; } @Override public void update() { if(goToNextScene) { SceneManager.get().switchTo(stateID); goToNextScene = false; } } @Override public void onActivate(Object data) { button.setTexture("sprites/fill.png"); timerOne = new Timer(); timerTwo = new Timer(); timerOne.schedule(new TextureChange(), 100); timerTwo.schedule(new TimedStateChange(), 350); } }
apache-2.0
lesaint/damapping
core-parent/annotation-processor/src/main/java/fr/javatronic/damapping/processor/impl/javaxparsing/model/JavaxDASourceClass.java
3371
/**
 * Copyright (C) 2013 Sébastien Lesaint (http://www.javatronic.fr/)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package fr.javatronic.damapping.processor.impl.javaxparsing.model;

import fr.javatronic.damapping.processor.model.DAAnnotation;
import fr.javatronic.damapping.processor.model.DAEnumValue;
import fr.javatronic.damapping.processor.model.DAInterface;
import fr.javatronic.damapping.processor.model.DAMethod;
import fr.javatronic.damapping.processor.model.DAModifier;
import fr.javatronic.damapping.processor.model.DAName;
import fr.javatronic.damapping.processor.model.DASourceClass;
import fr.javatronic.damapping.processor.model.DAType;
import fr.javatronic.damapping.processor.model.InstantiationType;
import fr.javatronic.damapping.processor.model.visitor.DAModelVisitor;
import fr.javatronic.damapping.util.Optional;

import java.util.List;
import java.util.Set;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import javax.annotation.concurrent.Immutable;
import javax.lang.model.element.TypeElement;

import static fr.javatronic.damapping.util.Preconditions.checkNotNull;

/**
 * JavaxDASourceClass - Immutable decorator around a {@link DASourceClass} that
 * additionally carries the javax.lang.model {@link TypeElement} the model was
 * parsed from. Every {@link DASourceClass} method delegates unchanged to the
 * wrapped instance; only {@link #getClassElement()} adds behavior.
 *
 * @author Sébastien Lesaint
 */
@Immutable
public class JavaxDASourceClass implements DASourceClass {
  /** Wrapped model object; all DASourceClass calls are forwarded to it. */
  @Nonnull
  private final DASourceClass delegate;
  /** Compiler element this source class was parsed from. */
  @Nonnull
  private final TypeElement classElement;

  public JavaxDASourceClass(@Nonnull DASourceClass delegate, @Nonnull TypeElement classElement) {
    this.delegate = checkNotNull(delegate);
    this.classElement = checkNotNull(classElement);
  }

  /**
   * The javax.lang.model element backing this source class — the one piece of
   * state not provided by the delegate.
   */
  @Nonnull
  public TypeElement getClassElement() {
    return classElement;
  }

  @Override
  @Nonnull
  public DAType getType() {
    return delegate.getType();
  }

  @Override
  @Nullable
  public DAName getPackageName() {
    return delegate.getPackageName();
  }

  @Override
  @Nonnull
  public List<DAAnnotation> getAnnotations() {
    return delegate.getAnnotations();
  }

  @Override
  @Nonnull
  public Optional<DAAnnotation> getInjectableAnnotation() {
    return delegate.getInjectableAnnotation();
  }

  @Override
  @Nonnull
  public Set<DAModifier> getModifiers() {
    return delegate.getModifiers();
  }

  @Override
  @Nonnull
  public List<DAInterface> getInterfaces() {
    return delegate.getInterfaces();
  }

  @Override
  @Nonnull
  public List<DAMethod> getMethods() {
    return delegate.getMethods();
  }

  @Override
  @Nonnull
  public List<DAMethod> getAccessibleConstructors() {
    return delegate.getAccessibleConstructors();
  }

  @Override
  @Nonnull
  public List<DAEnumValue> getEnumValues() {
    return delegate.getEnumValues();
  }

  @Override
  @Nonnull
  public InstantiationType getInstantiationType() {
    return delegate.getInstantiationType();
  }

  @Override
  public void accept(DAModelVisitor visitor) {
    delegate.accept(visitor);
  }
}
apache-2.0
aanand/dockerpty
setup.py
1647
# dockerpty.
#
# Copyright 2014 Chris Corbyn <chris@w3style.co.uk>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from setuptools import setup

import os


def fopen(filename):
    # Open `filename` relative to this setup.py (not the current working
    # directory); the caller is responsible for closing the returned handle.
    return open(os.path.join(os.path.dirname(__file__), filename))


def read(filename):
    # Read a sibling file in full, closing the handle deterministically
    # (the previous version leaked it until interpreter exit).
    with fopen(filename) as f:
        return f.read()


setup(
    name='dockerpty',
    version='0.2.4',
    description='Python library to use the pseudo-tty of a docker container',
    long_description=read('README.md'),
    url='https://github.com/d11wtq/dockerpty',
    author='Chris Corbyn',
    author_email='chris@w3style.co.uk',
    license='Apache 2.0',
    keywords='docker, tty, pty, terminal',
    packages=['dockerpty'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Topic :: Terminals',
        'Topic :: Terminals :: Terminal Emulators/X Terminals',
        'Topic :: Software Development :: Libraries',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
apache-2.0
ecohealthalliance/GoodQuestion
js/components/Header.js
5911
import React from 'react'; import { View, Text, TouchableWithoutFeedback, Alert, Platform, Animated, Easing, StyleSheet, } from 'react-native'; import pubsub from 'pubsub-js'; import { NotificationChannels } from '../models/messages/Notification'; import Icon from 'react-native-vector-icons/FontAwesome'; import Styles from '../styles/Styles'; import Color from '../styles/Color'; import Store from '../data/Store'; const _styles = StyleSheet.create({ notificationIcon: { position: 'absolute', top: 20, right: 16, backgroundColor: Color.warning, width: 12, height: 12, borderRadius: 10, borderWidth: 2, borderColor: Color.background1, }, }); const Header = React.createClass({ subscriptions: {}, propTypes: { navigator: React.PropTypes.object, }, getInitialState() { // subscript to the notification create channel const subscription = pubsub.subscribe(NotificationChannels.CREATE, () => { this.updateTitle(); }); this.subscriptions[NotificationChannels.CREATE] = subscription; const routeStack = this.props.navState.routeStack; const position = routeStack.length - 1; const title = routeStack && routeStack[position] ? routeStack[position].title : ''; const path = routeStack && routeStack[position] ? 
routeStack[position].path : ''; return { index: 0, title: title, path: path, bounceValue: new Animated.Value(0), fadeAnim: new Animated.Value(0), translateAnim: new Animated.Value(0), hasNewNotifications: Store.newNotifications > 0, }; }, componentWillUnmount() { const keys = Object.keys(this.subscriptions); if (keys.length > 0) { keys.forEach((key) => { pubsub.unsubscribe(this.subscriptions[key]); }); this.subscriptions = {}; } }, immediatelyRefresh() { // NoOp https://github.com/facebook/react-native/issues/6205 }, componentWillReceiveProps() { this.updateTitle(); }, componentDidMount() { Animated.timing( this.state.fadeAnim, {toValue: 1} ).start(); }, /* Methods */ updateTitle(indexOffset = 0) { if (Store.navigator) { const routeStack = Store.navigator.getCurrentRoutes(); const position = routeStack.length - 1 - indexOffset; if (!routeStack[position]) { return; } let title = this.state.title; let path = this.state.path; const nextTitle = routeStack[position].title; const nextPath = routeStack[position].path; if (nextPath && nextPath !== path || nextTitle && nextTitle !== title) { path = nextPath; title = nextTitle; this.state.fadeAnim.setValue(0.5); this.state.translateAnim.setValue(indexOffset ? 
-10 : 10); Animated.timing( this.state.fadeAnim, {toValue: 1, duration: 300, easing: Easing.out(Easing.quad)} ).start(); Animated.timing( this.state.translateAnim, {toValue: 0, duration: 300, easing: Easing.out(Easing.quad)} ).start(); } this.setState({ title: title, index: position, path: path, hasNewNotifications: Store.newNotifications, }); } }, updateNotifications() { this.setState({hasNewNotifications: Store.newNotifications > 0}); }, backToLogin() { this.props.navigator.resetTo({path: 'login', title: ''}); this.setState({title: ''}); }, navigateBack() { if (this.state.path === 'registration') { Alert.alert( 'Exit registration?', '', [ {text: 'OK', onPress: () => this.backToLogin()}, {text: 'Cancel', style: 'cancel'}, ] ); } else { this.updateTitle(1); this.props.navigator.pop(); } }, /* Render */ renderDrawer() { if (typeof this.props.openDrawer === 'undefined') { return ( <View style={Styles.header.navBarRightButton}></View> ); } return ( <TouchableWithoutFeedback onPress={this.props.openDrawer}> <View style={Styles.header.navBarRightButton}> <Icon name='bars' size={25} color='#FFFFFF' /> { this.state.hasNewNotifications ? <View style={_styles.notificationIcon} /> : null } </View> </TouchableWithoutFeedback> ); }, renderIOSPadding() { if (Platform.OS === 'ios') { return ( <View style={Styles.header.iOSPadding}></View> ); } }, render() { let title = this.state.title; let navbarStyles = [Styles.header.navBar]; switch (this.state.path) { case 'none': case 'login': case 'registration': case 'forgotPassword': case 'verifyForgotPassword': navbarStyles.push(Styles.header.navBarClear); title = ''; break; default: } return ( <View style={navbarStyles}> {this.renderIOSPadding()} <TouchableWithoutFeedback onPress={this.navigateBack}> { this.state.index > 0 ? 
<View style={Styles.header.navBarLeftButton}><Icon name='chevron-left' size={25} color='#FFFFFF' /></View> : <View style={Styles.header.navBarLeftButton}><Icon name='chevron-left' size={25} color={Color.background1} /></View> } </TouchableWithoutFeedback> <View style={Styles.header.navBarTitle}> <Animated.Text source={{uri: 'http://i.imgur.com/XMKOH81.jpg'}} style={[Styles.header.navBarTitleText, { opacity: this.state.fadeAnim, transform: [ {translateX: this.state.translateAnim}, ], }]} > {title} </Animated.Text> </View> {this.renderDrawer()} </View> ); }, }); module.exports = Header;
apache-2.0
lenovor/zen
ml/src/main/scala/com/github/cloudml/zen/ml/recommendation/MVMModel.scala
6012
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.github.cloudml.zen.ml.recommendation

import com.github.cloudml.zen.ml.recommendation.MVM._
import com.github.cloudml.zen.ml.util.LoaderUtils
import com.github.cloudml.zen.ml.util.SparkUtils._
import org.apache.spark.SparkContext
import org.apache.spark.mllib.evaluation.{RegressionMetrics, BinaryClassificationMetrics}
import org.apache.spark.mllib.linalg.{Vector => SV}
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.util.{Loader, Saveable}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{Row, SQLContext}
import org.apache.spark.storage.StorageLevel
import org.json4s.DefaultFormats
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._

import scala.math._

/**
 * Trained multi-view machine model.
 *
 * @param k              per-feature latent factor dimensionality
 * @param views          boundaries of the feature views (feature-id ranges)
 * @param classification when true, predictions are passed through a sigmoid
 *                       and evaluated with AUC; otherwise raw scores / RMSE
 * @param factors        learned factor vector per feature id
 */
class MVMModel(
  val k: Int,
  val views: Array[Long],
  val classification: Boolean,
  val factors: RDD[(Long, VD)]) extends Serializable with Saveable {

  /**
   * Score every sample in `data`.
   *
   * Zero-valued features are dropped; one constant-1 feature per view is
   * appended at ids `numFeatures + i` (these presumably act as per-view bias
   * terms — confirm against MVM training code). Per-feature contributions are
   * combined via the MVM helpers `forwardInterval` / `reduceInterval` /
   * `predictInterval` (defined in MVM._). For classification models the final
   * score is squashed with a sigmoid.
   */
  def predict(data: RDD[(Long, SV)]): RDD[(Long, ED)] = {
    val numFeatures = data.first()._2.size.toLong
    data.flatMap { case (sampleId, features) =>
      features.activeIterator.filter(_._2 != 0.0).map { case (featureId, value) =>
        (featureId.toLong, (sampleId, value))
      } ++ views.indices.map { i => (numFeatures + i, (sampleId, 1D)) }
    }.join(factors).map { case (featureId, ((sampleId, x), w)) =>
      val viewSize = views.length
      val viewId = featureId2viewId(featureId, views)
      (sampleId, forwardInterval(k, viewSize, viewId, x, w))
    }.reduceByKey(reduceInterval).map { case (sampleId, arr) =>
      var result = predictInterval(k, arr)
      if (classification) {
        // map the raw score into (0, 1) via the logistic function
        result = 1.0 / (1.0 + math.exp(-result))
      }
      (sampleId, result)
    }
  }

  /**
   * Evaluate the model on labeled data.
   *
   * NOTE: despite the name, for classification models this returns AUC
   * (higher is better) while for regression it returns RMSE (lower is
   * better) — callers must interpret the value accordingly.
   */
  def loss(data: RDD[(Long, LabeledPoint)]): Double = {
    // val minTarget = data.map(_._2.label).min()
    // val maxTarget = data.map(_._2.label).max()
    val perd = predict(data.map(t => (t._1, t._2.features)))
    val label = data.map(t => (t._1, t._2.label))
    val scoreAndLabels = label.join(perd).map { case (_, (label, score)) =>
      // var r = Math.max(score, minTarget)
      // r = Math.min(r, maxTarget)
      // pow(l - r, 2)
      (score, label)
    }
    // cache: the metric computations below iterate the RDD more than once
    scoreAndLabels.persist(StorageLevel.MEMORY_AND_DISK)
    val ret = if (classification) auc(scoreAndLabels) else rmse(scoreAndLabels)
    scoreAndLabels.unpersist(blocking = false)
    ret
  }

  /** Root-mean-squared error of (score, label) pairs. */
  def rmse(scoreAndLabels: RDD[(Double, Double)]): Double = {
    val metrics = new RegressionMetrics(scoreAndLabels)
    metrics.rootMeanSquaredError
  }

  /** Area under the ROC curve of (score, label) pairs. */
  def auc(scoreAndLabels: RDD[(Double, Double)]): Double = {
    val metrics = new BinaryClassificationMetrics(scoreAndLabels)
    metrics.areaUnderROC()
  }

  /** Persist this model (metadata as JSON, factors as Parquet). */
  override def save(sc: SparkContext, path: String): Unit = {
    MVMModel.SaveLoadV1_0.save(sc, path, k, views, classification, factors)
  }

  override protected def formatVersion: String = MVMModel.SaveLoadV1_0.formatVersionV1_0
}

object MVMModel extends Loader[MVMModel] {

  /**
   * Load a model previously written by [[MVMModel.save]].
   *
   * @throws Exception if the stored class name / format version is unsupported
   */
  override def load(sc: SparkContext, path: String): MVMModel = {
    val (loadedClassName, version, metadata) = LoaderUtils.loadMetadata(sc, path)
    val versionV1_0 = SaveLoadV1_0.formatVersionV1_0
    val classNameV1_0 = SaveLoadV1_0.classNameV1_0
    if (loadedClassName == classNameV1_0 && version == versionV1_0) {
      implicit val formats = DefaultFormats
      val classification = (metadata \ "classification").extract[Boolean]
      // views were serialized as a comma-joined string; see save() below
      val views = (metadata \ "views").extract[String].split(",").map(_.toLong)
      val k = (metadata \ "k").extract[Int]
      val dataPath = LoaderUtils.dataPath(path)
      val sqlContext = new SQLContext(sc)
      val dataRDD = sqlContext.parquetFile(dataPath)
      // sanity-check the schema using a single row before the full scan
      val dataArray = dataRDD.select("featureId", "factors").take(1)
      assert(dataArray.size == 1, s"Unable to load $loadedClassName data from: $dataPath")
      val data = dataArray(0)
      assert(data.size == 2, s"Unable to load $loadedClassName data from: $dataPath")
      val factors = dataRDD.rdd.map { case Row(featureId: Long, factors: Seq[Double]) =>
        (featureId, factors.toArray)
      }
      new MVMModel(k, views, classification, factors)
    } else {
      throw new Exception(
        s"FMModel.load did not recognize model with (className, format version):" +
          s"($loadedClassName, $version).  Supported:\n" +
          s"  ($classNameV1_0, 1.0)")
    }
  }

  /** Version 1.0 on-disk layout: JSON metadata file + Parquet factor table. */
  private object SaveLoadV1_0 {
    val formatVersionV1_0 = "1.0"
    val classNameV1_0 = "com.github.cloudml.zen.ml.recommendation.MVMModel"

    def save(
      sc: SparkContext,
      path: String,
      k: Int,
      views: Array[Long],
      classification: Boolean,
      factors: RDD[(Long, Array[Double])]): Unit = {
      val metadata = compact(render(("class" -> classNameV1_0) ~ ("version" -> formatVersionV1_0) ~
        ("k" -> k) ~ ("views" -> views.mkString(",")) ~
        ("classification" -> classification)))
      sc.parallelize(Seq(metadata), 1).saveAsTextFile(LoaderUtils.metadataPath(path))

      val sqlContext = new SQLContext(sc)
      import sqlContext.implicits._
      // Create Parquet data.
      factors.toDF("featureId", "factors").saveAsParquetFile(LoaderUtils.dataPath(path))
    }
  }
}
apache-2.0
jaredhoberock/gotham
windows/include/boost/math/distributions.hpp
1902
// Copyright John Maddock 2006, 2007. // Copyright Paul A. Bristow 2006, 2007. // Use, modification and distribution are subject to the // Boost Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // This file includes *all* the distributions. // this may be useful if many are used // - to avoid including each distribution individually. #ifndef BOOST_MATH_DISTRIBUTIONS_HPP #define BOOST_MATH_DISTRIBUTIONS_HPP #include <boost/math/distributions/bernoulli.hpp> #include <boost/math/distributions/beta.hpp> #include <boost/math/distributions/binomial.hpp> #include <boost/math/distributions/cauchy.hpp> #include <boost/math/distributions/chi_squared.hpp> #include <boost/math/distributions/complement.hpp> #include <boost/math/distributions/exponential.hpp> #include <boost/math/distributions/extreme_value.hpp> #include <boost/math/distributions/fisher_f.hpp> #include <boost/math/distributions/gamma.hpp> #include <boost/math/distributions/lognormal.hpp> #include <boost/math/distributions/negative_binomial.hpp> #include <boost/math/distributions/non_central_chi_squared.hpp> #include <boost/math/distributions/non_central_beta.hpp> #include <boost/math/distributions/non_central_f.hpp> #include <boost/math/distributions/non_central_t.hpp> #include <boost/math/distributions/normal.hpp> #include <boost/math/distributions/pareto.hpp> #include <boost/math/distributions/poisson.hpp> #include <boost/math/distributions/rayleigh.hpp> #include <boost/math/distributions/students_t.hpp> #include <boost/math/distributions/triangular.hpp> #include <boost/math/distributions/uniform.hpp> #include <boost/math/distributions/weibull.hpp> #include <boost/math/distributions/find_scale.hpp> #include <boost/math/distributions/find_location.hpp> #endif // BOOST_MATH_DISTRIBUTIONS_HPP
apache-2.0
ryan-developer/TechEval.HCatalyst
source/TechEval.HCatalyst/TechEval.HCatalyst.Web/Utilities/IStringUtils.cs
140
namespace TechEval.HCatalyst.Web.Utilities
{
    /// <summary>
    /// Abstraction over string-generation helpers so implementations can be
    /// injected and substituted in tests.
    /// </summary>
    public interface IStringUtils
    {
        /// <summary>
        /// Produces a randomly generated alphanumeric string.
        /// NOTE(review): length, character set and randomness source are
        /// determined by the implementing class — confirm there.
        /// </summary>
        /// <returns>A random alphanumeric string.</returns>
        string GenerateRandomAlphaNumeric();
    }
}
apache-2.0
SirPigles/rsf
forums/inc/languages/english/applythreadprefix.lang.php
1320
<?php
/**
 * Apply Thread Prefix
 * Copyright 2011 Starpaul20
 *
 * English language strings for the Apply Thread Prefix MyBB plugin.
 * Keys are read through the $l language array by the plugin's pages.
 */

// Plugin listing (name/description shown in the plugin manager)
$l['applythreadprefix_info_name'] = "Apply Thread Prefix";
$l['applythreadprefix_info_desc'] = "Allows moderators to apply a thread prefix to threads without having to edit the post.";

// Moderation form labels
$l['apply_thread_prefix'] = "Apply Thread Prefix";
$l['new_prefix'] = "New Prefix:";
$l['prefix_note'] = "Please note any prefix this thread currently has will be overwritten";
$l['no_prefix'] = "Select a Prefix:";
$l['nav_apply_prefix'] = "Apply Thread Prefix";

// Success / redirect messages
$l['thread_prefix_applied'] = "Thread Prefix applied";
$l['redirect_thread_prefix_applied'] = "The prefix has been successfully applied to the thread.<br />You will now be redirected to the thread.";
$l['redirect_inline_thread_prefix_applied'] = "The prefix has been successfully applied to the selected threads.<br />You will now be redirected to the forum.";

// Error messages
$l['no_prefix_selected'] = "No prefix was selected.";
$l['error_invalidthread'] = "The specified thread does not exist.";
$l['error_invalidforum'] = "The specified forum does not exist.";
$l['error_inline_nothreadsselected'] = "Sorry, but you did not select any threads to perform inline moderation on, or your previous moderation session has expired (Automatically after 1 hour of inactivity). Please select some threads and try again.";

// Fix: the closing "?>" tag was removed. In a code-only PHP file it can
// emit accidental trailing whitespace and break header() calls; the PHP
// manual recommends omitting it.
apache-2.0
AnujaLK/andes
modules/andes-core/broker/src/main/java/org/wso2/andes/server/cluster/coordination/hazelcast/HazelcastAgent.java
18069
/*
 * Copyright (c) 2005-2014, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
 *
 * WSO2 Inc. licenses this file to you under the Apache License,
 * Version 2.0 (the "License"); you may not use this file except
 * in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.wso2.andes.server.cluster.coordination.hazelcast;

import com.hazelcast.core.*;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.wso2.andes.configuration.AndesConfigurationManager;
import org.wso2.andes.configuration.enums.AndesConfiguration;
import org.wso2.andes.kernel.AndesException;
import org.wso2.andes.server.cluster.coordination.ClusterCoordinationHandler;
import org.wso2.andes.server.cluster.coordination.ClusterNotification;
import org.wso2.andes.server.cluster.coordination.CoordinationConstants;
import org.wso2.andes.server.cluster.coordination.hazelcast.custom.serializer.wrapper.HashmapStringTreeSetWrapper;
import org.wso2.andes.server.cluster.coordination.hazelcast.custom.serializer.wrapper.TreeSetSlotWrapper;
import org.wso2.andes.server.cluster.coordination.hazelcast.custom.serializer.wrapper.TreeSetLongWrapper;

import java.util.*;

/**
 * This is a singleton class, which contains all Hazelcast related operations.
 */
public class HazelcastAgent {
    private static Log log = LogFactory.getLog(HazelcastAgent.class);

    /**
     * Value used to indicate the cluster initialization success state
     */
    private static final long INIT_SUCCESSFUL = 1L;

    /**
     * Singleton HazelcastAgent Instance. Created eagerly at class load time,
     * so instance creation itself needs no synchronization.
     */
    private static HazelcastAgent hazelcastAgentInstance = new HazelcastAgent();

    /**
     * Hazelcast instance exposed by Carbon.
     */
    private HazelcastInstance hazelcastInstance;

    /**
     * Distributed topic to communicate subscription change notifications among cluster nodes.
     */
    private ITopic<ClusterNotification> subscriptionChangedNotifierChannel;

    /**
     * Distributed topic to communicate binding change notifications among cluster nodes.
     */
    private ITopic<ClusterNotification> bindingChangeNotifierChannel;

    /**
     * Distributed topic to communicate queue purge notifications among cluster nodes.
     */
    private ITopic<ClusterNotification> queueChangedNotifierChannel;

    /**
     * Distributed topic to communicate exchange change notification among cluster nodes.
     */
    private ITopic<ClusterNotification> exchangeChangeNotifierChannel;

    /**
     * These distributed maps are used for slot management
     */

    /**
     * distributed Map to store message ID list against queue name
     */
    private IMap<String, TreeSetLongWrapper> slotIdMap;

    /**
     * to keep track of assigned slots up to now. Key of the map contains nodeID+"_"+queueName
     */
    private IMap<String, HashmapStringTreeSetWrapper> slotAssignmentMap;

    /**
     * To keep track of slots that overlap with already assigned slots (in slotAssignmentMap). This is to ensure that
     * messages assigned to a specific assigned slot are only handled by that node itself.
     */
    private IMap<String, HashmapStringTreeSetWrapper> overLappedSlotMap;

    /**
     * distributed Map to store last assigned ID against queue name
     */
    private IMap<String, Long> lastAssignedIDMap;

    /**
     * distributed Map to store last published ID against node ID
     */
    private IMap<String, Long> lastPublishedIDMap;

    /**
     * Distributed Map to keep track of non-empty slots which are unassigned from
     * other nodes
     */
    private IMap<String, TreeSetSlotWrapper> unAssignedSlotMap;

    /**
     * This map is used to store thrift server host and thrift server port
     * map's key is port or host name.
     */
    private IMap<String,String> thriftServerDetailsMap;

    /**
     * Unique ID generated to represent the node.
     * This ID is used when generating message IDs.
     */
    private int uniqueIdOfLocalMember;

    /**
     * Lock used to initialize the Slot map used by the Slot manager.
     */
    private ILock initializationLock;

    /**
     * This is used to indicate if the cluster initialization was done properly. Used a atomic long
     * since am atomic boolean is not available in the current Hazelcast implementation.
     */
    private IAtomicLong initializationDoneIndicator;

    /**
     * This map is used to store coordinator node's host address and port.
     */
    private IMap<String,String> coordinatorNodeDetailsMap;

    /**
     * Private constructor.
     */
    private HazelcastAgent() {
    }

    /**
     * Get singleton HazelcastAgent.
     *
     * @return HazelcastAgent
     */
    public static synchronized HazelcastAgent getInstance() {
        return hazelcastAgentInstance;
    }

    /**
     * Initialize HazelcastAgent instance: registers cluster listeners,
     * wires the four notification topics to their listeners, and obtains
     * the distributed maps/locks used by slot management. Must be called
     * before any other method on this agent is used.
     *
     * @param hazelcastInstance obtained hazelcastInstance from the OSGI service
     */
    @SuppressWarnings("unchecked")
    public void init(HazelcastInstance hazelcastInstance) {
        log.info("Initializing Hazelcast Agent");
        this.hazelcastInstance = hazelcastInstance;

        /**
         * membership changes
         */
        this.hazelcastInstance.getCluster().addMembershipListener(new AndesMembershipListener());

        /**
         * subscription changes
         */
        this.subscriptionChangedNotifierChannel = this.hazelcastInstance.getTopic(
                CoordinationConstants.HAZELCAST_SUBSCRIPTION_CHANGED_NOTIFIER_TOPIC_NAME);
        ClusterSubscriptionChangedListener clusterSubscriptionChangedListener = new ClusterSubscriptionChangedListener();
        clusterSubscriptionChangedListener.addSubscriptionListener(new ClusterCoordinationHandler(this));
        this.subscriptionChangedNotifierChannel.addMessageListener(clusterSubscriptionChangedListener);

        /**
         * exchange changes
         */
        this.exchangeChangeNotifierChannel = this.hazelcastInstance.getTopic(
                CoordinationConstants.HAZELCAST_EXCHANGE_CHANGED_NOTIFIER_TOPIC_NAME);
        ClusterExchangeChangedListener clusterExchangeChangedListener = new ClusterExchangeChangedListener();
        clusterExchangeChangedListener.addExchangeListener(new ClusterCoordinationHandler(this));
        this.exchangeChangeNotifierChannel.addMessageListener(clusterExchangeChangedListener);

        /**
         * queue changes
         */
        this.queueChangedNotifierChannel = this.hazelcastInstance.getTopic(
                CoordinationConstants.HAZELCAST_QUEUE_CHANGED_NOTIFIER_TOPIC_NAME);
        ClusterQueueChangedListener clusterQueueChangedListener = new ClusterQueueChangedListener();
        clusterQueueChangedListener.addQueueListener(new ClusterCoordinationHandler(this));
        this.queueChangedNotifierChannel.addMessageListener(clusterQueueChangedListener);

        /**
         * binding changes
         */
        this.bindingChangeNotifierChannel = this.hazelcastInstance.getTopic(
                CoordinationConstants.HAZELCAST_BINDING_CHANGED_NOTIFIER_TOPIC_NAME);
        ClusterBindingChangedListener clusterBindingChangedListener = new ClusterBindingChangedListener();
        clusterBindingChangedListener.addBindingListener(new ClusterCoordinationHandler(this));
        this.bindingChangeNotifierChannel.addMessageListener(clusterBindingChangedListener);

        // generates a unique id for the node unique for the cluster
        IdGenerator idGenerator = hazelcastInstance.getIdGenerator(CoordinationConstants.HAZELCAST_ID_GENERATOR_NAME);
        this.uniqueIdOfLocalMember = (int) idGenerator.newId();

        /**
         * Initialize hazelcast maps for slots
         */
        unAssignedSlotMap = hazelcastInstance.getMap(CoordinationConstants.UNASSIGNED_SLOT_MAP_NAME);
        slotIdMap = hazelcastInstance.getMap(CoordinationConstants.SLOT_ID_MAP_NAME);
        lastAssignedIDMap = hazelcastInstance.getMap(CoordinationConstants.LAST_ASSIGNED_ID_MAP_NAME);
        lastPublishedIDMap = hazelcastInstance.getMap(CoordinationConstants.LAST_PUBLISHED_ID_MAP_NAME);
        slotAssignmentMap = hazelcastInstance.getMap(CoordinationConstants.SLOT_ASSIGNMENT_MAP_NAME);
        overLappedSlotMap = hazelcastInstance.getMap(CoordinationConstants.OVERLAPPED_SLOT_MAP_NAME);

        /**
         * Initialize hazelcast map for thrift server details
         */
        thriftServerDetailsMap = hazelcastInstance.getMap(CoordinationConstants.THRIFT_SERVER_DETAILS_MAP_NAME);

        /**
         * Initialize hazelcast map for coordinator node details
         */
        coordinatorNodeDetailsMap = hazelcastInstance.getMap(CoordinationConstants.COORDINATOR_NODE_DETAILS_MAP_NAME);

        /**
         * Initialize distributed lock and boolean related to slot map initialization
         */
        initializationLock = hazelcastInstance.getLock(CoordinationConstants.INITIALIZATION_LOCK);
        initializationDoneIndicator = hazelcastInstance
                .getAtomicLong(CoordinationConstants.INITIALIZATION_DONE_INDICATOR);

        log.info("Successfully initialized Hazelcast Agent");

        if (log.isDebugEnabled()) {
            log.debug("Unique ID generation for message ID generation:" + uniqueIdOfLocalMember);
        }
    }

    /**
     * Node ID is generated in the format of "NODE/<host IP>:<Port>"
     * unless an explicit ID was configured in broker.xml.
     *
     * @return NodeId Identifier of the node in the cluster
     */
    public String getNodeId() {

        String nodeId;

        // Get Node ID configured by user in broker.xml (if not "default" we must use it as the ID)
        nodeId = AndesConfigurationManager.readValue(AndesConfiguration.COORDINATION_NODE_ID);

        // If the config value is "default" we must generate the ID
        if (AndesConfiguration.COORDINATION_NODE_ID.get().getDefaultValue().equals(nodeId)) {
            Member localMember = hazelcastInstance.getCluster().getLocalMember();
            nodeId = getIdOfNode(localMember);
        }

        return nodeId;
    }

    /**
     * All members of the cluster are returned as a Set of Members
     *
     * @return Set of Members
     */
    public Set<Member> getAllClusterMembers() {
        return hazelcastInstance.getCluster().getMembers();
    }

    /**
     * Get node IDs of all nodes available in the cluster.
     *
     * @return List of node IDs.
     */
    public List<String> getMembersNodeIDs() {
        Set<Member> members = this.getAllClusterMembers();
        List<String> nodeIDList = new ArrayList<String>();
        for (Member member : members) {
            nodeIDList.add(getIdOfNode(member));
        }

        return nodeIDList;
    }

    /**
     * Get local node.
     *
     * @return local node as a Member.
     */
    public Member getLocalMember() {
        return hazelcastInstance.getCluster().getLocalMember();
    }

    /**
     * Get number of members in the cluster.
     *
     * @return number of members.
     */
    public int getClusterSize() {
        return hazelcastInstance.getCluster().getMembers().size();
    }

    /**
     * Get unique ID to represent local member.
     *
     * @return unique ID.
     */
    public int getUniqueIdForNode() {
        return uniqueIdOfLocalMember;
    }

    /**
     * Get node ID of the given node.
     *
     * @param node cluster node to get the ID
     * @return node ID.
     */
    public String getIdOfNode(Member node) {
        return CoordinationConstants.NODE_NAME_PREFIX +
                node.getSocketAddress();
    }

    /**
     * Each member of the cluster is given an unique UUID and here the UUIDs of all nodes are sorted
     * and the index of the belonging UUID of the given node is returned.
     *
     * @param node node to get the index
     * @return the index of the specified node
     */
    public int getIndexOfNode(Member node) {
        TreeSet<String> membersUniqueRepresentations = new TreeSet<String>();
        for (Member member : this.getAllClusterMembers()) {
            membersUniqueRepresentations.add(member.getUuid());
        }

        return membersUniqueRepresentations.headSet(node.getUuid()).size();
    }

    /**
     * Get the index where the local node is placed when all
     * the cluster nodes are sorted according to their UUID.
     *
     * @return the index of the local node
     */
    public int getIndexOfLocalNode() {
        return this.getIndexOfNode(this.getLocalMember());
    }

    /**
     * Publish a subscription change notification to all cluster nodes.
     *
     * @param clusterNotification notification to broadcast
     * @throws AndesException if publishing over the Hazelcast topic fails
     */
    public void notifySubscriptionsChanged(ClusterNotification clusterNotification) throws AndesException {
        if (log.isDebugEnabled()) {
            log.debug("Sending GOSSIP: " + clusterNotification.getDescription());
        }
        try {
            this.subscriptionChangedNotifierChannel.publish(clusterNotification);
        } catch (Exception ex) {
            log.error("Error while sending subscription change notification : " +
                    clusterNotification.getEncodedObjectAsString(), ex);
            throw new AndesException("Error while sending queue change notification : " +
                    clusterNotification.getEncodedObjectAsString(), ex);
        }
    }

    /**
     * Publish a queue change notification to all cluster nodes.
     *
     * @param clusterNotification notification to broadcast
     * @throws AndesException if publishing over the Hazelcast topic fails
     */
    public void notifyQueuesChanged(ClusterNotification clusterNotification) throws AndesException {

        if (log.isDebugEnabled()) {
            log.debug("Sending GOSSIP: " + clusterNotification.getDescription());
        }
        try {
            this.queueChangedNotifierChannel.publish(clusterNotification);
        } catch (Exception e) {
            log.error("Error while sending queue change notification : " +
                    clusterNotification.getEncodedObjectAsString(), e);
            throw new AndesException("Error while sending queue change notification : " +
                    clusterNotification.getEncodedObjectAsString(), e);
        }
    }

    /**
     * Publish an exchange change notification to all cluster nodes.
     *
     * @param clusterNotification notification to broadcast
     * @throws AndesException if publishing over the Hazelcast topic fails
     */
    public void notifyExchangesChanged(ClusterNotification clusterNotification) throws AndesException {
        if (log.isDebugEnabled()) {
            log.debug("Sending GOSSIP: " + clusterNotification.getDescription());
        }
        try {
            this.exchangeChangeNotifierChannel.publish(clusterNotification);
        } catch (Exception e) {
            log.error("Error while sending exchange change notification" +
                    clusterNotification.getEncodedObjectAsString(), e);
            throw new AndesException("Error while sending exchange change notification" +
                    clusterNotification.getEncodedObjectAsString(), e);
        }
    }

    /**
     * Publish a binding change notification to all cluster nodes.
     *
     * @param clusterNotification notification to broadcast
     * @throws AndesException if publishing over the Hazelcast topic fails
     */
    public void notifyBindingsChanged(ClusterNotification clusterNotification) throws AndesException {
        if (log.isDebugEnabled()) {
            log.debug("GOSSIP: " + clusterNotification.getDescription());
        }
        try {
            this.bindingChangeNotifierChannel.publish(clusterNotification);
        } catch (Exception e) {
            log.error("Error while sending binding change notification" +
                    clusterNotification.getEncodedObjectAsString(), e);
            throw new AndesException("Error while sending binding change notification" +
                    clusterNotification.getEncodedObjectAsString(), e);
        }
    }

    /** @return distributed map of unassigned, non-empty slots */
    public IMap<String, TreeSetSlotWrapper> getUnAssignedSlotMap() {
        return unAssignedSlotMap;
    }

    /** @return distributed map of message ID lists keyed by queue name */
    public IMap<String, TreeSetLongWrapper> getSlotIdMap() {
        return slotIdMap;
    }

    /** @return distributed map of last assigned IDs keyed by queue name */
    public IMap<String, Long> getLastAssignedIDMap() {
        return lastAssignedIDMap;
    }

    /** @return distributed map of last published IDs keyed by node ID */
    public IMap<String, Long> getLastPublishedIDMap() {
        return lastPublishedIDMap;
    }

    /** @return distributed map of slot assignments keyed by nodeID+"_"+queueName */
    public IMap<String, HashmapStringTreeSetWrapper> getSlotAssignmentMap() {
        return slotAssignmentMap;
    }

    /** @return distributed map of slots overlapping already-assigned slots */
    public IMap<String, HashmapStringTreeSetWrapper> getOverLappedSlotMap() {
        return overLappedSlotMap;
    }

    /**
     * This method returns a map containing thrift server port and hostname
     * @return thriftServerDetailsMap
     */
    public IMap<String, String> getThriftServerDetailsMap() {
        return thriftServerDetailsMap;
    }

    /**
     * This method returns a map containing coordinator node's host address and port
     * @return coordinatorNodeDetailsMap
     */
    public IMap<String, String> getCoordinatorNodeDetailsMap() {
        return coordinatorNodeDetailsMap;
    }

    /**
     * Acquire the distributed lock related to cluster initialization. This lock is required to
     * avoid two nodes initializing the map twice.
     */
    public void acquireInitializationLock() {
        if (log.isDebugEnabled()) {
            log.debug("Trying to acquire initialization lock.");
        }

        initializationLock.lock();

        if (log.isDebugEnabled()) {
            log.debug("Initialization lock acquired.");
        }
    }

    /**
     * Inform other members in the cluster that the cluster was initialized properly.
     * NOTE(review): method name misspells "Initialization"; renaming would
     * break existing callers, so it is kept as-is.
     */
    public void indicateSuccessfulInitilization() {
        initializationDoneIndicator.set(INIT_SUCCESSFUL);
    }

    /**
     * Check if a member has already initialized the cluster
     *
     * @return true if cluster is already initialized
     */
    public boolean isClusterInitializedSuccessfully() {
        return initializationDoneIndicator.get() == INIT_SUCCESSFUL;
    }

    /**
     * Release the initialization lock.
     */
    public void releaseInitializationLock() {
        initializationLock.unlock();

        if (log.isDebugEnabled()) {
            log.debug("Initialization lock released.");
        }
    }

    /**
     * Method to check if the hazelcast instance has shutdown.
     * @return boolean
     */
    public boolean isActive() {
        if (null != hazelcastInstance) {
            return hazelcastInstance.getLifecycleService().isRunning();
        } else {
            return false;
        }
    }
}
apache-2.0
kool79/IocPerformance
IocPerformance/Benchmarks/Advanced/09_ChildContainer_Benchmark.cs
1738
using System;
using IocPerformance.Adapters;
using IocPerformance.Classes.Child;
using IocPerformance.Classes.Standard;

namespace IocPerformance.Benchmarks.Advanced
{
    /// <summary>
    /// Benchmark 09: measures the cost of creating, preparing, resolving from,
    /// and disposing a child container, once for each of three combined services.
    /// </summary>
    public class ChildContainer_09_Benchmark : Benchmark
    {
        /// <summary>
        /// Only containers that support child containers participate in this benchmark.
        /// </summary>
        public override bool IsSupportedBy(IContainerAdapter container)
        {
            return container.SupportsChildContainer;
        }

        /// <summary>
        /// One iteration: for each of ICombined1..3, open a fresh child container,
        /// prepare it, and resolve the service once; the using block disposes it.
        /// NOTE(review): the scopedCombined locals are never read — the cast only
        /// asserts the resolved instance's runtime type; the resolve itself is the
        /// measured work.
        /// </summary>
        public override void MethodToBenchmark(IContainerAdapter container)
        {
            using (var childContainer = container.CreateChildContainerAdapter())
            {
                childContainer.Prepare();
                var scopedCombined = (ICombined1)childContainer.Resolve(typeof(ICombined1));
            }

            using (var childContainer = container.CreateChildContainerAdapter())
            {
                childContainer.Prepare();
                var scopedCombined = (ICombined2)childContainer.Resolve(typeof(ICombined2));
            }

            using (var childContainer = container.CreateChildContainerAdapter())
            {
                childContainer.Prepare();
                var scopedCombined = (ICombined3)childContainer.Resolve(typeof(ICombined3));
            }
        }

        /// <summary>
        /// Checks that each ScopedCombinedN type was instantiated exactly
        /// LoopCount times (i.e. once per iteration above).
        /// NOTE(review): the parameter type is written fully qualified here but
        /// unqualified in the other overrides — same type either way.
        /// </summary>
        public override void Verify(Adapters.IContainerAdapter container)
        {
            if (!container.SupportsChildContainer)
            {
                return;
            }

            if (ScopedCombined1.Instances != this.LoopCount
                || ScopedCombined2.Instances != this.LoopCount
                || ScopedCombined3.Instances != this.LoopCount)
            {
                throw new Exception(string.Format("ScopedCombined count must be {0}", this.LoopCount));
            }
        }
    }
}
apache-2.0
knative-sandbox/eventing-rabbitmq
vendor/github.com/rabbitmq/messaging-topology-operator/pkg/generated/clientset/versioned/typed/rabbitmq.com/v1beta1/exchange.go
6684
/*
RabbitMQ Messaging Topology Kubernetes Operator
Copyright 2021 VMware, Inc.

This product is licensed to you under the Mozilla Public License 2.0 license (the "License").  You may not use this product except in compliance with the Mozilla 2.0 License.

This product may include a number of subcomponents with separate copyright notices and license terms. Your use of these subcomponents is subject to the terms and conditions of the subcomponent's license, as noted in the LICENSE file.
*/

// Code generated by client-gen. DO NOT EDIT.
// NOTE(review): vendored, generated typed client for the Exchange custom
// resource — regenerate with client-gen instead of editing by hand.

package v1beta1

import (
	"context"
	"time"

	v1beta1 "github.com/rabbitmq/messaging-topology-operator/api/v1beta1"
	scheme "github.com/rabbitmq/messaging-topology-operator/pkg/generated/clientset/versioned/scheme"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	types "k8s.io/apimachinery/pkg/types"
	watch "k8s.io/apimachinery/pkg/watch"
	rest "k8s.io/client-go/rest"
)

// ExchangesGetter has a method to return a ExchangeInterface.
// A group's client should implement this interface.
type ExchangesGetter interface {
	Exchanges(namespace string) ExchangeInterface
}

// ExchangeInterface has methods to work with Exchange resources.
type ExchangeInterface interface {
	Create(ctx context.Context, exchange *v1beta1.Exchange, opts v1.CreateOptions) (*v1beta1.Exchange, error)
	Update(ctx context.Context, exchange *v1beta1.Exchange, opts v1.UpdateOptions) (*v1beta1.Exchange, error)
	UpdateStatus(ctx context.Context, exchange *v1beta1.Exchange, opts v1.UpdateOptions) (*v1beta1.Exchange, error)
	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Exchange, error)
	List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ExchangeList, error)
	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Exchange, err error)
	ExchangeExpansion
}

// exchanges implements ExchangeInterface
type exchanges struct {
	client rest.Interface
	ns     string
}

// newExchanges returns a Exchanges
func newExchanges(c *RabbitmqV1beta1Client, namespace string) *exchanges {
	return &exchanges{
		client: c.RESTClient(),
		ns:     namespace,
	}
}

// Get takes name of the exchange, and returns the corresponding exchange object, and an error if there is any.
func (c *exchanges) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Exchange, err error) {
	result = &v1beta1.Exchange{}
	err = c.client.Get().
		Namespace(c.ns).
		Resource("exchanges").
		Name(name).
		VersionedParams(&options, scheme.ParameterCodec).
		Do(ctx).
		Into(result)
	return
}

// List takes label and field selectors, and returns the list of Exchanges that match those selectors.
func (c *exchanges) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ExchangeList, err error) {
	// TimeoutSeconds is expressed in whole seconds; convert for the REST client.
	var timeout time.Duration
	if opts.TimeoutSeconds != nil {
		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
	}
	result = &v1beta1.ExchangeList{}
	err = c.client.Get().
		Namespace(c.ns).
		Resource("exchanges").
		VersionedParams(&opts, scheme.ParameterCodec).
		Timeout(timeout).
		Do(ctx).
		Into(result)
	return
}

// Watch returns a watch.Interface that watches the requested exchanges.
func (c *exchanges) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
	var timeout time.Duration
	if opts.TimeoutSeconds != nil {
		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
	}
	// Force watch semantics on the list endpoint (mutates the local copy only).
	opts.Watch = true
	return c.client.Get().
		Namespace(c.ns).
		Resource("exchanges").
		VersionedParams(&opts, scheme.ParameterCodec).
		Timeout(timeout).
		Watch(ctx)
}

// Create takes the representation of a exchange and creates it.  Returns the server's representation of the exchange, and an error, if there is any.
func (c *exchanges) Create(ctx context.Context, exchange *v1beta1.Exchange, opts v1.CreateOptions) (result *v1beta1.Exchange, err error) {
	result = &v1beta1.Exchange{}
	err = c.client.Post().
		Namespace(c.ns).
		Resource("exchanges").
		VersionedParams(&opts, scheme.ParameterCodec).
		Body(exchange).
		Do(ctx).
		Into(result)
	return
}

// Update takes the representation of a exchange and updates it. Returns the server's representation of the exchange, and an error, if there is any.
func (c *exchanges) Update(ctx context.Context, exchange *v1beta1.Exchange, opts v1.UpdateOptions) (result *v1beta1.Exchange, err error) {
	result = &v1beta1.Exchange{}
	err = c.client.Put().
		Namespace(c.ns).
		Resource("exchanges").
		Name(exchange.Name).
		VersionedParams(&opts, scheme.ParameterCodec).
		Body(exchange).
		Do(ctx).
		Into(result)
	return
}

// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *exchanges) UpdateStatus(ctx context.Context, exchange *v1beta1.Exchange, opts v1.UpdateOptions) (result *v1beta1.Exchange, err error) {
	result = &v1beta1.Exchange{}
	err = c.client.Put().
		Namespace(c.ns).
		Resource("exchanges").
		Name(exchange.Name).
		SubResource("status").
		VersionedParams(&opts, scheme.ParameterCodec).
		Body(exchange).
		Do(ctx).
		Into(result)
	return
}

// Delete takes name of the exchange and deletes it. Returns an error if one occurs.
func (c *exchanges) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
	return c.client.Delete().
		Namespace(c.ns).
		Resource("exchanges").
		Name(name).
		Body(&opts).
		Do(ctx).
		Error()
}

// DeleteCollection deletes a collection of objects.
func (c *exchanges) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
	var timeout time.Duration
	if listOpts.TimeoutSeconds != nil {
		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
	}
	return c.client.Delete().
		Namespace(c.ns).
		Resource("exchanges").
		VersionedParams(&listOpts, scheme.ParameterCodec).
		Timeout(timeout).
		Body(&opts).
		Do(ctx).
		Error()
}

// Patch applies the patch and returns the patched exchange.
func (c *exchanges) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Exchange, err error) {
	result = &v1beta1.Exchange{}
	err = c.client.Patch(pt).
		Namespace(c.ns).
		Resource("exchanges").
		Name(name).
		SubResource(subresources...).
		VersionedParams(&opts, scheme.ParameterCodec).
		Body(data).
		Do(ctx).
		Into(result)
	return
}
apache-2.0
zhangminglei/flink
flink-runtime/src/test/java/org/apache/flink/runtime/state/heap/CachingInternalPriorityQueueSetTestBase.java
1819
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.runtime.state.heap; import org.apache.flink.runtime.state.InternalPriorityQueue; import org.apache.flink.runtime.state.InternalPriorityQueueTestBase; /** * Test for {@link CachingInternalPriorityQueueSet}. */ public abstract class CachingInternalPriorityQueueSetTestBase extends InternalPriorityQueueTestBase { @Override protected InternalPriorityQueue<TestElement> newPriorityQueue(int initialCapacity) { final CachingInternalPriorityQueueSet.OrderedSetCache<TestElement> cache = createOrderedSetCache(); final CachingInternalPriorityQueueSet.OrderedSetStore<TestElement> store = createOrderedSetStore(); return new CachingInternalPriorityQueueSet<>(cache, store); } @Override protected boolean testSetSemanticsAgainstDuplicateElements() { return true; } protected abstract CachingInternalPriorityQueueSet.OrderedSetStore<TestElement> createOrderedSetStore(); protected abstract CachingInternalPriorityQueueSet.OrderedSetCache<TestElement> createOrderedSetCache(); }
apache-2.0
HubSpot/Baragon
BaragonService/src/main/java/com/hubspot/baragon/service/config/BaragonServiceDWSettings.java
389
package com.hubspot.baragon.service.config; public class BaragonServiceDWSettings { private final int port; private final String contextPath; public BaragonServiceDWSettings(int port, String contextPath) { this.port = port; this.contextPath = contextPath; } public int getPort() { return port; } public String getContextPath() { return contextPath; } }
apache-2.0
junforjun/webp
src/main/java/com/webp/model/UserAuthentication.java
1134
package com.webp.model;

import java.io.Serializable;
import java.sql.Timestamp;

import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.Id;
import javax.persistence.IdClass;
import javax.persistence.Table;

import com.webp.model.pk.UserAuthenticationPK;

/**
 * MODEL autogen macro
 * UserAuthentication — entity mapped to the USER_AUTHENTICATION table with the
 * composite primary key (userId, userLevel) declared by {@link UserAuthenticationPK}.
 *
 * NOTE(review): implements {@link Serializable} without an explicit
 * serialVersionUID, and exposes public fields rather than accessors —
 * left as-is to keep the existing field-access mapping intact.
 *
 * @auther KIM
 */
@Entity
@Table(name="USER_AUTHENTICATION")
@IdClass(value = UserAuthenticationPK.class)
public class UserAuthentication implements Serializable {

    public static final String TABLE = "USER_AUTHENTICATION";

    /** User ID (part of the composite primary key). */
    @Id
    @Column(name = "USER_ID")
    public String userId;

    /** User level (part of the composite primary key). */
    @Id
    @Column(name = "USER_LEVEL")
    public String userLevel;

    /** ID of the user that created this row. */
    @Column(name = "CREATED_USER", nullable = false)
    public String createdUser;

    /** Creation timestamp. */
    @Column(name = "CREATED_TIME", nullable = false)
    public Timestamp createdTime;

    /** ID of the user that last edited this row (nullable until first edit). */
    @Column(name = "EDITED_USER")
    public String editedUser;

    /** Last edit timestamp (nullable until first edit). */
    @Column(name = "EDITED_TIME")
    public Timestamp editedTime;
}
apache-2.0
taimir/dashboard
src/app/frontend/replicationcontrollerlist/podlogsmenu_component.js
4975
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import {StateParams} from 'logs/logs_state';
import {stateName as logsStateName} from 'logs/logs_state';

/**
 * Controller for the logs menu view.
 *
 * @final
 */
export class PodLogsMenuController {
  /**
   * @param {!ui.router.$state} $state
   * @param {!angular.$log} $log
   * @param {!angular.$resource} $resource
   * @ngInject
   */
  constructor($state, $log, $resource) {
    /** @private {!ui.router.$state} */
    this.state_ = $state;

    /** @private {!angular.$resource} */
    this.resource_ = $resource;

    /** @private {!angular.$log} */
    this.log_ = $log;

    /**
     * This is initialized from the scope (component binding).
     * @export {string}
     */
    this.replicationControllerName;

    /**
     * This is initialized from the scope (component binding).
     * @export {string}
     */
    this.namespace;

    /**
     * This is initialized on open menu (async fetch in openMenu).
     * @export {!Array<!backendApi.ReplicationControllerPodWithContainers>}
     */
    this.replicationControllerPodsList;

    /** @export */
    this.i18n = i18n;
  }

  /**
   * Opens menu with pods and link to logs.
   * @param {!function(!MouseEvent)} $mdOpenMenu
   * @param {!MouseEvent} $event
   * @export
   */
  openMenu($mdOpenMenu, $event) {
    // This is needed to resolve problem with data refresh.
    // Sometimes old data was included to the new one for a while.
    // Clearing the stale list before the async re-fetch avoids showing it.
    if (this.replicationControllerPodsList) {
      this.replicationControllerPodsList = [];
    }
    this.getReplicationControllerPods_();
    $mdOpenMenu($event);
  }

  /**
   * Fetches (at most 10) pods of the bound replication controller and stores
   * them in replicationControllerPodsList; errors are only logged.
   * @private
   */
  getReplicationControllerPods_() {
    /** @type {!angular.Resource<!backendApi.ReplicationControllerPods>} */
    let resource = this.resource_(
        `api/v1/replicationcontroller/pod/${this.namespace}/` +
        `${this.replicationControllerName}?limit=10`);

    resource.get(
        (replicationControllerPods) => {
          this.log_.info(
              'Successfully fetched Replication Controller pods: ', replicationControllerPods);
          this.replicationControllerPodsList = replicationControllerPods.pods;
        },
        (err) => {
          this.log_.error('Error fetching Replication Controller pods: ', err);
        });
  }

  /**
   * Builds a ui-router href to the logs state for the given pod.
   * @param {string} podName
   * @return {string}
   * @export
   */
  getLogsHref(podName) {
    return this.state_.href(logsStateName, new StateParams(this.namespace, podName));
  }

  /**
   * Checks if pod contains at least one container. Return true if yes, otherwise false.
   * NOTE(review): this reads pod.podContainers[0] unconditionally — presumably the
   * backend always returns at least one (possibly empty) entry; confirm, since an
   * empty podContainers array would throw here.
   * @param {!backendApi.ReplicationControllerPodWithContainers} pod
   * @return {boolean}
   * @export
   */
  podContainerExists(pod) {
    if (pod.podContainers[0].name === undefined) {
      return false;
    }
    return true;
  }

  /**
   * Checks if pod containers were restarted. Return true if yes, otherwise false.
   * @param {backendApi.ReplicationControllerPodWithContainers} pod
   * @return {boolean}
   * @export
   */
  podContainersRestarted(pod) {
    if (pod) {
      return pod.totalRestartCount > 0;
    }
    return false;
  }
}

/**
 * Returns directive definition object for logs menu.
 * @return {!angular.Directive}
 */
export const podLogsMenuComponent = {
  // Two-way bindings supplied by the parent replication-controller card.
  bindings: {
    'namespace': '=',
    'replicationControllerName': '=',
  },
  controller: PodLogsMenuController,
  templateUrl: 'replicationcontrollerlist/podlogsmenu.html',
};

// Messages below are extracted by Closure's goog.getMsg; the @desc annotations
// are required context for translators — keep them in sync with the UI.
const i18n = {
  /** @export {string} @desc Tooltip 'Logs' on the logs button on a replication controller card */
  MSG_RC_LIST_LOGS_TOOLTIP: goog.getMsg('Logs'),

  /** @export {string} @desc Label 'Logs' at the top of the drop down menu when a user clicks
      the logs button on a replication controller card. */
  MSG_RC_LIST_LOGS_LABEL: goog.getMsg('Logs'),

  /** @export {string} @desc Label 'Pod' for the column with pods in the logs drop down view
      (opened from a replication controller card). */
  MSG_RC_LIST_LOGS_POD_LABEL: goog.getMsg('Pod'),

  /** @export {string} @desc Label 'Running since' for the respective column in the logs drop
      down view (opened from a replication controller card). */
  MSG_RC_LIST_LOGS_RUNNING_SINCE_LABEL: goog.getMsg('Running since'),

  /** @export {string} @desc Label 'Not running' which appears in the pod logs list (opened from
      a replication controller card) when there are no logs for a given pod. */
  MSG_RC_LIST_LOGS_NOT_RUNNING_LABEL: goog.getMsg('Not running'),
};
apache-2.0
denouche/selma
processor/src/test/java/fr/xebia/extras/selma/it/generic/GenericMapperIT.java
3465
/* * Copyright 2013 Xebia and Séven Le Mesle * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package fr.xebia.extras.selma.it.generic; import fr.xebia.extras.selma.Selma; import fr.xebia.extras.selma.beans.AddressIn; import fr.xebia.extras.selma.beans.AddressOut; import fr.xebia.extras.selma.beans.CityIn; import fr.xebia.extras.selma.beans.CityOut; import fr.xebia.extras.selma.it.utils.Compile; import fr.xebia.extras.selma.it.utils.IntegrationTestBase; import junit.framework.Assert; import org.junit.Test; /** * */ @Compile(withClasses = {SpecificMapper.class, GenericMapper.class}) public class GenericMapperIT extends IntegrationTestBase { @Test public void beanMapper_should_map_properties_resolving_generics() throws Exception { SpecificMapper mapper = Selma.getMapper(SpecificMapper.class); AddressIn addressIn = new AddressIn(); addressIn.setPrincipal(true); addressIn.setNumber(55); addressIn.setStreet("rue de la truanderie"); addressIn.setCity(new CityIn()); addressIn.getCity().setCapital(true); addressIn.getCity().setName("Paris"); addressIn.getCity().setPopulation(3 * 1000 * 1000); AddressOut res = mapper.asEntity(addressIn); Assert.assertNotNull(res); verifyAddress(addressIn, res); } @Test public void beanMapper_should_update_properties_resolving_generics() throws Exception { SpecificMapper mapper = Selma.getMapper(SpecificMapper.class); AddressIn addressIn = new AddressIn(); addressIn.setPrincipal(true); addressIn.setNumber(55); addressIn.setStreet("rue de la 
truanderie"); addressIn.setCity(new CityIn()); addressIn.getCity().setCapital(true); addressIn.getCity().setName("Paris"); addressIn.getCity().setPopulation(3 * 1000 * 1000); AddressOut res = new AddressOut(); AddressOut result = mapper.updateEntity(addressIn, res); Assert.assertNotNull(result); Assert.assertTrue(res == result); verifyAddress(addressIn, res); } private void verifyAddress(AddressIn address, AddressOut address1) { if (address == null) { Assert.assertNull(address1); } else { Assert.assertEquals(address.getStreet(), address1.getStreet()); Assert.assertEquals(address.getNumber(), address1.getNumber()); Assert.assertEquals(address.getExtras(), address1.getExtras()); verifyCity(address.getCity(), address1.getCity()); } } private void verifyCity(CityIn city, CityOut city1) { if (city == null) { Assert.assertNull(city1); } else { Assert.assertEquals(city.getName(), city1.getName()); Assert.assertEquals(city.getPopulation(), city1.getPopulation()); Assert.assertEquals(city.isCapital(), city1.isCapital()); } } }
apache-2.0
o3project/openflowj-otn
src/main/java/org/projectfloodlight/openflow/protocol/ver13/OFActionIdBsnSetTunnelDstVer13.java
6449
// Copyright (c) 2008 The Board of Trustees of The Leland Stanford Junior University
// Copyright (c) 2011, 2012 Open Networking Foundation
// Copyright (c) 2012, 2013 Big Switch Networks, Inc.
// This library was generated by the LoxiGen Compiler.
// See the file LICENSE.txt which should have been included in the source distribution

// Automatically generated by LOXI from template of_class.java
// Do not modify
// NOTE(review): generated wire-protocol class; every field of this action-id is a
// fixed constant, so a single shared immutable INSTANCE is used instead of a builder.

package org.projectfloodlight.openflow.protocol.ver13;

import org.projectfloodlight.openflow.protocol.*;
import org.projectfloodlight.openflow.protocol.action.*;
import org.projectfloodlight.openflow.protocol.actionid.*;
import org.projectfloodlight.openflow.protocol.bsntlv.*;
import org.projectfloodlight.openflow.protocol.errormsg.*;
import org.projectfloodlight.openflow.protocol.meterband.*;
import org.projectfloodlight.openflow.protocol.instruction.*;
import org.projectfloodlight.openflow.protocol.instructionid.*;
import org.projectfloodlight.openflow.protocol.match.*;
import org.projectfloodlight.openflow.protocol.oxm.*;
import org.projectfloodlight.openflow.protocol.queueprop.*;
import org.projectfloodlight.openflow.types.*;
import org.projectfloodlight.openflow.util.*;
import org.projectfloodlight.openflow.exceptions.*;
import java.util.Set;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.jboss.netty.buffer.ChannelBuffer;
import com.google.common.hash.PrimitiveSink;
import com.google.common.hash.Funnel;

class OFActionIdBsnSetTunnelDstVer13 implements OFActionIdBsnSetTunnelDst {
    private static final Logger logger = LoggerFactory.getLogger(OFActionIdBsnSetTunnelDstVer13.class);
    // version: 1.3
    final static byte WIRE_VERSION = 4;
    // total wire length: type(2) + length(2) + experimenter(4) + subtype(4)
    final static int LENGTH = 12;

    // OF message fields
//
    // Immutable default instance
    final static OFActionIdBsnSetTunnelDstVer13 DEFAULT = new OFActionIdBsnSetTunnelDstVer13(
    );

    final static OFActionIdBsnSetTunnelDstVer13 INSTANCE = new OFActionIdBsnSetTunnelDstVer13();
    // private empty constructor - use shared instance!
    private OFActionIdBsnSetTunnelDstVer13() {
    }

    // Accessors for OF message fields
    @Override
    public OFActionType getType() {
        return OFActionType.EXPERIMENTER;
    }

    @Override
    public long getExperimenter() {
        return 0x5c16c7L;
    }

    @Override
    public long getSubtype() {
        return 0x2L;
    }

    @Override
    public OFVersion getVersion() {
        return OFVersion.OF_13;
    }

    // no data members - do not support builder
    public OFActionIdBsnSetTunnelDst.Builder createBuilder() {
        throw new UnsupportedOperationException("OFActionIdBsnSetTunnelDstVer13 has no mutable properties -- builder unneeded");
    }

    final static Reader READER = new Reader();
    static class Reader implements OFMessageReader<OFActionIdBsnSetTunnelDst> {
        @Override
        public OFActionIdBsnSetTunnelDst readFrom(ChannelBuffer bb) throws OFParseError {
            int start = bb.readerIndex();
            // fixed value property type == 65535
            short type = bb.readShort();
            if(type != (short) 0xffff)
                throw new OFParseError("Wrong type: Expected=OFActionType.EXPERIMENTER(65535), got="+type);
            int length = U16.f(bb.readShort());
            if(length != 12)
                throw new OFParseError("Wrong length: Expected=12(12), got="+length);
            // Partial message in the buffer: rewind and signal "need more data" with null.
            if(bb.readableBytes() + (bb.readerIndex() - start) < length) {
                // Buffer does not have all data yet
                bb.readerIndex(start);
                return null;
            }
            if(logger.isTraceEnabled())
                logger.trace("readFrom - length={}", length);
            // fixed value property experimenter == 0x5c16c7L
            int experimenter = bb.readInt();
            if(experimenter != 0x5c16c7)
                throw new OFParseError("Wrong experimenter: Expected=0x5c16c7L(0x5c16c7L), got="+experimenter);
            // fixed value property subtype == 0x2L
            int subtype = bb.readInt();
            if(subtype != 0x2)
                throw new OFParseError("Wrong subtype: Expected=0x2L(0x2L), got="+subtype);
            if(logger.isTraceEnabled())
                logger.trace("readFrom - returning shared instance={}", INSTANCE);
            return INSTANCE;
        }
    }

    public void putTo(PrimitiveSink sink) {
        FUNNEL.funnel(this, sink);
    }

    final static OFActionIdBsnSetTunnelDstVer13Funnel FUNNEL = new OFActionIdBsnSetTunnelDstVer13Funnel();
    static class OFActionIdBsnSetTunnelDstVer13Funnel implements Funnel<OFActionIdBsnSetTunnelDstVer13> {
        // NOTE(review): serialVersionUID on a Funnel (not Serializable here) is
        // generator boilerplate — harmless, left as emitted.
        private static final long serialVersionUID = 1L;
        @Override
        public void funnel(OFActionIdBsnSetTunnelDstVer13 message, PrimitiveSink sink) {
            // fixed value property type = 65535
            sink.putShort((short) 0xffff);
            // fixed value property length = 12
            sink.putShort((short) 0xc);
            // fixed value property experimenter = 0x5c16c7L
            sink.putInt(0x5c16c7);
            // fixed value property subtype = 0x2L
            sink.putInt(0x2);
        }
    }

    public void writeTo(ChannelBuffer bb) {
        WRITER.write(bb, this);
    }

    final static Writer WRITER = new Writer();
    static class Writer implements OFMessageWriter<OFActionIdBsnSetTunnelDstVer13> {
        @Override
        public void write(ChannelBuffer bb, OFActionIdBsnSetTunnelDstVer13 message) {
            // fixed value property type = 65535
            bb.writeShort((short) 0xffff);
            // fixed value property length = 12
            bb.writeShort((short) 0xc);
            // fixed value property experimenter = 0x5c16c7L
            bb.writeInt(0x5c16c7);
            // fixed value property subtype = 0x2L
            bb.writeInt(0x2);
        }
    }

    @Override
    public String toString() {
        StringBuilder b = new StringBuilder("OFActionIdBsnSetTunnelDstVer13(");
        b.append(")");
        return b.toString();
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null)
            return false;
        if (getClass() != obj.getClass())
            return false;
        return true;
    }

    @Override
    public int hashCode() {
        // All instances are fixed-content, hence a constant hash.
        int result = 1;
        return result;
    }
}
apache-2.0
josephcsible/GravityBox
app/src/main/java/com/ceco/marshmallow/gravitybox/PhoneWrapper.java
15588
/*
 * Copyright (C) 2013 Peter Gregus for GravityBox Project (C3C076@xda)
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.ceco.marshmallow.gravitybox;

import android.content.BroadcastReceiver;
import android.content.Context;
import android.content.Intent;
import android.content.IntentFilter;
import android.os.Message;
import android.provider.Settings;

import de.robv.android.xposed.XC_MethodHook;
import de.robv.android.xposed.XSharedPreferences;
import de.robv.android.xposed.XposedBridge;
import de.robv.android.xposed.XposedHelpers;

/**
 * Xposed-based wrapper around the (non-public) Android telephony PhoneFactory.
 * It hooks phone creation to capture a Context, listens for GravityBox broadcasts,
 * and reads/changes the preferred network type of the selected SIM slot via
 * reflection ({@code XposedHelpers}).
 *
 * NOTE(review): all telephony access goes through reflection on framework
 * internals ("com.android.internal.telephony.*"), so behavior depends on the
 * ROM's framework version — confirm against the targeted Android release.
 */
public class PhoneWrapper {
    private static final String TAG = "GB:PhoneWrapper";
    private static final boolean DEBUG = false;

    // RILConstants-style preferred network mode values.
    public static final int NT_WCDMA_PREFERRED = 0;            // GSM/WCDMA (WCDMA preferred) (2g/3g)
    public static final int NT_GSM_ONLY = 1;                   // GSM Only (2g)
    public static final int NT_WCDMA_ONLY = 2;                 // WCDMA ONLY (3g)
    public static final int NT_GSM_WCDMA_AUTO = 3;             // GSM/WCDMA Auto (2g/3g)
    public static final int NT_CDMA_EVDO = 4;                  // CDMA/EVDO Auto (2g/3g)
    public static final int NT_CDMA_ONLY = 5;                  // CDMA Only (2G)
    public static final int NT_EVDO_ONLY = 6;                  // Evdo Only (3G)
    public static final int NT_GLOBAL = 7;                     // GSM/WCDMA/CDMA Auto (2g/3g)
    public static final int NT_LTE_CDMA_EVDO = 8;
    public static final int NT_LTE_GSM_WCDMA = 9;
    public static final int NT_LTE_CMDA_EVDO_GSM_WCDMA = 10;
    public static final int NT_LTE_ONLY = 11;
    public static final int NT_LTE_WCDMA = 12;
    public static final int NT_MODE_UNKNOWN = 100;

    // TD-SCDMA
    public static final int NT_TDSCDMA_ONLY = 13;                      // 3G only
    public static final int NT_TDSCDMA_WCDMA = 14;                     // 3G only
    public static final int NT_LTE_TDSCDMA = 15;                       // LTE
    public static final int NT_TDSCDMA_GSM = 16;                       // 2G/3G
    public static final int NT_LTE_TDSCDMA_GSM = 17;                   // LTE
    public static final int NT_TDSCDMA_GSM_WCDMA = 18;                 // 2G/3G
    public static final int NT_LTE_TDSCDMA_WCDMA = 19;                 // LTE
    public static final int NT_LTE_TDSCDMA_GSM_WCDMA = 20;             // LTE
    public static final int NT_TDSCDMA_CDMA_EVDO_GSM_WCDMA = 21;       // 2G/3G
    public static final int NT_LTE_TDSCDMA_CDMA_EVDO_GSM_WCDMA = 22;   // LTE

    // Settings.Global key prefix; the per-subscription subId is appended to it.
    private static final String PREFERRED_NETWORK_MODE = "preferred_network_mode";

    // Broadcast actions exchanged with the rest of GravityBox.
    public static final String ACTION_CHANGE_NETWORK_TYPE = "gravitybox.intent.action.CHANGE_NETWORK_TYPE";
    public static final String ACTION_NETWORK_TYPE_CHANGED = "gravitybox.intent.action.NETWORK_TYPE_CHANGED";
    public static final String ACTION_GET_CURRENT_NETWORK_TYPE = "gravitybox.intent.action.GET_CURRENT_NETWORK_TYPE";
    public static final String EXTRA_NETWORK_TYPE = "networkType";
    public static final String EXTRA_PHONE_ID = "phoneId";
    public static final String EXTRA_RECEIVER_TAG = "receiverTag";

    // Reflection handles resolved once in initZygote().
    private static Class<?> mClsPhoneFactory;
    private static Class<?> mPhoneBaseClass;
    private static Class<?> mPhoneProxyClass;   // may stay null on ROMs without PhoneProxy
    private static Class<?> mSystemProperties;
    private static Context mContext;            // captured from the hooked PhoneFactory call
    private static int mSimSlot = 0;            // SIM slot targeted by network-type changes
    private static int mPhoneCount = -1;        // lazily cached; -1 = not yet queried
    private static Boolean mHasMsimSupport = null; // lazily cached multi-SIM capability

    private static void log(String msg) {
        XposedBridge.log(TAG + ": " + msg);
    }

    /**
     * Maps a network mode constant to a human-readable label.
     * NOTE(review): TD-SCDMA and NT_LTE_ONLY/NT_LTE_WCDMA values fall through to
     * "Undefined" — confirm whether callers ever pass those here.
     */
    public static String getNetworkModeNameFromValue(int networkMode) {
        switch(networkMode) {
            case NT_GSM_ONLY: return "GSM (2G)";
            case NT_WCDMA_PREFERRED: return "GSM/WCDMA Preferred (3G/2G)";
            case NT_GSM_WCDMA_AUTO: return "GSM/WCDMA Auto (2G/3G)";
            case NT_WCDMA_ONLY: return "WCDMA (3G)";
            case NT_CDMA_EVDO: return "CDMA/EvDo Auto";
            case NT_CDMA_ONLY: return "CDMA";
            case NT_EVDO_ONLY: return "EvDo";
            case NT_GLOBAL: return "GSM/WCDMA/CDMA Auto (2G/3G)";
            case NT_LTE_CDMA_EVDO: return "LTE (CDMA)";
            case NT_LTE_GSM_WCDMA: return "LTE (GSM)";
            case NT_LTE_CMDA_EVDO_GSM_WCDMA: return "LTE (Global)";
            default: return "Undefined";
        }
    }

    /** Handles the three GravityBox actions registered in onInitialize(). */
    private static BroadcastReceiver mBroadcastReceiver = new BroadcastReceiver() {
        @Override
        public void onReceive(Context context, Intent intent) {
            // Explicit request to switch the preferred network type.
            if (intent.getAction().equals(ACTION_CHANGE_NETWORK_TYPE) &&
                    intent.hasExtra(EXTRA_NETWORK_TYPE)) {
                int networkType = intent.getIntExtra(EXTRA_NETWORK_TYPE, NT_WCDMA_PREFERRED);
                if (DEBUG) log("received ACTION_CHANGE_NETWORK_TYPE broadcast: networkType = " + networkType);
                setPreferredNetworkType(networkType);
            }
            // Target SIM slot changed in preferences; re-apply the current mode to it.
            if (intent.getAction().equals(GravityBoxSettings.ACTION_PREF_QS_NETWORK_MODE_SIM_SLOT_CHANGED)) {
                mSimSlot = intent.getIntExtra(GravityBoxSettings.EXTRA_SIM_SLOT, 0);
                if (DEBUG) log("received ACTION_PREF_QS_NETWORK_MODE_SIM_SLOT_CHANGED broadcast: " +
                        "mSimSlot = " + mSimSlot);
                setPreferredNetworkType(getCurrentNetworkType(mSimSlot));
            }
            // Query: answer with an ACTION_NETWORK_TYPE_CHANGED broadcast.
            if (intent.getAction().equals(ACTION_GET_CURRENT_NETWORK_TYPE)) {
                int simSlot = intent.getIntExtra(EXTRA_PHONE_ID, mSimSlot);
                broadcastCurrentNetworkType(simSlot, getCurrentNetworkType(simSlot),
                        intent.getStringExtra(EXTRA_RECEIVER_TAG));
            }
        }
    };

    private static Class<?> getPhoneFactoryClass() {
        return XposedHelpers.findClass("com.android.internal.telephony.PhoneFactory", null);
    }

    private static Class<?> getPhoneBaseClass() {
        return XposedHelpers.findClass("com.android.internal.telephony.PhoneBase", null);
    }

    // PhoneProxy does not exist on every framework version, hence the null fallback.
    private static Class<?> getPhoneProxyClass() {
        try {
            return XposedHelpers.findClass("com.android.internal.telephony.PhoneProxy", null);
        } catch (Throwable t) {
            return null;
        }
    }

    private static Class<?> getTelephonyManagerClass() {
        return XposedHelpers.findClass("android.telephony.TelephonyManager", null);
    }

    /** Name of the PhoneFactory creation method to hook (varies by Gemini/multi-SIM). */
    private static String getMakePhoneMethodName() {
        if (Utils.hasGeminiSupport()) {
            return "makeDefaultPhones";
        } else if (hasMsimSupport()) {
            return "makeDefaultPhones";
        } else {
            return "makeDefaultPhone";
        }
    }

    /** Returns the phone object for the selected SIM slot, or null before init. */
    private static Object getPhone() {
        if (mClsPhoneFactory == null) {
            return null;
        } else if (hasMsimSupport()) {
            return XposedHelpers.callStaticMethod(mClsPhoneFactory, "getPhone", mSimSlot);
        } else {
            return XposedHelpers.callStaticMethod(mClsPhoneFactory, "getDefaultPhone");
        }
    }

    /**
     * Entry point called from Xposed's zygote init. Resolves framework classes,
     * reads the SIM slot preference, and installs two hooks:
     *  - PhoneFactory.makeDefaultPhone(s): captures Context and registers the receiver;
     *  - (PhoneBase|PhoneProxy).setPreferredNetworkType: broadcasts the new type.
     */
    public static void initZygote(final XSharedPreferences prefs) {
        if (DEBUG) log("Entering init state");
        try {
            mClsPhoneFactory = getPhoneFactoryClass();
            mPhoneBaseClass = getPhoneBaseClass();
            mPhoneProxyClass = getPhoneProxyClass();
            mSystemProperties = XposedHelpers.findClass("android.os.SystemProperties", null);
            mSimSlot = 0;
            try {
                mSimSlot = Integer.valueOf(prefs.getString(
                        GravityBoxSettings.PREF_KEY_QS_NETWORK_MODE_SIM_SLOT, "0"));
            } catch (NumberFormatException nfe) {
                log("Invalid value for SIM Slot preference: " + nfe.getMessage());
            }
            if (DEBUG) log("mSimSlot = " + mSimSlot);

            XposedHelpers.findAndHookMethod(mClsPhoneFactory, getMakePhoneMethodName(),
                    Context.class, new XC_MethodHook() {
                @Override
                protected void afterHookedMethod(final MethodHookParam param) throws Throwable {
                    // First (and only) place a usable Context becomes available.
                    mContext = (Context) param.args[0];
                    if (DEBUG) log("PhoneFactory makeDefaultPhones - phone wrapper initialized");
                    onInitialize();
                }
            });

            XC_MethodHook spntHook = new XC_MethodHook() {
                @Override
                protected void afterHookedMethod(final MethodHookParam param) throws Throwable {
                    int phoneId = XposedHelpers.getIntField(param.thisObject, "mPhoneId");
                    if (DEBUG) log("setPreferredNetworkType: networkType=" + param.args[0] +
                            "; phoneId=" + phoneId);
                    broadcastCurrentNetworkType(phoneId, (int)param.args[0], null);
                }
            };
            XposedHelpers.findAndHookMethod(mPhoneBaseClass, "setPreferredNetworkType",
                    int.class, Message.class, spntHook);
            if (mPhoneProxyClass != null) {
                // Same hook on PhoneProxy where that class exists.
                XposedHelpers.findAndHookMethod(mPhoneProxyClass, "setPreferredNetworkType",
                        int.class, Message.class, spntHook);
            }
        } catch (Throwable t) {
            XposedBridge.log(t);
        }
    }

    /** Registers the broadcast receiver once a Context has been captured. */
    private static void onInitialize() {
        if (mContext != null) {
            IntentFilter intentFilter = new IntentFilter(ACTION_CHANGE_NETWORK_TYPE);
            intentFilter.addAction(GravityBoxSettings.ACTION_PREF_QS_NETWORK_MODE_SIM_SLOT_CHANGED);
            intentFilter.addAction(ACTION_GET_CURRENT_NETWORK_TYPE);
            mContext.registerReceiver(mBroadcastReceiver, intentFilter);
        }
    }

    /**
     * Applies the given network type to the current phone. On Gemini (MTK dual-SIM)
     * devices the 3G-capable SIM id is queried first and a Gemini-specific setter is
     * used; otherwise the mode is persisted to Settings.Global (keyed by subId) and
     * set via the standard reflective call.
     */
    private static void setPreferredNetworkType(int networkType) {
        try {
            Object defPhone = getPhone();
            if (defPhone == null) return;
            if (Utils.hasGeminiSupport()) {
                mSimSlot = (Integer) XposedHelpers.callMethod(defPhone, "get3GSimId");
                if (DEBUG) log("Gemini 3G SIM ID: " + mSimSlot);
                Class<?>[] paramArgs = new Class<?>[3];
                paramArgs[0] = int.class;
                paramArgs[1] = Message.class;
                paramArgs[2] = int.class;
                XposedHelpers.callMethod(defPhone, "setPreferredNetworkTypeGemini",
                        paramArgs, networkType, null, mSimSlot);
            } else {
                int subId = (int) XposedHelpers.callMethod(defPhone, "getSubId");
                // Persist so the framework restores the same mode after reboot.
                Settings.Global.putInt(mContext.getContentResolver(),
                        PREFERRED_NETWORK_MODE + subId, networkType);
                Class<?>[] paramArgs = new Class<?>[2];
                paramArgs[0] = int.class;
                paramArgs[1] = Message.class;
                XposedHelpers.callMethod(defPhone, "setPreferredNetworkType",
                        paramArgs, networkType, null);
            }
        } catch (Throwable t) {
            log("setPreferredNetworkType failed: " + t.getMessage());
            XposedBridge.log(t);
        }
    }

    /** Device default network type from ro.telephony.default_network (WCDMA-preferred fallback). */
    public static int getDefaultNetworkType() {
        try {
            int mode = (Integer) XposedHelpers.callStaticMethod(mSystemProperties, "getInt",
                    "ro.telephony.default_network", NT_WCDMA_PREFERRED);
            if (DEBUG) log("getDefaultNetworkMode: mode=" + mode);
            return mode;
        } catch (Throwable t) {
            XposedBridge.log(t);
            return NT_WCDMA_PREFERRED;
        }
    }

    /**
     * Current preferred network type for the given phone id, computed by the
     * framework's PhoneFactory.calculatePreferredNetworkType. Falls back to the
     * device default on any failure or out-of-range phone id.
     */
    private static int getCurrentNetworkType(int phoneId) {
        try {
            int networkType = getDefaultNetworkType();
            Object[] phones = (Object[])XposedHelpers.callStaticMethod(mClsPhoneFactory, "getPhones");
            if (phoneId < phones.length) {
                int subId = (int) XposedHelpers.callMethod(phones[phoneId], "getSubId");
                if (DEBUG) log("getCurrentNetworkType: calculating network type for subId=" + subId);
                networkType = (int) XposedHelpers.callStaticMethod(mClsPhoneFactory,
                        "calculatePreferredNetworkType", mContext, subId);
            }
            if (DEBUG) log("getCurrentNetworkType: phoneId=" + phoneId +
                    "; networkType=" + getNetworkModeNameFromValue(networkType));
            return networkType;
        } catch (Throwable t) {
            XposedBridge.log(t);
            return NT_WCDMA_PREFERRED;
        }
    }

    /** Notifies listeners (e.g. the QS tile) that a phone's network type changed. */
    private static void broadcastCurrentNetworkType(int phoneId, int networkType, String receiverTag) {
        try {
            Intent intent = new Intent(ACTION_NETWORK_TYPE_CHANGED);
            intent.putExtra(EXTRA_PHONE_ID, phoneId);
            intent.putExtra(EXTRA_NETWORK_TYPE, networkType);
            if (receiverTag != null) {
                intent.putExtra(EXTRA_RECEIVER_TAG, receiverTag);
            }
            mContext.sendBroadcast(intent);
        } catch (Throwable t) {
            XposedBridge.log(t);
        }
    }

    /** True for every mode that includes LTE. */
    public static boolean isLteNetworkType(int networkType) {
        return (networkType >= NT_LTE_CDMA_EVDO &&
                networkType <= NT_LTE_WCDMA) ||
                networkType == NT_LTE_TDSCDMA ||
                networkType == NT_LTE_TDSCDMA_GSM ||
                networkType == NT_LTE_TDSCDMA_WCDMA ||
                networkType == NT_LTE_TDSCDMA_GSM_WCDMA ||
                networkType == NT_LTE_TDSCDMA_CDMA_EVDO_GSM_WCDMA;
    }

    /** Number of phones reported by TelephonyManager; cached, -1 if the query failed. */
    public static int getPhoneCount() {
        if (mPhoneCount != -1) return mPhoneCount;
        try {
            Object mtm = XposedHelpers.callStaticMethod(getTelephonyManagerClass(), "getDefault");
            mPhoneCount = (int) XposedHelpers.callMethod(mtm, "getPhoneCount");
        } catch (Throwable t) {
            if (DEBUG) XposedBridge.log(t);
            mPhoneCount = -1;
        }
        if (DEBUG) log("getPhoneCount: " + mPhoneCount);
        return mPhoneCount;
    }

    /** Cached check for multi-SIM support (TelephonyManager.isMultiSimEnabled && >1 phone). */
    public static boolean hasMsimSupport() {
        if (mHasMsimSupport != null) return mHasMsimSupport;
        try {
            Object mtm = XposedHelpers.callStaticMethod(getTelephonyManagerClass(), "getDefault");
            mHasMsimSupport = (Boolean) XposedHelpers.callMethod(mtm, "isMultiSimEnabled") &&
                    getPhoneCount() > 1;
            if (DEBUG) log("isMultiSimEnabled: " +
                    (Boolean) XposedHelpers.callMethod(mtm, "isMultiSimEnabled"));
            if (DEBUG) log("getPhoneCount: " + getPhoneCount());
        } catch (Throwable t) {
            if (DEBUG) XposedBridge.log(t);
            mHasMsimSupport = false;
        }
        if (DEBUG) log("hasMsimSupport: " + mHasMsimSupport);
        return mHasMsimSupport;
    }
}
apache-2.0
anamariad/ML
Clusterization/clusterization/em2d.py
4353
# 2-D Gaussian-mixture Expectation-Maximization clustering (Python 2 script).
# Assumes all K components share a single, fixed standard deviation `stddev`
# and uniform mixture weights 1/K, so only the means are re-estimated.
# Depends on a project-local `utils` module (read_points, convergence,
# print_*_to_file, plot_clusters) -- not shown here.
import sys
import getopt
import random
import numpy as np
import utils as utils


def estimation(K, means, points, stddev):
    """E step: responsibility of each of the K components for each point.

    Returns an (n_points, K) array where row i is the posterior probability of
    point i under each component (isotropic Gaussians with shared stddev,
    uniform priors -- the common normalizing factors cancel in the ratio).
    """
    points_size = len(points)
    expectations = np.zeros((points_size, K))
    for i in range(0, points_size):
        total = 0
        current_point = points[i]
        # Denominator: sum of unnormalized Gaussian densities over all components.
        for j in range(0, K):
            total += np.exp(-1 / (2 * (stddev ** 2)) * (np.linalg.norm(current_point - means[j]) ** 2))
        for j in range(0, K):
            expectations[i][j] = np.exp(-1 / (2 * (stddev ** 2)) * (np.linalg.norm(current_point - means[j]) ** 2)) / total
    return expectations


def maximization(K, expectations, points):
    """M step: new mean of each component = responsibility-weighted average
    of all points. Returns a list of K 2-D numpy vectors."""
    points_size = len(points)
    means = []
    for j in range(0, K):
        m_step_numerator = np.zeros(2)  # 2-D points only (hence "em2d")
        m_step_denominator = 0
        for i in range(0, points_size):
            m_step_numerator += expectations[i][j] * points[i]
            m_step_denominator += expectations[i][j]
        means.append(m_step_numerator / m_step_denominator)
    return means


def q_function(K, stddev, points, centroids, expectations):
    """Expected complete-data log-likelihood (the EM "Q" function) for the
    fitted model; written to the output file as a fit-quality measure."""
    q = 0.0
    for i in range(len(points)):
        # Constant per-point terms: uniform prior log(1/K) + Gaussian normalizer.
        q += -np.log(K) + np.log(1.0 / np.sqrt(2 * np.pi * (stddev ** 2)))
        for j in range(K):
            q += -1.0 / (2 * stddev ** 2) * expectations[i][j] * ((np.linalg.norm(points[i] - centroids[j])) ** 2)
    return q


def expectation_maximization(points, K, stddev, means, threshold):
    """Alternates E and M steps until the means move less than `threshold`
    (as judged by utils.convergence), then hard-assigns points to clusters.

    Returns the tuple (means, expectations, clusters).
    """
    old_means = np.zeros(means.shape)
    expectations = None
    clusters = None
    while not utils.convergence(means, old_means, threshold):
        old_means = means
        # the E step
        expectations = estimation(K, means, points, stddev)
        # the M step
        means = np.array(maximization(K, expectations, points))
    clusters = assign_points_to_clusters(points, expectations, K)
    return means, expectations, clusters  # returns a tuple of them


def assign_points_to_clusters(points, expectations, K):
    """Hard assignment: each point goes to the cluster with the highest
    responsibility. Returns {cluster_index: [points...]}."""
    clusters = {}
    for i in range(K):
        clusters[i] = []
    for i in range(len(points)):
        # argmax over the K responsibilities of point i.
        best_cluster_key = max([(j, expectations[i][j]) for j in range(K)], key = lambda t: t[1])[0]
        clusters[best_cluster_key].append(points[i])
    return clusters


# ---- command-line driver (runs at import; Python 2 print statements) ----
error_msg = 'em1d.py -i <inputfile> -k <number of clusters> -m <comma-separated initial K means values> ' \
            '-s <stddev> -t <threshold> -o <outputfile>'
try:
    opts, args = getopt.getopt(sys.argv[1:], "i:k:m:s:t:o:",
                               ["inputfile=", "means=", "stddev=", "threshold=", "outputfile="])
except getopt.GetoptError:
    print error_msg
    sys.exit(2)

input_filename = None
K = 0
means = None
stddev = None
threshold = None
output_filename = None
for opt, arg in opts:
    if opt in ('-i', '--inputfile'):
        input_filename = arg
    elif opt in ('-o', '--outputfile'):
        output_filename = arg
    elif opt == '-k':
        K = int(arg)
    elif opt in ('-s', '--stddev'):
        stddev = float(arg)
    elif opt in ('-t', '--threshold'):
        threshold = float(arg)
    elif opt in ('-m', '--means'):
        # Format: "x1,y1:x2,y2" -- NOTE(review): exactly two initial means are
        # parsed here regardless of K; confirm K == 2 is intended for -m.
        means_string = arg.split(":")
        first_mean = [float(m) for m in means_string[0].split(",")]
        second_mean = [float(m) for m in means_string[1].split(",")]
        means = np.array([first_mean, second_mean])

if input_filename is None or K == 0:
    print error_msg
    sys.exit(2)

# Defaults for the optional parameters.
if threshold is None:
    threshold = 0.01
if output_filename is None:
    output_filename = "em.out"

output_file = open(output_filename, 'w')
input_points = utils.read_points(input_filename)
if stddev is None:
    stddev = np.std(input_points)
if means is None:
    # Random initialization: K distinct points from the input.
    means = np.array(random.sample(input_points, K))

# writing the standard deviation to file
output_file.write(str(stddev))
output_file.write('\n')

centroids, expectations, clusters = expectation_maximization(input_points, K, stddev, means, threshold)
print "centroids:\n {} \n expectations:\n {}".format(centroids, expectations)

# outputting q function to file
output_file.write(str(q_function(K, stddev, input_points, centroids, expectations)))
output_file.write('\n')
# outputting centroids to file
utils.print_array_to_file(centroids, output_file)
# outputting expectations to file
utils.print_matrix_to_file(expectations, output_file)
output_file.close()
utils.plot_clusters(centroids, clusters)
apache-2.0
wangzheng0822/algo
c-cpp/17_skiplist/SkipList.cpp
8155
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <string>
#include <cstring>
#include <random>
#include <ctime>
using namespace std;

/*
 * A skip list implementation storing distinct positive integers.
 * (Comments translated to English; the original file's comments were
 * GBK-encoded Chinese that had been mis-decoded. The std::cout strings
 * below are left byte-for-byte as found, since they are program output.)
 *
 * Structure:
 *   level K     1                9
 *   level K-1   1       5        9
 *   level K-2   1   3   5   7    9
 *   ...
 *   level 0     1 2 3 4 5 6 7 8 9   (the full, original linked list)
 */
const int MAX_LEVEL = 16;

/** A skip-list node: a value plus one forward pointer per index level. */
class CNode
{
public:
    CNode();
    ~CNode();
    std::string toString();
    /** Returns the per-level forward-pointer array (level 0 = base list). */
    CNode** GetIdxList();
    /** Sets the node's value. */
    void SetData(int v);
    /** Returns the node's value. */
    int GetData();
    /** Records the highest index level this node participates in. */
    void SetLevel(int l);
private:
    // The node's value; -1 while unset.
    int m_data;
    // m_lpForwards[i] is the next node at index level i.
    // m_lpForwards[0] forms the complete (level-0) list.
    CNode* m_lpForwards[MAX_LEVEL];
    // Highest index level occupied by this node.
    int m_iMaxLevel;
};

/** The skip list itself: a head sentinel plus the current level count. */
class CSkipList
{
public:
    CSkipList();
    ~CSkipList();
    /** Finds the node holding value v, or NULL. */
    CNode* Find(int v);
    /** Inserts value v (assumed not already present). */
    void Insert(int v);
    /** Removes value v; returns 0 on success, -1 if not found. */
    int Delete(int v);
    void PrintAll();
    /**
     * Prints the structure: l == -1 prints every level,
     * l >= 0 prints only that level.
     */
    void PrintAll(int l);
    /** Random level (1..MAX_LEVEL) for a new node. */
    int RandomLevel();
private:
    // Number of index levels currently in use.
    int levelCount;
    // Head sentinel node (not a data node).
    CNode* m_lpHead;
};

int main()
{
    CSkipList skipList;
    // Populate: multiples of 3, then values = 1 (mod 3), from 1..49.
    for(int i=1; i< 50; i++){
        if((i%3) == 0){
            skipList.Insert(i);
        }
    }
    for(int i=1; i< 50; i++){
        if((i%3) == 1){
            skipList.Insert(i);
        }
    }
    skipList.PrintAll();
    std::cout<<std::endl;
    // Print every level of the structure.
    skipList.PrintAll(-1);

    // Lookup demo (messages kept byte-identical; originally GBK Chinese).
    std::cout<<std::endl;
    CNode* lpNode = skipList.Find(27);
    if(NULL != lpNode){
        std::cout<<"²éÕÒֵΪ27µÄ½Úµã,ÕÒµ½¸Ã½Úµã,½ÚµãÖµ:"<<lpNode->GetData()<<std::endl;
    }else{
        std::cout<<"²éÕÒֵΪ27µÄ½Úµã,δÕÒµ½¸Ã½Úµã"<<std::endl;
    }

    // Delete demo.
    std::cout<<std::endl;
    int ret = skipList.Delete(46);
    if(0 == ret){
        std::cout<<"²éÕÒֵΪ46µÄ½Úµã,ÕÒµ½¸Ã½Úµã,²¢É¾³ý³É¹¦"<<std::endl;
    }else{
        std::cout<<"²éÕÒֵΪ46µÄ½Úµã,ÕÒµ½¸Ã½Úµã,ɾ³ýʧ°Ü"<<std::endl;
    }

    std::cout<<std::endl;
    // Print every level again after the deletion.
    skipList.PrintAll(-1);
    std::cin.ignore();
    return 0;
}

CNode::CNode()
{
    m_data = -1;
    m_iMaxLevel = 0;
    for(int i=0; i<MAX_LEVEL; i++){
        m_lpForwards[i] = NULL;
    }
}

CNode::~CNode()
{
}

CNode** CNode::GetIdxList()
{
    return m_lpForwards;
}

void CNode::SetData(int v)
{
    m_data = v;
}

int CNode::GetData()
{
    return m_data;
}

void CNode::SetLevel(int l)
{
    m_iMaxLevel = l;
}

std::string CNode::toString()
{
    char tmp[32];
    std::string ret;
    ret.append("{ data: ");
    sprintf(tmp, "%d", m_data);
    ret.append(tmp);
    ret.append("; levels: ");
    sprintf(tmp, "%d", m_iMaxLevel);
    ret.append(tmp);
    ret.append(" }");
    return ret;
}

CSkipList::CSkipList()
{
    levelCount = 1;
    m_lpHead = new CNode();
}

CSkipList::~CSkipList()
{
    // NOTE(review): nodes allocated in Insert() are never freed here -- the
    // list leaks on destruction (acceptable for this demo, worth fixing).
}

CNode* CSkipList::Find(int v)
{
    CNode* lpNode = m_lpHead;
    /*
     * Search from the highest index level down: K -> K-1 -> ... -> 0.
     * At each level, advance while the next node's value is still < v.
     */
    for(int i=levelCount-1; i>=0; --i){
        // Find the rightmost node with value < v at this level.
        while((NULL != lpNode->GetIdxList()[i]) && (lpNode->GetIdxList()[i]->GetData() < v)){
            lpNode = lpNode->GetIdxList()[i];
        }
    }
    /*
     * lpNode is the last node with value < v, so its level-0 successor is
     * the first node with value >= v.
     */
    if((NULL != lpNode->GetIdxList()[0]) && (lpNode->GetIdxList()[0]->GetData() == v)){
        return lpNode->GetIdxList()[0];
    }
    return NULL;
}

void CSkipList::Insert(int v)
{
    // The new node.
    CNode* lpNewNode = new CNode();
    if(NULL == lpNewNode){
        return;
    }
    /*
     * Highest index level the new node will occupy: if RandomLevel()
     * returns 3, the node is linked into levels 0, 1 and 2.
     */
    int level = RandomLevel();
    lpNewNode->SetData(v);
    lpNewNode->SetLevel(level);

    /*
     * Per-level predecessors of the insertion point.
     * NOTE(review): this is a variable-length array -- a compiler extension,
     * not standard C++.
     */
    CNode *lpUpdateNode[level];
    for(int i=0; i<level; i++){
        // Start each search at the head sentinel.
        lpUpdateNode[i] =m_lpHead;
    }
    CNode* lpFind = m_lpHead;
    for(int i= level-1; i >= 0; --i){
        /*
         * Find the insertion point at level i.
         * E.g. level i holds 1 7 10 and we insert 6:
         * advance while the next node's value < v, so lpFind stops at 1
         * and lpFind->GetIdxList()[i] is 7 -- the new node goes between them.
         */
        while((NULL != lpFind->GetIdxList()[i]) && (lpFind->GetIdxList()[i]->GetData() < v)){
            lpFind = lpFind->GetIdxList()[i];
        }
        // lpFind is the new node's predecessor at level i.
        lpUpdateNode[i] = lpFind;
    }
    for(int i=0; i<level; ++i){
        /*
         * Splice the new node in at level i:
         * with 1 7 10 and v=6, lpUpdateNode[i] is node 1 and
         * lpUpdateNode[i]->GetIdxList()[i] is node 7; these two lines
         * place 6 between 1 and 7.
         */
        lpNewNode->GetIdxList()[i] = lpUpdateNode[i]->GetIdxList()[i];
        lpUpdateNode[i]->GetIdxList()[i] = lpNewNode;
    }
    if(levelCount < level){
        levelCount = level;
    }
}

int CSkipList::Delete(int v)
{
    int ret = -1;
    CNode *lpUpdateNode[levelCount];
    CNode *lpFind = m_lpHead;
    for(int i=levelCount-1; i>= 0; --i){
        // Find the rightmost node with value < v at this level.
        while((NULL != lpFind->GetIdxList()[i]) && (lpFind->GetIdxList()[i]->GetData() < v)){
            lpFind = lpFind->GetIdxList()[i];
        }
        lpUpdateNode[i] = lpFind;
    }
    /*
     * lpFind is the last node with value < v; its level-0 successor is the
     * candidate node to delete. Unlink it from every level where the
     * recorded predecessor points at it.
     * NOTE(review): the removed node itself is never delete'd (leak).
     */
    if((NULL != lpFind->GetIdxList()[0]) && (lpFind->GetIdxList()[0]->GetData() == v)){
        for(int i=levelCount-1; i>=0; --i){
            if((NULL != lpUpdateNode[i]->GetIdxList()[i]) && (v == lpUpdateNode[i]->GetIdxList()[i]->GetData())){
                lpUpdateNode[i]->GetIdxList()[i] = lpUpdateNode[i]->GetIdxList()[i]->GetIdxList()[i];
                ret = 0;
            }
        }
    }
    return ret;
}

void CSkipList::PrintAll()
{
    // Walk the level-0 (complete) list, printing every node.
    CNode* lpNode = m_lpHead;
    while(NULL != lpNode->GetIdxList()[0]){
        std::cout<<lpNode->GetIdxList()[0]->toString().data()<<std::endl;
        lpNode = lpNode->GetIdxList()[0];
    }
}

void CSkipList::PrintAll(int l)
{
    // Print each level from the top down; l >= 0 restricts to that level.
    for(int i=MAX_LEVEL-1; i>=0;--i){
        CNode* lpNode = m_lpHead;
        std::cout<<"µÚ"<<i<<"¼¶:"<<std::endl;
        if((l < 0) || ((l >= 0) && (l == i))){
            while(NULL != lpNode->GetIdxList()[i]){
                std::cout<<lpNode->GetIdxList()[i]->GetData()<<" ";
                lpNode = lpNode->GetIdxList()[i];
            }
            std::cout<<std::endl;
            if(l >= 0){
                break;
            }
        }
    }
}

// Pseudo-random int in [1, 99999]; reseeds on every call from time+counter.
int GetRandom()
{
    static int _count = 1;
    std::default_random_engine generator(time(0) + _count);
    std::uniform_int_distribution<int> distribution(1,99999/*0x7FFFFFFF*/);
    int dice_roll = distribution(generator);
    _count += 100;
    return dice_roll;
}

int CSkipList::RandomLevel()
{
    // Each of the MAX_LEVEL-1 coin flips promotes with probability ~1/3.
    int level = 1;
    for(int i=1; i<MAX_LEVEL; ++i){
        if(1 == (GetRandom()%3)){
            level++;
        }
    }
    return level;
}
apache-2.0
lmjacksoniii/hazelcast
hazelcast/src/test/java/com/hazelcast/collection/impl/list/ClusterListTest.java
6619
/*
 * Copyright (c) 2008-2016, Hazelcast, Inc. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.hazelcast.collection.impl.list;

import com.hazelcast.config.Config;
import com.hazelcast.config.ListConfig;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.core.IList;
import com.hazelcast.test.HazelcastParallelClassRunner;
import com.hazelcast.test.HazelcastTestSupport;
import com.hazelcast.test.TestHazelcastInstanceFactory;
import com.hazelcast.test.annotation.ParallelTest;
import com.hazelcast.test.annotation.QuickTest;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;

import java.util.ArrayList;
import java.util.List;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;

/**
 * Cluster-level tests for the distributed IList: two (or more) members obtain
 * the same named list and the tests verify that mutations made through one
 * member become visible through the others, including across member shutdown
 * (backup promotion) and partition migration.
 */
@RunWith(HazelcastParallelClassRunner.class)
@Category({QuickTest.class, ParallelTest.class})
public class ClusterListTest extends HazelcastTestSupport {

    // Adds 100 items through each of two members (200 total, interleaved in
    // pairs), then removes them by index via one member and by value via the
    // other, checking both views converge.
    @Test
    public void testAddRemove() {
        final String name = randomString();
        final int count = 100;
        TestHazelcastInstanceFactory factory = createHazelcastInstanceFactory(2);
        final HazelcastInstance[] instances = factory.newInstances();
        IList<String> list1 = instances[0].getList(name);
        IList<String> list2 = instances[1].getList(name);
        for (int i = 0; i < count; i++) {
            assertTrue(list1.add("item" + i));
            assertTrue(list2.add("item" + i));
        }
        assertSizeEventually(200, list1);
        assertSizeEventually(200, list2);
        assertEquals("item0", list1.get(0));
        assertEquals("item0", list2.get(0));
        assertEquals("item99", list1.get(199));
        assertEquals("item99", list2.get(199));
        // Index-based removal through member 1; the interleaved layout keeps
        // "item" + i at index i as the list shrinks.
        for (int i = 0; i < count; i++) {
            assertEquals("item" + i, list1.remove(i));
        }
        assertSizeEventually(100, list2);
        // Value-based removal of the remaining copies through member 2.
        for (int i = 0; i < count; i++) {
            assertTrue(list2.remove("item" + i));
        }
        assertSizeEventually(0, list1);
        assertSizeEventually(0, list2);
    }

    // Bulk operations: addAll via member 1, then containsAll/retainAll/removeAll
    // via member 2, with visibility checked from the opposite member.
    @Test
    public void testAddContainsRemoveRetainsAll() {
        final String name = randomString();
        TestHazelcastInstanceFactory factory = createHazelcastInstanceFactory(2);
        HazelcastInstance instance1 = factory.newHazelcastInstance();
        HazelcastInstance instance2 = factory.newHazelcastInstance();
        IList<String> list1 = instance1.getList(name);
        IList<String> list2 = instance2.getList(name);
        List<String> listTest1 = new ArrayList<String>();
        for (int i = 0; i < 100; i++) {
            listTest1.add("item" + i);
        }
        assertTrue(list1.addAll(listTest1));
        assertSizeEventually(100, list2);

        List<String> listTest2 = new ArrayList<String>();
        for (int i = 30; i < 40; i++) {
            listTest2.add("item" + i);
        }
        assertContainsAll(list2, listTest2);
        // retainAll keeps only item30..item39.
        assertTrue(list2.retainAll(listTest2));
        assertSizeEventually(10, list1);
        assertTrue(list1.removeAll(listTest2));
        assertSizeEventually(0, list1);
    }

    // Verifies the list survives a member shutdown: items added before the
    // shutdown stay visible through the surviving member (backup promotion).
    @Test
    public void testShutdown() {
        final String name = randomString();
        TestHazelcastInstanceFactory factory = createHazelcastInstanceFactory(2);
        final HazelcastInstance[] instances = factory.newInstances();
        IList<String> list1 = instances[0].getList(name);
        IList<String> list2 = instances[1].getList(name);
        warmUpPartitions(instances);

        for (int i = 0; i < 50; i++) {
            list1.add("item" + i);
        }
        instances[0].shutdown();
        assertSizeEventually(50, list2);
        for (int i = 50; i < 100; i++) {
            list2.add("item" + i);
        }
        // remove(0) repeatedly: items must come back in insertion order.
        for (int i = 0; i < 100; i++) {
            assertEquals("item" + i, list2.remove(0));
        }
    }

    // Verifies data survives repeated member joins/shutdowns (partition
    // migration) with backupCount = 1.
    @Test
    public void testMigration() {
        Config config = new Config();
        final String name = randomString();
        config.addListConfig(new ListConfig().setName(name).setBackupCount(1));
        final int insCount = 4;
        TestHazelcastInstanceFactory factory = createHazelcastInstanceFactory(insCount);
        HazelcastInstance instance1 = factory.newHazelcastInstance(config);
        IList<String> list = instance1.getList(name);
        for (int i = 0; i < 100; i++) {
            list.add("item" + i);
        }
        HazelcastInstance instance2 = factory.newHazelcastInstance(config);
        assertEquals(100, instance2.getList(name).size());
        HazelcastInstance instance3 = factory.newHazelcastInstance(config);
        assertEquals(100, instance3.getList(name).size());
        instance1.shutdown();
        assertEquals(100, instance3.getList(name).size());
        list = instance2.getList(name);
        for (int i = 0; i < 100; i++) {
            list.add("item-" + i);
        }
        instance2.shutdown();
        assertEquals(200, instance3.getList(name).size());
        instance1 = factory.newHazelcastInstance(config);
        assertEquals(200, instance1.getList(name).size());
        instance3.shutdown();
        assertEquals(200, instance1.getList(name).size());
    }

    // Verifies the configured max size is enforced: the 101st add is rejected,
    // and removing one element makes room again.
    @Test
    public void testMaxSize() {
        Config config = new Config();
        final String name = "defList";
        config.addListConfig(new ListConfig().setName(name).setBackupCount(1).setMaxSize(100));
        final int insCount = 2;
        TestHazelcastInstanceFactory factory = createHazelcastInstanceFactory(insCount);
        HazelcastInstance instance1 = factory.newHazelcastInstance(config);
        HazelcastInstance instance2 = factory.newHazelcastInstance(config);
        IList<String> list = instance1.getList(name);
        for (int i = 0; i < 100; i++) {
            assertTrue(list.add("item" + i));
        }
        assertFalse(list.add("item"));
        assertNotNull(list.remove(0));
        assertTrue(list.add("item"));
    }
}
apache-2.0
telefonicaid/fiware-cosmos-platform
infinity/client/src/test/scala/es/tid/cosmos/infinity/client/mock/ErrorBehavior.scala
1708
/*
 * Copyright (c) 2013-2014 Telefónica Investigación y Desarrollo S.A.U.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package es.tid.cosmos.infinity.client.mock

import javax.servlet.http.HttpServletResponse

import unfiltered.filter.{Plan, Planify}
import unfiltered.response._

import es.tid.cosmos.infinity.common.fs.{Path, PathMetadata}

/** Canned error behaviors for the mock Infinity server used in client tests.
  *
  * Each nested object is a [[Behavior]] whose plan answers every request with
  * one fixed HTTP response, regardless of the configured paths or contents.
  * `errorBody` comes from the mixed-in `Formatters` trait.
  */
object ErrorBehavior extends Formatters {

  /** Behavior that replies to any request with the given fixed response. */
  abstract class SimpleBehavior(response: ResponseFunction[HttpServletResponse]) extends Behavior {
    override def planFor(
        paths: Map[Path, PathMetadata],
        contents: Map[Path, String],
        dataFactory: DataFactory): Plan = Planify {
      // Match-all: the arguments are ignored on purpose.
      case _ => response
    }
  }

  /** 400 with a JSON error body. */
  object BadRequestBehavior extends SimpleBehavior(BadRequest ~> errorBody("don't understand you"))
  /** 200 whose body is not valid protocol output (parser robustness tests). */
  object GibberishResponseBehavior extends SimpleBehavior(Ok ~> ResponseString("dlalkdkldijodjd"))
  /** 403 with a JSON error body. */
  object ForbiddenBehavior extends SimpleBehavior(Forbidden ~> errorBody("don't dare trying that"))
  /** 409 signalling the target already exists. */
  object AlreadyExistsBehavior extends SimpleBehavior(Conflict ~> errorBody("file or directory already exists"))
  /** 404 signalling a missing file or directory. */
  object NotFoundBehavior extends SimpleBehavior(NotFound ~> errorBody("file or directory does not exist"))
}
apache-2.0
raybooysen/esp-js
src/system/logging/defaultSink.js
761
// notice_start
/*
 * Copyright 2015 Dev Shop Limited
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// notice_end

/**
 * Default log sink: writes a log event to the console as
 * "[logger] [level]: message".
 * @param {{logger: string, level: string, message: string}} logEvent
 */
export default function (logEvent) {
    const line = `[${logEvent.logger}] [${logEvent.level}]: ${logEvent.message}`;
    console.log(line);
};
apache-2.0
Windows-XAML/Template10
Source/Template10.Services.Nag/Default/NagSettings.cs
169
 namespace Template10.Services
{
    /// <summary>
    /// Global configuration for the nag (rate-this-app) dialog service.
    /// </summary>
    public static class NagSettings
    {
        /// <summary>
        /// Optional custom resolver for the dialog's localized resources.
        /// When <c>null</c> (the default), the service's built-in resolver is
        /// used. NOTE(review): consumers of this property are outside this
        /// file — confirm against the nag service implementation.
        /// </summary>
        public static IDialogResourceResolver CustomResolver { get; set; } = null;
    }
}
apache-2.0
williballenthin/synapse
synapse/lib/certdir.py
9049
import os from OpenSSL import crypto import synapse.lib.tags as s_tags from synapse.common import * defdir = os.getenv('SYN_CERT_DIR') if defdir == None: defdir = '~/.syn/certs' def iterFqdnUp(fqdn): levs = fqdn.split('.') for i in range(len(levs)): yield '.'.join( levs[i:] ) class CertDir: def __init__(self, path=None): if path == None: path = defdir gendir(path,'cas') gendir(path,'hosts') gendir(path,'users') self.certdir = reqdir(path) def getPathJoin(self, *paths): return genpath(self.certdir,*paths) def getCaCert(self, name): byts = reqbytes( self.getCaCertPath(name) ) return crypto.load_certificate(crypto.FILETYPE_PEM, byts) def getHostCert(self, name): byts = reqbytes( self.getHostCertPath(name) ) return crypto.load_certificate(crypto.FILETYPE_PEM, byts) def getUserCert(self, name): byts = reqbytes( self.getUserCertPath(name) ) return crypto.load_certificate(crypto.FILETYPE_PEM, byts) def getCaKey(self, name): byts = reqbytes( self.getCaKeyPath(name) ) return crypto.load_privatekey(crypto.FILETYPE_PEM, byts) def getHostKey(self, name): byts = reqbytes( self.getHostKeyPath(name) ) return crypto.load_privatekey(crypto.FILETYPE_PEM, byts) def getUserKey(self, name): byts = reqbytes( self.getUserKeyPath(name) ) return crypto.load_privatekey(crypto.FILETYPE_PEM, byts) #def getHostCert(self, name): #byts = reqbytes( self.getHostCertPath(name) ) #return crypto.load_certificate(crypto.FILETYPE_PEM, byts) #def saveCaCert(self, cert): #def saveUserCert(self, cert): #def saveHostCert(self, cert): #def saveX509Cert(self, cert): #def loadX509Cert(self, path): def _genBasePkeyCert(self, name, pkey=None): if pkey == None: pkey = crypto.PKey() pkey.generate_key(crypto.TYPE_RSA, 2048) cert = crypto.X509() cert.set_pubkey(pkey) cert.gmtime_adj_notBefore(0) cert.gmtime_adj_notAfter(10*365*24*60*60) cert.set_serial_number( int(time.time()) ) cert.get_subject().CN = name return pkey,cert def _saveCertTo(self, cert, *paths): path = self.getPathJoin(*paths) if 
os.path.isfile(path): raise DupFileName(path=path) with genfile(path) as fd: fd.write( crypto.dump_certificate(crypto.FILETYPE_PEM, cert) ) return path def _savePkeyTo(self, pkey, *paths): path = self.getPathJoin(*paths) if os.path.isfile(path): raise DupFileName(path=path) with genfile(path) as fd: fd.write( crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey) ) return path def genCaCert(self, name, signas=None, outp=None): pkey,cert = self._genBasePkeyCert(name) ext0 = crypto.X509Extension(b'basicConstraints',False,b'CA:TRUE') cert.add_extensions([ext0]) if signas != None: self.signCertAs(cert,signas) else: self.selfSignCert(cert,pkey) keypath = self._savePkeyTo(pkey, 'cas','%s.key' % name) if outp != None: outp.printf('key saved: %s' % (keypath,)) crtpath = self._saveCertTo(cert, 'cas','%s.crt' % name) if outp != None: outp.printf('cert saved: %s' % (crtpath,)) return pkey,cert def genHostCert(self, name, signas=None, outp=None, pkey=None): pkey,cert = self._genBasePkeyCert(name,pkey=pkey) certtype = b'server' extuse = [b'serverAuth'] keyuse = [b'digitalSignature',b'keyEncipherment'] ext0 = crypto.X509Extension(b'nsCertType',False,certtype) ext1 = crypto.X509Extension(b'keyUsage',False,b','.join(keyuse)) extuse = b','.join(extuse) ext2 = crypto.X509Extension(b'extendedKeyUsage',False,extuse) ext3 = crypto.X509Extension(b'basicConstraints',False,b'CA:FALSE') cert.add_extensions([ext0,ext1,ext2,ext3]) if signas != None: self.signCertAs(cert,signas) else: self.selfSignCert(cert,pkey) if not pkey._only_public: keypath = self._savePkeyTo(pkey, 'hosts','%s.key' % name) if outp != None: outp.printf('key saved: %s' % (keypath,)) crtpath = self._saveCertTo(cert, 'hosts','%s.crt' % name) if outp != None: outp.printf('cert saved: %s' % (crtpath,)) return pkey,cert def genUserCert(self, name, signas=None, outp=None, pkey=None): pkey,cert = self._genBasePkeyCert(name, pkey=pkey) keyuse = [b'digitalSignature'] extuse = [b'clientAuth'] certtype = b'client' ext0 = 
crypto.X509Extension(b'nsCertType',False,certtype) ext1 = crypto.X509Extension(b'keyUsage',False,b','.join(keyuse)) extuse = b','.join(extuse) ext2 = crypto.X509Extension(b'extendedKeyUsage',False,extuse) ext3 = crypto.X509Extension(b'basicConstraints',False,b'CA:FALSE') cert.add_extensions([ext0,ext1,ext2,ext3]) if signas != None: self.signCertAs(cert,signas) else: self.selfSignCert(cert,pkey) if not pkey._only_public: keypath = self._savePkeyTo(pkey, 'users','%s.key' % name) if outp != None: outp.printf('key saved: %s' % (keypath,)) crtpath = self._saveCertTo(cert, 'users','%s.crt' % name) if outp != None: outp.printf('cert saved: %s' % (crtpath,)) return pkey,cert def genUserCsr(self, name, outp=None): return self._genPkeyCsr(name,'users',outp=outp) def genHostCsr(self, name, outp=None): return self._genPkeyCsr(name,'hosts',outp=outp) def signUserCsr(self, xcsr, signas, outp=None): pkey = xcsr.get_pubkey() name = xcsr.get_subject().CN return self.genUserCert(name, pkey=pkey, signas=signas, outp=outp) def signHostCsr(self, xcsr, signas, outp=None): pkey = xcsr.get_pubkey() name = xcsr.get_subject().CN return self.genHostCert(name, pkey=pkey, signas=signas, outp=outp) def _genPkeyCsr(self, name, mode, outp=None): pkey = crypto.PKey() pkey.generate_key(crypto.TYPE_RSA, 2048) xcsr = crypto.X509Req() xcsr.get_subject().CN = name xcsr.set_pubkey(pkey) xcsr.sign(pkey,'sha256') keypath = self._savePkeyTo(pkey,mode,'%s.key' % name) if outp != None: outp.printf('key saved: %s' % (keypath,)) csrpath = self.getPathJoin(mode,'%s.csr' % name) if os.path.isfile(csrpath): raise DupFileName(path=csrpath) with genfile(csrpath) as fd: fd.write( crypto.dump_certificate_request(crypto.FILETYPE_PEM, xcsr) ) if outp != None: outp.printf('csr saved: %s' %( csrpath,)) def signCertAs(self, cert, signas): cakey = self.getCaKey(signas) cacert = self.getCaCert(signas) cert.set_issuer( cacert.get_subject() ) cert.sign( cakey, 'sha256' ) def selfSignCert(self, cert, pkey): cert.set_issuer( 
cert.get_subject() ) cert.sign( pkey, 'sha256' ) def getUserForHost(self, user, host): for name in iterFqdnUp(host): usercert = '%s@%s' % (user,name) if self.isUserCert(usercert): return usercert def getCaCertPath(self, name): return reqpath(self.certdir,'cas','%s.crt' % name) def getCaKeyPath(self, name): return reqpath(self.certdir,'cas','%s.key' % name) def getHostCertPath(self, name): return reqpath(self.certdir,'hosts','%s.crt' % name) def getHostKeyPath(self, name): return reqpath(self.certdir,'hosts','%s.key' % name) def getUserCertPath(self, name): return reqpath(self.certdir,'users','%s.crt' % name) def getUserKeyPath(self, name): return reqpath(self.certdir,'users','%s.key' % name) def getUserCaPath(self, name): cert = self.getUserCert(name) subj = cert.get_issuer() capath = self.getPathJoin('cas','%s.crt' % subj.CN) if not os.path.isfile(capath): return None return capath def getHostCaPath(self, name): cert = self.getHostCert(name) subj = cert.get_issuer() capath = self.getPathJoin('cas','%s.crt' % subj.CN) if not os.path.isfile(capath): return None return capath def isUserCert(self, name): crtpath = self.getPathJoin('users','%s.crt' % name) return os.path.isfile(crtpath) def isCaCert(self, name): crtpath = self.getPathJoin('cas','%s.crt' % name) return os.path.isfile(crtpath) def isHostCert(self, name): crtpath = self.getPathJoin('hosts','%s.crt' % name) return os.path.isfile(crtpath)
apache-2.0
McLeodMoores/starling
projects/financial-types/src/test/java/com/opengamma/financial/convention/InflationLegConventionTest.java
2807
/** * Copyright (C) 2019 - present McLeod Moores Software Limited. All rights reserved. */ package com.opengamma.financial.convention; import static org.testng.Assert.assertEquals; import java.util.Arrays; import org.testng.annotations.Test; import com.opengamma.financial.AbstractBeanTestCase; import com.opengamma.financial.convention.businessday.BusinessDayConvention; import com.opengamma.financial.convention.businessday.BusinessDayConventions; import com.opengamma.financial.convention.daycount.DayCount; import com.opengamma.financial.convention.daycount.DayCounts; import com.opengamma.id.ExternalId; import com.opengamma.id.ExternalIdBundle; import com.opengamma.util.test.TestGroup; /** * Tests for {@link InflationLegConvention}. */ @Test(groups = TestGroup.UNIT) public class InflationLegConventionTest extends AbstractBeanTestCase { private static final String NAME = "name"; private static final ExternalIdBundle EIDS = ExternalIdBundle.of("eid", "1"); private static final BusinessDayConvention BDC = BusinessDayConventions.FOLLOWING; private static final DayCount DC = DayCounts.ACT_360; private static final boolean IS_EOM = true; private static final int MONTH_LAG = 6; private static final int SPOT_LAG = 2; private static final ExternalId PRICE_INDEX_CONVENTION = ExternalId.of("eid", "2"); private static final InflationLegConvention CONVENTION = new InflationLegConvention(NAME, EIDS, BDC, DC, IS_EOM, MONTH_LAG, SPOT_LAG, PRICE_INDEX_CONVENTION); @Override public JodaBeanProperties<InflationLegConvention> getJodaBeanProperties() { return new JodaBeanProperties<>(InflationLegConvention.class, Arrays.asList("name", "externalIdBundle", "businessDayConvention", "dayCount", "isEOM", "monthLag", "spotLag", "priceIndexConvention"), Arrays.<Object> asList(NAME, EIDS, BDC, DC, IS_EOM, MONTH_LAG, SPOT_LAG, PRICE_INDEX_CONVENTION), Arrays.<Object> asList("other", ExternalIdBundle.of("eid", "2"), BusinessDayConventions.MODIFIED_FOLLOWING, DayCounts.ACT_365, !IS_EOM, MONTH_LAG + 
1, SPOT_LAG + 1, ExternalId.of("eid", "3"))); } /** * Tests the returned type. */ public void testType() { assertEquals(CONVENTION.getConventionType(), InflationLegConvention.TYPE); } /** * Tests that the accept() method visits the right method. */ public void testVisitor() { assertEquals(CONVENTION.accept(TestVisitor.INSTANCE), "visited"); } /** * */ private static final class TestVisitor extends FinancialConventionVisitorAdapter<String> { public static final TestVisitor INSTANCE = new TestVisitor(); private TestVisitor() { } @Override public String visitInflationLegConvention(final InflationLegConvention convention) { return "visited"; } } }
apache-2.0
landawn/AbacusUtil
src/com/landawn/abacus/util/SQLExecutor.java
410612
/* * Copyright (C) 2015 HaiYang Li * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except * in compliance with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express * or implied. See the License for the specific language governing permissions and limitations under * the License. */ package com.landawn.abacus.util; import java.io.Closeable; import java.io.IOException; import java.math.BigDecimal; import java.sql.Connection; import java.sql.Date; import java.sql.Driver; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; import java.sql.Time; import java.sql.Timestamp; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.NoSuchElementException; import java.util.Objects; import java.util.Set; import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; import javax.sql.DataSource; import com.landawn.abacus.DataSet; import com.landawn.abacus.DataSourceManager; import com.landawn.abacus.DataSourceSelector; import com.landawn.abacus.DirtyMarker; import com.landawn.abacus.IsolationLevel; import com.landawn.abacus.annotation.Beta; import com.landawn.abacus.condition.And; import com.landawn.abacus.condition.Condition; import com.landawn.abacus.condition.ConditionFactory.CF; import com.landawn.abacus.condition.Equal; import com.landawn.abacus.dataSource.SQLDataSource; import com.landawn.abacus.exception.AbacusException; import com.landawn.abacus.exception.DuplicatedResultException; import com.landawn.abacus.exception.UncheckedSQLException; import 
com.landawn.abacus.logging.Logger; import com.landawn.abacus.logging.LoggerFactory; import com.landawn.abacus.parser.ParserUtil; import com.landawn.abacus.parser.ParserUtil.EntityInfo; import com.landawn.abacus.parser.ParserUtil.PropInfo; import com.landawn.abacus.type.Type; import com.landawn.abacus.util.Fn.FN; import com.landawn.abacus.util.Fn.Suppliers; import com.landawn.abacus.util.JdbcUtil.BiRowMapper; import com.landawn.abacus.util.JdbcUtil.RowMapper; import com.landawn.abacus.util.SQLBuilder.NAC; import com.landawn.abacus.util.SQLBuilder.NLC; import com.landawn.abacus.util.SQLBuilder.NSC; import com.landawn.abacus.util.SQLBuilder.SP; import com.landawn.abacus.util.SQLTransaction.CreatedBy; import com.landawn.abacus.util.StringUtil.Strings; import com.landawn.abacus.util.u.Nullable; import com.landawn.abacus.util.u.Optional; import com.landawn.abacus.util.u.OptionalBoolean; import com.landawn.abacus.util.u.OptionalByte; import com.landawn.abacus.util.u.OptionalChar; import com.landawn.abacus.util.u.OptionalDouble; import com.landawn.abacus.util.u.OptionalFloat; import com.landawn.abacus.util.u.OptionalInt; import com.landawn.abacus.util.u.OptionalLong; import com.landawn.abacus.util.u.OptionalShort; import com.landawn.abacus.util.function.BiConsumer; import com.landawn.abacus.util.function.Function; import com.landawn.abacus.util.function.Supplier; import com.landawn.abacus.util.stream.Collector; import com.landawn.abacus.util.stream.ObjIteratorEx; import com.landawn.abacus.util.stream.Stream; /** * SQLExecutor is a simple sql/jdbc utility class. 
SQL is supported with different format: <br /> * * <pre> * * <li> <code>INSERT INTO account (first_name, last_name, gui, last_update_time, create_time) VALUES (?, ?, ?, ?, ?)</code></li> * <li> <code>INSERT INTO account (first_name, last_name, gui, last_update_time, create_time) VALUES (#{firstName}, #{lastName}, #{gui}, #{lastUpdateTime}, #{createTime})</code></li> * <li> <code>INSERT INTO account (first_name, last_name, gui, last_update_time, create_time) VALUES (:firstName, :lastName, :gui, :lastUpdateTime, :createTime)</code></li> * * All these kinds of SQLs can be generated by <code>SQLBuilder</code> conveniently. Parameters with format of Object[]/List parameters are supported for parameterized SQL({@code id = ?}). * Parameters with format of Object[]/List/Map/Entity are supported for named parameterized SQL({@code id = :id}). * DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters. * </pre> * * Here is sample of CRUD(create/read/update/delete): * <br />======================================================================== * <pre> * <code> * static final DataSource dataSource = JdbcUtil.createDataSource(...); * static final SQLExecutor sqlExecutor = new SQLExecutor(dataSource); * ... 
* Account account = createAccount(); * * // create * String sql_insert = NE.insert(GUI, FIRST_NAME, LAST_NAME, LAST_UPDATE_TIME, CREATE_TIME).into(Account.class).sql(); * N.println(sql_insert); * sqlExecutor.insert(sql_insert, account); * * // read * String sql_selectByGUI = NE.selectFrom(Account.class, N.asSet(DEVICES)).where(L.eq(GUI, L.QME)).sql(); * N.println(sql_selectByGUI); * Account dbAccount = sqlExecutor.findFirst(Account.class, sql_selectByGUI, account); * assertEquals(account.getFirstName(), dbAccount.getFirstName()); * * // update * String sql_updateByLastName = NE.update(Account.class).set(FIRST_NAME).where(L.eq(LAST_NAME, L.QME)).sql(); * N.println(sql_updateByLastName); * dbAccount.setFirstName("newFirstName"); * sqlExecutor.update(sql_updateByLastName, dbAccount); * * // delete * String sql_deleteByFirstName = NE.deleteFrom(Account.class).where(L.eq(FIRST_NAME, L.QME)).sql(); * N.println(sql_deleteByFirstName); * sqlExecutor.update(sql_deleteByFirstName, dbAccount); * * dbAccount = sqlExecutor.findFirst(Account.class, sql_selectByGUI, account); * assertNull(dbAccount); * </code> * </pre> * ======================================================================== * <br /> * <br /> * If {@code conn} argument is null or not specified, {@code SQLExecutor} is responsible to get the connection from the * internal {@code DataSource}, start and commit/roll back transaction for batch operations if needed, and close the * connection finally. otherwise it's user's responsibility to do such jobs if {@code conn} is specified and not null. <br /> * <br /> * * The general programming way with SQLExeucte is to execute sql scripts(generated by SQLBuilder) with array/list/map/entity by calling (batch)insert/update/delete/query/... methods. * if Transaction is required. 
it can be started: * <pre> * <code> * final SQLTransaction tran = sqlExecutor.beginTransaction(IsolationLevel.READ_COMMITTED); * * try { * // sqlExecutor.insert(...); * // sqlExecutor.update(...); * // sqlExecutor.query(...); * * tran.commit(); * } finally { * // The connection will be automatically closed after the transaction is committed or rolled back. * tran.rollbackIfNotCommitted(); * } * </code> * </pre> * * * Spring Transaction is also supported and Integrated. * If a method of this class is called where a Spring transaction is started with the {@code DataSource} inside this {@code SQLExecutor}, without {@code Connection} parameter specified, * the {@code Connection} started the Spring Transaction will be used. Otherwise a {@code Connection} directly from the inside {@code DataSource}(Connection pool) will be borrowed and used. * * * SQLExecutor is tread-safe.<br /><br /> * * @since 0.8 * * @author Haiyang Li * * @see <a href="./JdbcUtil.html">JdbcUtil</a> * * @see {@link com.landawn.abacus.annotation.ReadOnly} * @see {@link com.landawn.abacus.annotation.ReadOnlyId} * @see {@link com.landawn.abacus.annotation.NonUpdatable} * @see {@link com.landawn.abacus.annotation.Transient} * @see {@link com.landawn.abacus.annotation.Table} * @see {@link com.landawn.abacus.annotation.Column} * * @see <a href="http://docs.oracle.com/javase/8/docs/api/java/sql/Connection.html">http://docs.oracle.com/javase/8/docs/api/java/sql/Connection.html</a> * @see <a href="http://docs.oracle.com/javase/8/docs/api/java/sql/Statement.html">http://docs.oracle.com/javase/8/docs/api/java/sql/Statement.html</a> * @see <a href="http://docs.oracle.com/javase/8/docs/api/java/sql/PreparedStatement.html">http://docs.oracle.com/javase/8/docs/api/java/sql/PreparedStatement.html</a> * @see <a href="http://docs.oracle.com/javase/8/docs/api/java/sql/ResultSet.html">http://docs.oracle.com/javase/8/docs/api/java/sql/ResultSet.html</a> */ public class SQLExecutor implements Closeable { private static 
final Logger logger = LoggerFactory.getLogger(SQLExecutor.class); static final String ID = "id"; static final String QUERY_WITH_DATA_SOURCE = "queryWithDataSource"; private static final ResultExtractor<Boolean> EXISTS_RESULT_SET_EXTRACTOR = new ResultExtractor<Boolean>() { @Override public Boolean extractData(final ResultSet rs, final JdbcSettings jdbcSettings) throws SQLException { JdbcUtil.skip(rs, jdbcSettings.getOffset()); return rs.next(); } }; private static final ResultExtractor<Integer> COUNT_RESULT_SET_EXTRACTOR = new ResultExtractor<Integer>() { @Override public Integer extractData(final ResultSet rs, final JdbcSettings jdbcSettings) throws SQLException { JdbcUtil.skip(rs, jdbcSettings.getOffset()); int cnt = 0; while (rs.next()) { cnt++; } return cnt; } }; private static final ResultExtractor<OptionalBoolean> SINGLE_BOOLEAN_EXTRACTOR = new ResultExtractor<OptionalBoolean>() { @Override public OptionalBoolean extractData(final ResultSet rs, final JdbcSettings jdbcSettings) throws SQLException { JdbcUtil.skip(rs, jdbcSettings.getOffset()); if (rs.next()) { return OptionalBoolean.of(rs.getBoolean(1)); } return OptionalBoolean.empty(); } }; private static final ResultExtractor<OptionalChar> SINGLE_CHAR_EXTRACTOR = new ResultExtractor<OptionalChar>() { @Override public OptionalChar extractData(final ResultSet rs, final JdbcSettings jdbcSettings) throws SQLException { JdbcUtil.skip(rs, jdbcSettings.getOffset()); if (rs.next()) { final String str = rs.getString(1); return OptionalChar.of(str == null || str.length() == 0 ? 
N.CHAR_0 : str.charAt(0)); } return OptionalChar.empty(); } }; private static final ResultExtractor<OptionalByte> SINGLE_BYTE_EXTRACTOR = new ResultExtractor<OptionalByte>() { @Override public OptionalByte extractData(final ResultSet rs, final JdbcSettings jdbcSettings) throws SQLException { JdbcUtil.skip(rs, jdbcSettings.getOffset()); if (rs.next()) { return OptionalByte.of(rs.getByte(1)); } return OptionalByte.empty(); } }; private static final ResultExtractor<OptionalShort> SINGLE_SHORT_EXTRACTOR = new ResultExtractor<OptionalShort>() { @Override public OptionalShort extractData(final ResultSet rs, final JdbcSettings jdbcSettings) throws SQLException { JdbcUtil.skip(rs, jdbcSettings.getOffset()); if (rs.next()) { return OptionalShort.of(rs.getShort(1)); } return OptionalShort.empty(); } }; private static final ResultExtractor<OptionalInt> SINGLE_INT_EXTRACTOR = new ResultExtractor<OptionalInt>() { @Override public OptionalInt extractData(final ResultSet rs, final JdbcSettings jdbcSettings) throws SQLException { JdbcUtil.skip(rs, jdbcSettings.getOffset()); if (rs.next()) { return OptionalInt.of(rs.getInt(1)); } return OptionalInt.empty(); } }; private static final ResultExtractor<OptionalLong> SINGLE_LONG_EXTRACTOR = new ResultExtractor<OptionalLong>() { @Override public OptionalLong extractData(final ResultSet rs, final JdbcSettings jdbcSettings) throws SQLException { JdbcUtil.skip(rs, jdbcSettings.getOffset()); if (rs.next()) { return OptionalLong.of(rs.getLong(1)); } return OptionalLong.empty(); } }; private static final ResultExtractor<OptionalFloat> SINGLE_FLOAT_EXTRACTOR = new ResultExtractor<OptionalFloat>() { @Override public OptionalFloat extractData(final ResultSet rs, final JdbcSettings jdbcSettings) throws SQLException { JdbcUtil.skip(rs, jdbcSettings.getOffset()); if (rs.next()) { return OptionalFloat.of(rs.getFloat(1)); } return OptionalFloat.empty(); } }; private static final ResultExtractor<OptionalDouble> SINGLE_DOUBLE_EXTRACTOR = new 
ResultExtractor<OptionalDouble>() { @Override public OptionalDouble extractData(final ResultSet rs, final JdbcSettings jdbcSettings) throws SQLException { JdbcUtil.skip(rs, jdbcSettings.getOffset()); if (rs.next()) { return OptionalDouble.of(rs.getDouble(1)); } return OptionalDouble.empty(); } }; private static final ResultExtractor<Nullable<BigDecimal>> SINGLE_BIG_DECIMAL_EXTRACTOR = new ResultExtractor<Nullable<BigDecimal>>() { @Override public Nullable<BigDecimal> extractData(final ResultSet rs, final JdbcSettings jdbcSettings) throws SQLException { JdbcUtil.skip(rs, jdbcSettings.getOffset()); if (rs.next()) { return Nullable.of(rs.getBigDecimal(1)); } return Nullable.empty(); } }; private static final ResultExtractor<Nullable<String>> SINGLE_STRING_EXTRACTOR = new ResultExtractor<Nullable<String>>() { @Override public Nullable<String> extractData(final ResultSet rs, final JdbcSettings jdbcSettings) throws SQLException { JdbcUtil.skip(rs, jdbcSettings.getOffset()); if (rs.next()) { return Nullable.of(rs.getString(1)); } return Nullable.empty(); } }; private static final ResultExtractor<Nullable<Date>> SINGLE_DATE_EXTRACTOR = new ResultExtractor<Nullable<Date>>() { @Override public Nullable<Date> extractData(final ResultSet rs, final JdbcSettings jdbcSettings) throws SQLException { JdbcUtil.skip(rs, jdbcSettings.getOffset()); if (rs.next()) { return Nullable.of(rs.getDate(1)); } return Nullable.empty(); } }; private static final ResultExtractor<Nullable<Time>> SINGLE_TIME_EXTRACTOR = new ResultExtractor<Nullable<Time>>() { @Override public Nullable<Time> extractData(final ResultSet rs, final JdbcSettings jdbcSettings) throws SQLException { JdbcUtil.skip(rs, jdbcSettings.getOffset()); if (rs.next()) { return Nullable.of(rs.getTime(1)); } return Nullable.empty(); } }; private static final ResultExtractor<Nullable<Timestamp>> SINGLE_TIMESTAMP_EXTRACTOR = new ResultExtractor<Nullable<Timestamp>>() { @Override public Nullable<Timestamp> extractData(final ResultSet rs, 
final JdbcSettings jdbcSettings) throws SQLException { JdbcUtil.skip(rs, jdbcSettings.getOffset()); if (rs.next()) { return Nullable.of(rs.getTimestamp(1)); } return Nullable.empty(); } }; private static final JdbcUtil.RowMapper<Object> NO_GENERATED_KEY_EXTRACTOR = new JdbcUtil.RowMapper<Object>() { @Override public Object apply(final ResultSet rs) throws SQLException { return null; } }; private static final JdbcUtil.RowMapper<Object> SINGLE_GENERATED_KEY_EXTRACTOR = new JdbcUtil.RowMapper<Object>() { @Override public Object apply(final ResultSet rs) throws SQLException { return JdbcUtil.getColumnValue(rs, 1); } }; private static final JdbcUtil.RowMapper<Object> MULTI_GENERATED_KEY_EXTRACTOR = new JdbcUtil.RowMapper<Object>() { @Override public Object apply(final ResultSet rs) throws SQLException { final List<String> columnLabelList = JdbcUtil.getColumnLabelList(rs); if (columnLabelList.size() == 1) { return JdbcUtil.getColumnValue(rs, 1); } else { return JdbcUtil.BiRowMapper.TO_LINKED_HASH_MAP.apply(rs, columnLabelList); } } }; private static final JdbcUtil.BiRowMapper<Object> NO_BI_GENERATED_KEY_EXTRACTOR = new JdbcUtil.BiRowMapper<Object>() { @Override public Object apply(final ResultSet rs, final List<String> columnLabels) throws SQLException { return null; } }; private static final JdbcUtil.BiRowMapper<Object> SINGLE_BI_GENERATED_KEY_EXTRACTOR = new JdbcUtil.BiRowMapper<Object>() { @Override public Object apply(final ResultSet rs, final List<String> columnLabels) throws SQLException { return JdbcUtil.getColumnValue(rs, 1); } }; private static final JdbcUtil.BiRowMapper<Object> MULTI_BI_GENERATED_KEY_EXTRACTOR = new JdbcUtil.BiRowMapper<Object>() { @Override public Object apply(final ResultSet rs, final List<String> columnLabels) throws SQLException { final List<String> columnLabelList = JdbcUtil.getColumnLabelList(rs); if (columnLabelList.size() == 1) { return JdbcUtil.getColumnValue(rs, 1); } else { return JdbcUtil.BiRowMapper.TO_LINKED_HASH_MAP.apply(rs, 
columnLabelList); } } }; private static final ResultExtractor<ResultSet> RESULT_SET_EXTRACTOR = new ResultExtractor<ResultSet>() { @Override public ResultSet extractData(ResultSet rs, final JdbcSettings jdbcSettings) throws SQLException { return rs; } }; private static final int factor = Math.min(Math.max(1, IOUtil.MAX_MEMORY_IN_MB / 1024), 8); private static final int CACHED_SQL_LENGTH = 1024 * factor; private static final int SQL_CACHE_SIZE = 1000 * factor; private static final Map<String, List<String>> _sqlColumnLabelPool = new ConcurrentHashMap<>(); private final Map<String, List<String>> _tableColumnNamePool = new ConcurrentHashMap<>(); private final DataSource _ds; private final DataSourceManager _dsm; private final DataSourceSelector _dss; private final JdbcSettings _jdbcSettings; private final SQLMapper _sqlMapper; private final NamingPolicy _namingPolicy; private final AsyncExecutor _asyncExecutor; private final boolean _isReadOnly; private final String _dbProudctName; private final String _dbProudctVersion; private final DBVersion _dbVersion; private final IsolationLevel _defaultIsolationLevel; private final AsyncSQLExecutor _asyncSQLExecutor; private final Map<Class<?>, Mapper<?, ?>> mapperPool = new ConcurrentHashMap<>(); /** * * @param dataSource * @see JdbcUtil#createDataSource(String) * @see JdbcUtil#createDataSource(java.io.InputStream) */ public SQLExecutor(final DataSource dataSource) { this(dataSource, null); } /** * * @param dataSource * @param jdbcSettings * @see JdbcUtil#createDataSource(String) * @see JdbcUtil#createDataSource(java.io.InputStream) */ public SQLExecutor(final DataSource dataSource, final JdbcSettings jdbcSettings) { this(dataSource, jdbcSettings, null); } /** * * @param dataSource * @param jdbcSettings * @param sqlMapper * @see JdbcUtil#createDataSource(String) * @see JdbcUtil#createDataSource(java.io.InputStream) */ public SQLExecutor(final DataSource dataSource, final JdbcSettings jdbcSettings, final SQLMapper sqlMapper) { 
this(dataSource, jdbcSettings, sqlMapper, null); } /** * * @param dataSource * @param jdbcSettings * @param sqlMapper * @param namingPolicy * @see JdbcUtil#createDataSourceManager(String) * @see JdbcUtil#createDataSourceManager(java.io.InputStream) */ public SQLExecutor(final DataSource dataSource, final JdbcSettings jdbcSettings, final SQLMapper sqlMapper, final NamingPolicy namingPolicy) { this(dataSource, jdbcSettings, sqlMapper, namingPolicy, null); } /** * * @param dataSource * @param jdbcSettings * @param sqlMapper * @param asyncExecutor * @see JdbcUtil#createDataSource(String) * @see JdbcUtil#createDataSource(java.io.InputStream) */ public SQLExecutor(final DataSource dataSource, final JdbcSettings jdbcSettings, final SQLMapper sqlMapper, final NamingPolicy namingPolicy, final AsyncExecutor asyncExecutor) { this(null, dataSource, jdbcSettings, sqlMapper, namingPolicy, asyncExecutor, false); } /** * * @param dataSourceManager * @see JdbcUtil#createDataSourceManager(String) * @see JdbcUtil#createDataSourceManager(java.io.InputStream) */ public SQLExecutor(final DataSourceManager dataSourceManager) { this(dataSourceManager, null); } /** * * @param dataSourceManager * @param jdbcSettings * @see JdbcUtil#createDataSourceManager(String) * @see JdbcUtil#createDataSourceManager(java.io.InputStream) */ public SQLExecutor(final DataSourceManager dataSourceManager, final JdbcSettings jdbcSettings) { this(dataSourceManager, jdbcSettings, null); } /** * * @param dataSourceManager * @param jdbcSettings * @param sqlMapper * @see JdbcUtil#createDataSourceManager(String) * @see JdbcUtil#createDataSourceManager(java.io.InputStream) */ public SQLExecutor(final DataSourceManager dataSourceManager, final JdbcSettings jdbcSettings, final SQLMapper sqlMapper) { this(dataSourceManager, jdbcSettings, sqlMapper, null); } /** * * @param dataSourceManager * @param jdbcSettings * @param sqlMapper * @param namingPolicy * @see JdbcUtil#createDataSourceManager(String) * @see 
JdbcUtil#createDataSourceManager(java.io.InputStream) */ public SQLExecutor(final DataSourceManager dataSourceManager, final JdbcSettings jdbcSettings, final SQLMapper sqlMapper, final NamingPolicy namingPolicy) { this(dataSourceManager, jdbcSettings, sqlMapper, namingPolicy, null); } /** * * @param dataSourceManager * @param jdbcSettings * @param sqlMapper * @param asyncExecutor * @see JdbcUtil#createDataSourceManager(String) * @see JdbcUtil#createDataSourceManager(java.io.InputStream) */ public SQLExecutor(final DataSourceManager dataSourceManager, final JdbcSettings jdbcSettings, final SQLMapper sqlMapper, final NamingPolicy namingPolicy, final AsyncExecutor asyncExecutor) { this(dataSourceManager, null, jdbcSettings, sqlMapper, namingPolicy, asyncExecutor, false); } protected SQLExecutor(final DataSourceManager dataSourceManager, final DataSource dataSource, final JdbcSettings jdbcSettings, final SQLMapper sqlMapper, final NamingPolicy namingPolicy, final AsyncExecutor asyncExecutor, final boolean isReadOnly) { if (dataSourceManager == null) { this._ds = dataSource; this._dsm = null; this._dss = null; } else { this._ds = dataSourceManager.getPrimaryDataSource(); this._dsm = dataSourceManager; this._dss = dataSourceManager.getDataSourceSelector(); } this._jdbcSettings = (jdbcSettings == null) ? JdbcSettings.create() : jdbcSettings.copy(); if (_jdbcSettings.getBatchSize() == 0) { _jdbcSettings.setBatchSize(JdbcSettings.DEFAULT_BATCH_SIZE); } _jdbcSettings.freeze(); this._sqlMapper = sqlMapper == null ? new SQLMapper() : sqlMapper; this._namingPolicy = namingPolicy == null ? NamingPolicy.LOWER_CASE_WITH_UNDERSCORE : namingPolicy; this._asyncExecutor = asyncExecutor == null ? 
new AsyncExecutor(64, 300, TimeUnit.SECONDS) : asyncExecutor; this._isReadOnly = isReadOnly; int originalIsolationLevel; Connection conn = getConnection(); try { _dbProudctName = conn.getMetaData().getDatabaseProductName(); _dbProudctVersion = conn.getMetaData().getDatabaseProductVersion(); _dbVersion = JdbcUtil.getDBVersion(conn); originalIsolationLevel = conn.getTransactionIsolation(); } catch (SQLException e) { throw new UncheckedSQLException(e); } finally { closeConnection(conn); } final IsolationLevel tmp = this._ds instanceof SQLDataSource ? ((SQLDataSource) this._ds).getDefaultIsolationLevel() : IsolationLevel.DEFAULT; _defaultIsolationLevel = tmp == IsolationLevel.DEFAULT ? IsolationLevel.valueOf(originalIsolationLevel) : tmp; this._asyncSQLExecutor = new AsyncSQLExecutor(this, _asyncExecutor); } // public static SQLExecutor create(final String dataSourceFile) { // return new SQLExecutor(JdbcUtil.createDataSourceManager(dataSourceFile)); // } // // public static SQLExecutor create(final InputStream dataSourceInputStream) { // return new SQLExecutor(JdbcUtil.createDataSourceManager(dataSourceInputStream)); // } // // public static SQLExecutor create(final String url, final String user, final String password) { // return new SQLExecutor(JdbcUtil.createDataSource(url, user, password)); // } // // public static SQLExecutor create(final String driver, final String url, final String user, final String password) { // return new SQLExecutor(JdbcUtil.createDataSource(driver, url, user, password)); // } // // public static SQLExecutor create(final Class<? extends Driver> driverClass, final String url, final String user, final String password) { // return new SQLExecutor(JdbcUtil.createDataSource(driverClass, url, user, password)); // } // // /** // * // * @param props refer to Connection.xsd for the supported properties. 
//     * @return
//     */
//    public static SQLExecutor create(final Map<String, ?> props) {
//        return new SQLExecutor(JdbcUtil.createDataSource(props));
//    }
//
//    public static SQLExecutor create(final DataSource sqlDataSource) {
//        return new SQLExecutor(JdbcUtil.wrap(sqlDataSource));
//    }
//
//    public SQLMapper sqlMapper() {
//        return _sqlMapper;
//    }

/** Beta factory: wraps a plain url/user/password triple in a new SQLExecutor. */
@Beta
public static SQLExecutor w(final String url, final String user, final String password) {
    return new SQLExecutor(JdbcUtil.createDataSource(url, user, password));
}

/** Beta factory: same as above but with an explicit JDBC driver class name. */
@Beta
public static SQLExecutor w(final String driver, final String url, final String user, final String password) {
    return new SQLExecutor(JdbcUtil.createDataSource(driver, url, user, password));
}

/** Beta factory: same as above but with an explicit JDBC driver {@code Class}. */
@Beta
public static SQLExecutor w(final Class<? extends Driver> driverClass, final String url, final String user, final String password) {
    return new SQLExecutor(JdbcUtil.createDataSource(driverClass, url, user, password));
}

/**
 * Returns the (cached) CRUD mapper for the given entity class.
 * Mappers are created lazily and cached in {@code mapperPool}.
 *
 * @param targetClass an entity class with getter/setter methods
 * @return the mapper bound to this executor and its naming policy
 */
public <T, ID> Mapper<T, ID> mapper(final Class<T> targetClass) {
    Mapper<T, ID> mapper = (Mapper<T, ID>) mapperPool.get(targetClass);

    if (mapper == null) {
        N.checkArgument(ClassUtil.isEntity(targetClass), ClassUtil.getCanonicalClassName(targetClass) + " is not an entity class with getter/setter methods");

        // Side effect: registers the property/column name mapping for SQL builders.
        SQLBuilder.registerEntityPropColumnNameMap(targetClass);

        mapper = new Mapper<T, ID>(targetClass, this, this._namingPolicy);
        mapperPool.put(targetClass, mapper);
    }

    return mapper;
}

/** Returns the asynchronous facade of this executor. */
public AsyncSQLExecutor async() {
    return _asyncSQLExecutor;
}

/** Returns the primary data source. */
public DataSource dataSource() {
    return _ds;
}

/** Returns the (frozen) default JDBC settings. */
public JdbcSettings jdbcSettings() {
    return _jdbcSettings;
}

/** Returns the database product name captured at construction (name keeps a historical typo). */
public String dbProudctName() {
    return _dbProudctName;
}

/** Returns the database product version captured at construction (name keeps a historical typo). */
public String dbProudctVersion() {
    return _dbProudctVersion;
}

/** Returns the parsed DB version captured at construction. */
public DBVersion dbVersion() {
    return _dbVersion;
}

// ---------------------------------------------------------------------------
// insert(...) overloads: all delegate to the full-argument implementation below,
// filling in StatementSetter.DEFAULT / null settings / a default key extractor.
// ---------------------------------------------------------------------------

@SafeVarargs
public final <ID> ID insert(final String sql, final Object... parameters) throws UncheckedSQLException {
    return insert(sql, StatementSetter.DEFAULT, parameters);
}

@SafeVarargs
public final <ID> ID insert(final String sql, final StatementSetter statementSetter, final Object... parameters) throws UncheckedSQLException {
    return insert(sql, statementSetter, null, parameters);
}

@SafeVarargs
public final <ID> ID insert(final String sql, final JdbcSettings jdbcSettings, final Object... parameters) throws UncheckedSQLException {
    return insert(sql, StatementSetter.DEFAULT, jdbcSettings, parameters);
}

@SafeVarargs
public final <ID> ID insert(final String sql, final StatementSetter statementSetter, final JdbcSettings jdbcSettings, final Object... parameters)
        throws UncheckedSQLException {
    return insert(sql, statementSetter, (JdbcUtil.RowMapper<ID>) getGeneratedKeyExtractor(jdbcSettings), jdbcSettings, parameters);
}

@SafeVarargs
public final <ID> ID insert(final String sql, final StatementSetter statementSetter, final JdbcUtil.RowMapper<ID> autoGeneratedKeyExtractor,
        final JdbcSettings jdbcSettings, final Object... parameters) throws UncheckedSQLException {
    return insert(null, sql, statementSetter, autoGeneratedKeyExtractor, jdbcSettings, parameters);
}

@SafeVarargs
public final <ID> ID insert(final Connection conn, final String sql, final Object... parameters) throws UncheckedSQLException {
    return insert(conn, sql, StatementSetter.DEFAULT, parameters);
}

@SafeVarargs
public final <ID> ID insert(final Connection conn, final String sql, final StatementSetter statementSetter, final Object... parameters)
        throws UncheckedSQLException {
    return insert(conn, sql, statementSetter, null, parameters);
}

// NOTE(review): this overload lacks @SafeVarargs unlike its siblings — presumably an oversight;
// left unchanged here since only comments may be added in this pass.
public final <ID> ID insert(final Connection conn, final String sql, final JdbcSettings jdbcSettings, final Object... parameters)
        throws UncheckedSQLException {
    return insert(conn, sql, StatementSetter.DEFAULT, jdbcSettings, parameters);
}

@SafeVarargs
public final <ID> ID insert(final Connection conn, final String sql, StatementSetter statementSetter, JdbcSettings jdbcSettings, final Object... parameters)
        throws UncheckedSQLException {
    return insert(conn, sql, statementSetter, (JdbcUtil.RowMapper<ID>) getGeneratedKeyExtractor(jdbcSettings), jdbcSettings, parameters);
}

/**
 * Executes an INSERT and returns the auto-generated key (if any).
 * When the single parameter is an entity, generated ids are written back into it and its
 * dirty-property markers are cleared.
 *
 * @see #batchInsert(Connection, String, StatementSetter, JdbcSettings, String, Object[])
 */
@SuppressWarnings({ "unchecked", "deprecation" })
@SafeVarargs
public final <ID> ID insert(final Connection conn, final String sql, StatementSetter statementSetter, JdbcUtil.RowMapper<ID> autoGeneratedKeyExtractor,
        JdbcSettings jdbcSettings, final Object... parameters) throws UncheckedSQLException {
    final NamedSQL namedSQL = getNamedSQL(sql);
    final boolean isEntityOrMapParameter = isEntityOrMapParameter(namedSQL, parameters);
    final boolean isEntity = isEntityOrMapParameter && ClassUtil.isEntity(parameters[0].getClass());
    final Collection<String> idPropNames = isEntity ? ClassUtil.getIdFieldNames(parameters[0].getClass()) : null;
    // Ask the driver for generated keys unless the entity itself supplies every id in the SQL.
    final boolean autoGeneratedKeys = isEntity == false
            || (N.notNullOrEmpty(idPropNames) && !namedSQL.getNamedParameters().containsAll(idPropNames));

    statementSetter = checkStatementSetter(namedSQL, statementSetter);
    jdbcSettings = checkJdbcSettings(jdbcSettings, namedSQL, _sqlMapper.getAttrs(sql));
    autoGeneratedKeyExtractor = checkGeneratedKeysExtractor(autoGeneratedKeyExtractor, namedSQL, parameters);

    DataSource ds = null;
    Connection localConn = null;
    Object result = null;
    PreparedStatement stmt = null;

    try {
        ds = getDataSource(namedSQL.getParameterizedSQL(), parameters, jdbcSettings);

        // Uses the caller's connection when given; otherwise borrows one (released in finally).
        localConn = getConnection(conn, ds, jdbcSettings, SQLOperation.INSERT);

        stmt = prepareStatement(ds, localConn, namedSQL, statementSetter, jdbcSettings, autoGeneratedKeys, false, parameters);

        result = executeInsert(namedSQL, stmt, autoGeneratedKeyExtractor, autoGeneratedKeys);
    } catch (SQLException e) {
        String msg = AbacusException.getErrorMsg(e) + ". [SQL] " + namedSQL.getNamedSQL();
        throw new UncheckedSQLException(msg, e);
    } finally {
        close(stmt);
        close(localConn, conn, ds);
    }

    if (isEntityOrMapParameter) {
        if (result != null) {
            Object parameter_0 = parameters[0];

            if (parameter_0 instanceof Map) {
                // Don't update input map ?
            } else if (autoGeneratedKeys) {
                // Write the generated key(s) back into the entity's id properties.
                setIds(result, parameter_0, namedSQL);
            }
        }

        if (parameters[0] instanceof DirtyMarker) {
            ((DirtyMarker) parameters[0]).dirtyPropNames().clear();
        }
    }

    return (ID) result;
}

/** Picks single- vs multi-column generated-key extractor based on how many columns are requested back. */
private <ID> JdbcUtil.RowMapper<ID> getGeneratedKeyExtractor(JdbcSettings jdbcSettings) {
    if (jdbcSettings != null && ((N.notNullOrEmpty(jdbcSettings.getReturnedColumnIndexes()) && jdbcSettings.getReturnedColumnIndexes().length > 1)
            || (N.notNullOrEmpty(jdbcSettings.getReturnedColumnNames()) && jdbcSettings.getReturnedColumnNames().length > 1))) {
        return (JdbcUtil.RowMapper<ID>) MULTI_GENERATED_KEY_EXTRACTOR;
    } else {
        return (JdbcUtil.RowMapper<ID>) SINGLE_GENERATED_KEY_EXTRACTOR;
    }
}

/** BiRowMapper twin of {@link #getGeneratedKeyExtractor}, used by the batch-insert path. */
private <ID> JdbcUtil.BiRowMapper<ID> getBiGeneratedKeyExtractor(JdbcSettings jdbcSettings) {
    if (jdbcSettings != null && ((N.notNullOrEmpty(jdbcSettings.getReturnedColumnIndexes()) && jdbcSettings.getReturnedColumnIndexes().length > 1)
            || (N.notNullOrEmpty(jdbcSettings.getReturnedColumnNames()) && jdbcSettings.getReturnedColumnNames().length > 1))) {
        return (JdbcUtil.BiRowMapper<ID>) MULTI_BI_GENERATED_KEY_EXTRACTOR;
    } else {
        return (JdbcUtil.BiRowMapper<ID>) SINGLE_BI_GENERATED_KEY_EXTRACTOR;
    }
}

/**
 * Refines a default extractor for an entity parameter: counts how many id properties are NOT
 * bound in the SQL (i.e. will be generated) and returns the matching NO/SINGLE/MULTI extractor.
 * A caller-supplied custom extractor is returned untouched.
 */
@SuppressWarnings("rawtypes")
static <ID> JdbcUtil.RowMapper<ID> checkGeneratedKeysExtractor(JdbcUtil.RowMapper<ID> autoGeneratedKeyExtractor, final NamedSQL namedSQL,
        final Object[] parameters) {
    if ((autoGeneratedKeyExtractor == null || autoGeneratedKeyExtractor == SINGLE_GENERATED_KEY_EXTRACTOR
            || autoGeneratedKeyExtractor == MULTI_GENERATED_KEY_EXTRACTOR) //
            && isEntityOrMapParameter(namedSQL, parameters) && ClassUtil.isEntity(parameters[0].getClass())) {
        final Class<?> cls = parameters[0].getClass();

        @SuppressWarnings("deprecation")
        final List<String> idPropNames = ClassUtil.getIdFieldNames(cls);

        if (idPropNames.size() == 0) {
            return (JdbcUtil.RowMapper) NO_GENERATED_KEY_EXTRACTOR;
        } else if (idPropNames.size() == 1) {
            if (namedSQL.getNamedParameters().contains(idPropNames.get(0))) {
                // The single id is provided by the caller, so nothing will be generated.
                return (JdbcUtil.RowMapper) NO_GENERATED_KEY_EXTRACTOR;
            } else {
                return (JdbcUtil.RowMapper) SINGLE_GENERATED_KEY_EXTRACTOR;
            }
        } else {
            // Collect id properties that are not bound as named parameters (lazily allocated).
            List<String> generatedIdPropNames = null;

            for (String idPropName : idPropNames) {
                if (!namedSQL.getNamedParameters().contains(idPropName)) {
                    if (generatedIdPropNames == null) {
                        generatedIdPropNames = new ArrayList<>();
                    }

                    generatedIdPropNames.add(idPropName);
                }
            }

            if (N.isNullOrEmpty(generatedIdPropNames)) {
                return (JdbcUtil.RowMapper) NO_GENERATED_KEY_EXTRACTOR;
            } else if (generatedIdPropNames.size() == 1) {
                return (JdbcUtil.RowMapper) SINGLE_GENERATED_KEY_EXTRACTOR;
            } else {
                return (JdbcUtil.RowMapper) MULTI_GENERATED_KEY_EXTRACTOR;
            }
        }
    }

    return autoGeneratedKeyExtractor;
}

/** BiRowMapper twin of {@link #checkGeneratedKeysExtractor}; identical selection logic. */
@SuppressWarnings("rawtypes")
static <ID> JdbcUtil.BiRowMapper<ID> checkBiGeneratedKeysExtractor(JdbcUtil.BiRowMapper<ID> autoGeneratedKeyExtractor, final NamedSQL namedSQL,
        final Object... parameters) {
    if ((autoGeneratedKeyExtractor == null || autoGeneratedKeyExtractor == SINGLE_BI_GENERATED_KEY_EXTRACTOR
            || autoGeneratedKeyExtractor == MULTI_BI_GENERATED_KEY_EXTRACTOR) //
            && isEntityOrMapParameter(namedSQL, parameters) && ClassUtil.isEntity(parameters[0].getClass())) {
        final Class<?> cls = parameters[0].getClass();

        @SuppressWarnings("deprecation")
        final List<String> idPropNames = ClassUtil.getIdFieldNames(cls);

        if (idPropNames.size() == 0) {
            return (JdbcUtil.BiRowMapper) NO_BI_GENERATED_KEY_EXTRACTOR;
        } else if (idPropNames.size() == 1) {
            if (namedSQL.getNamedParameters().contains(idPropNames.get(0))) {
                return (JdbcUtil.BiRowMapper) NO_BI_GENERATED_KEY_EXTRACTOR;
            } else {
                return (JdbcUtil.BiRowMapper) SINGLE_BI_GENERATED_KEY_EXTRACTOR;
            }
        } else {
            List<String> generatedIdPropNames = null;

            for (String idPropName : idPropNames) {
                if (!namedSQL.getNamedParameters().contains(idPropName)) {
                    if (generatedIdPropNames == null) {
                        generatedIdPropNames = new ArrayList<>();
                    }

                    generatedIdPropNames.add(idPropName);
                }
            }

            if (N.isNullOrEmpty(generatedIdPropNames)) {
                return (JdbcUtil.BiRowMapper) NO_BI_GENERATED_KEY_EXTRACTOR;
            } else if (generatedIdPropNames.size() == 1) {
                return (JdbcUtil.BiRowMapper) SINGLE_BI_GENERATED_KEY_EXTRACTOR;
            } else {
                return (JdbcUtil.BiRowMapper) MULTI_BI_GENERATED_KEY_EXTRACTOR;
            }
        }
    }

    return autoGeneratedKeyExtractor;
}

/**
 * Writes generated key(s) ({@code ids}: a scalar or a Map of column-name to value) back into the
 * id properties of {@code entity}. Only id properties that were NOT bound in the SQL and still
 * hold a default value are touched. Reflection failures are logged, never thrown.
 */
@SuppressWarnings("deprecation")
static void setIds(final Object ids, final Object entity, final NamedSQL namedSQL) {
    final Class<?> entityClass = entity.getClass();
    final EntityInfo entityInfo = ParserUtil.getEntityInfo(entityClass);
    final List<String> idPropNameSet = ClassUtil.getIdFieldNames(entityClass);
    final Collection<String> idPropNames = idPropNameSet.size() <= 1 ? idPropNameSet : N.difference(idPropNameSet, namedSQL.getNamedParameters());
    PropInfo propInfo = null;

    try {
        for (String idPropName : idPropNames) {
            if (namedSQL.getNamedParameters().contains(idPropName)) {
                // This id was supplied by the caller — nothing was generated for it.
                continue;
            }

            propInfo = entityInfo.getPropInfo(idPropName);

            if (propInfo != null && propInfo.getMethod != null && propInfo.setMethod != null) {
                final Object idPropValue = propInfo.getPropValue(entity);

                if (JdbcUtil.isDefaultIdPropValue(idPropValue)) {
                    if (idPropNames.size() > 1) {
                        // Composite generated keys must come back as a Map keyed by property name.
                        if (ids instanceof Map && ((Map<String, Object>) ids).containsKey(idPropName)) {
                            propInfo.setPropValue(entity, ((Map<String, Object>) ids).get(idPropName));
                        }
                    } else {
                        // Single id: a Map result that doesn't fit the setter is unwrapped first.
                        if (ids instanceof Map && !propInfo.setMethod.getParameterTypes()[0].isAssignableFrom(ids.getClass())) {
                            if (((Map<String, Object>) ids).containsKey(idPropName)) {
                                propInfo.setPropValue(entity, ((Map<String, Object>) ids).get(idPropName));
                            } else {
                                propInfo.setPropValue(entity, ((Map<String, Object>) ids).values().iterator().next());
                            }
                        } else {
                            propInfo.setPropValue(entity, ids);
                        }
                    }
                }
            } else {
                if (logger.isWarnEnabled()) {
                    logger.warn("Failed to set the returned id property to entity. no get/set method for id property (" + idPropName + ") found. ");
                }
            }
        }
    } catch (Exception e) {
        logger.error("Failed to set the returned id property to entity", e);
    }
}

/**
 * Runs the prepared INSERT and, when requested, reads the first generated key.
 * Key-retrieval failures are logged and swallowed (the insert itself has already succeeded).
 *
 * @throws AbacusException if this executor is read-only
 */
protected <ID> ID executeInsert(final NamedSQL namedSQL, final PreparedStatement stmt, final JdbcUtil.RowMapper<ID> autoGeneratedKeyExtractor,
        final boolean autoGeneratedKeys) throws SQLException {
    if (_isReadOnly) {
        throw new AbacusException("This SQL Executor is configured for read-only");
    }

    stmt.executeUpdate();

    ID id = null;

    if (autoGeneratedKeys) {
        ResultSet rs = null;
        try {
            rs = stmt.getGeneratedKeys();
            id = rs.next() ? autoGeneratedKeyExtractor.apply(rs) : null;
        } catch (SQLException e) {
            logger.error("Failed to retrieve the auto-generated Ids", e);
        } finally {
            close(rs);
        }
    }

    return id;
}

// ---------------------------------------------------------------------------
// batchInsert(...) overloads: same delegation scheme as insert(...).
// ---------------------------------------------------------------------------

public <ID> List<ID> batchInsert(final String sql, final List<?> parametersList) throws UncheckedSQLException {
    return batchInsert(sql, StatementSetter.DEFAULT, parametersList);
}

public <ID> List<ID> batchInsert(final String sql, final StatementSetter statementSetter, final List<?> parametersList) throws UncheckedSQLException {
    return batchInsert(sql, statementSetter, null, parametersList);
}

public <ID> List<ID> batchInsert(final String sql, final JdbcSettings jdbcSettings, final List<?> parametersList) throws UncheckedSQLException {
    return batchInsert(sql, StatementSetter.DEFAULT, jdbcSettings, parametersList);
}

public <ID> List<ID> batchInsert(final String sql, final StatementSetter statementSetter, final JdbcSettings jdbcSettings, final List<?> parametersList)
        throws UncheckedSQLException {
    return batchInsert(sql, statementSetter, (JdbcUtil.BiRowMapper<ID>) getBiGeneratedKeyExtractor(jdbcSettings), jdbcSettings, parametersList);
}

public <ID> List<ID> batchInsert(final String sql, final StatementSetter statementSetter, final JdbcUtil.BiRowMapper<ID> autoGeneratedKeyExtractor,
        final JdbcSettings jdbcSettings, final List<?> parametersList) throws UncheckedSQLException {
    return batchInsert(null, sql, statementSetter, autoGeneratedKeyExtractor, jdbcSettings, parametersList);
}

public <ID> List<ID> batchInsert(final Connection conn, final String sql, final List<?> parametersList) throws UncheckedSQLException {
    return batchInsert(conn, sql, StatementSetter.DEFAULT, parametersList);
}

public <ID> List<ID> batchInsert(final Connection conn, final String sql, final StatementSetter statementSetter, final List<?> parametersList)
        throws UncheckedSQLException {
    return batchInsert(conn, sql, statementSetter, null, parametersList);
}

public <ID> List<ID> batchInsert(final Connection conn, final String sql, final JdbcSettings jdbcSettings, final List<?> parametersList)
        throws UncheckedSQLException {
    return batchInsert(conn, sql, StatementSetter.DEFAULT, jdbcSettings, parametersList);
}

public <ID> List<ID> batchInsert(final Connection conn, final String sql, StatementSetter statementSetter, JdbcSettings jdbcSettings,
        final List<?> parametersList) throws UncheckedSQLException {
    return batchInsert(conn, sql, statementSetter, (JdbcUtil.BiRowMapper<ID>) getBiGeneratedKeyExtractor(jdbcSettings), jdbcSettings, parametersList);
}

/**
 * Executes an INSERT once per element of {@code parametersList} via JDBC batching.
 * When the list is larger than the configured batch size and no caller connection is given,
 * the whole run is wrapped in a local transaction (auto-commit off, rolled back on failure,
 * connection state restored in finally). Generated keys are collected per chunk and, when the
 * parameters are entities and one key came back per row, written back into the entities.
 *
 * @return the list of generated keys (may be shorter than the input on partial key retrieval)
 */
@SuppressWarnings("deprecation")
public <ID> List<ID> batchInsert(final Connection conn, final String sql, StatementSetter statementSetter,
        JdbcUtil.BiRowMapper<ID> autoGeneratedKeyExtractor, JdbcSettings jdbcSettings, final List<?> parametersList) throws UncheckedSQLException {
    N.checkArgNotNullOrEmpty(parametersList, "parametersList");

    final NamedSQL namedSQL = getNamedSQL(sql);
    // The first element decides entity-vs-map handling for the whole batch.
    final Object parameters_0 = parametersList.get(0);
    final boolean isEntityOrMapParameter = isEntityOrMapParameter(namedSQL, parameters_0);
    final boolean isEntity = isEntityOrMapParameter && ClassUtil.isEntity(parameters_0.getClass());
    final Collection<String> idPropNames = isEntity ? ClassUtil.getIdFieldNames(parameters_0.getClass()) : null;
    final boolean autoGeneratedKeys = isEntity == false
            || (N.notNullOrEmpty(idPropNames) && !namedSQL.getNamedParameters().containsAll(idPropNames));

    statementSetter = checkStatementSetter(namedSQL, statementSetter);
    jdbcSettings = checkJdbcSettings(jdbcSettings, namedSQL, _sqlMapper.getAttrs(sql));
    autoGeneratedKeyExtractor = checkBiGeneratedKeysExtractor(autoGeneratedKeyExtractor, namedSQL, parametersList.get(0));

    final int len = parametersList.size();
    final int batchSize = getBatchSize(jdbcSettings);
    List<ID> resultIdList = new ArrayList<>(len);

    DataSource ds = null;
    Connection localConn = null;
    PreparedStatement stmt = null;
    int originalIsolationLevel = 0;
    boolean autoCommit = true;

    try {
        ds = getDataSource(namedSQL.getParameterizedSQL(), parametersList, jdbcSettings);

        localConn = getConnection(conn, ds, jdbcSettings, SQLOperation.INSERT);

        try {
            // Remember connection state so it can be restored in finally.
            originalIsolationLevel = localConn.getTransactionIsolation();
            autoCommit = localConn.getAutoCommit();
        } catch (SQLException e) {
            close(localConn, conn, ds);

            throw new UncheckedSQLException(e);
        }

        if ((conn == null) && (len > batchSize)) {
            // Multi-chunk batch on our own connection: run it in one local transaction.
            localConn.setAutoCommit(false);
            setIsolationLevel(jdbcSettings, localConn);
        }

        stmt = prepareStatement(ds, localConn, namedSQL, statementSetter, jdbcSettings, autoGeneratedKeys, true, parametersList);

        if (len <= batchSize) {
            for (int i = 0; i < len; i++) {
                statementSetter.setParameters(namedSQL, stmt, N.asArray(parametersList.get(i)));
                stmt.addBatch();
            }

            executeBatchInsert(resultIdList, namedSQL, stmt, autoGeneratedKeyExtractor, autoGeneratedKeys);
        } else {
            int num = 0;

            for (int i = 0; i < len; i++) {
                statementSetter.setParameters(namedSQL, stmt, N.asArray(parametersList.get(i)));
                stmt.addBatch();
                num++;

                if ((num % batchSize) == 0) {
                    executeBatchInsert(resultIdList, namedSQL, stmt, autoGeneratedKeyExtractor, autoGeneratedKeys);
                }
            }

            // Flush the trailing partial chunk, if any.
            if ((num % batchSize) > 0) {
                executeBatchInsert(resultIdList, namedSQL, stmt, autoGeneratedKeyExtractor, autoGeneratedKeys);
            }
        }

        if ((conn == null) && (len > batchSize) && autoCommit == true) {
            localConn.commit();
        }
    } catch (SQLException e) {
        if ((conn == null) && (len > batchSize) && autoCommit == true) {
            if (logger.isWarnEnabled()) {
                logger.warn("Trying to roll back ...");
            }

            try {
                localConn.rollback();

                if (logger.isWarnEnabled()) {
                    logger.warn("succeeded to roll back");
                }
            } catch (SQLException e1) {
                logger.error("Failed to roll back", e1);
            }
        }

        String msg = AbacusException.getErrorMsg(e) + ". [SQL] " + namedSQL.getNamedSQL();
        throw new UncheckedSQLException(msg, e);
    } finally {
        if ((conn == null) && (len > batchSize)) {
            try {
                localConn.setAutoCommit(autoCommit);
                localConn.setTransactionIsolation(originalIsolationLevel);
            } catch (SQLException e) {
                logger.error("Failed to reset AutoCommit", e);
            }
        }

        close(stmt);
        close(localConn, conn, ds);
    }

    if (isEntityOrMapParameter(namedSQL, parametersList.get(0))) {
        if (N.notNullOrEmpty(resultIdList)) {
            if (resultIdList.size() == len && ClassUtil.isEntity(parametersList.get(0).getClass())) {
                for (int i = 0; i < len; i++) {
                    setIds(resultIdList.get(i), parametersList.get(i), namedSQL);
                }
            } else {
                if (logger.isWarnEnabled()) {
                    logger.warn(
                            "Failed to set the returned id property to entity/map. because the size of returned key not equals the lenght of the input arrray");
                }
            }
        }

        if (N.firstNonNull(parametersList).orNull() instanceof DirtyMarker) {
            for (Object parameters : parametersList) {
                ((DirtyMarker) parameters).dirtyPropNames().clear();
            }
        }
    }

    return resultIdList;
}

/**
 * Applies the isolation level from the settings (falling back to this executor's default)
 * to the connection, skipping the JDBC call when it is already in effect.
 */
private void setIsolationLevel(JdbcSettings jdbcSettings, Connection localConn) throws SQLException {
    final int isolationLevel = jdbcSettings.getIsolationLevel() == null || jdbcSettings.getIsolationLevel() == IsolationLevel.DEFAULT
            ? _defaultIsolationLevel.intValue()
            : jdbcSettings.getIsolationLevel().intValue();

    if (isolationLevel == localConn.getTransactionIsolation()) {
        // ignore.
    } else {
        localConn.setTransactionIsolation(isolationLevel);
    }
}

/**
 * Flushes the current JDBC batch and appends any generated keys to {@code resultIdList}.
 * Key-retrieval failures are logged and swallowed; the batch is always cleared afterwards.
 *
 * @throws AbacusException if this executor is read-only
 */
protected <ID> void executeBatchInsert(final List<ID> resultIdList, final NamedSQL namedSQL, final PreparedStatement stmt,
        final JdbcUtil.BiRowMapper<ID> autoGeneratedKeyExtractor, final boolean autoGeneratedKeys) throws SQLException {
    if (_isReadOnly) {
        throw new AbacusException("This SQL Executor is configured for read-only");
    }

    stmt.executeBatch();

    if (autoGeneratedKeys) {
        ResultSet rs = null;

        try {
            rs = stmt.getGeneratedKeys();
            final List<String> columnLabels = JdbcUtil.getColumnLabelList(rs);

            while (rs.next()) {
                resultIdList.add(autoGeneratedKeyExtractor.apply(rs, columnLabels));
            }
        } catch (SQLException e) {
            logger.error("Failed to retrieve the auto-generated Ids", e);
        } finally {
            close(rs);
        }
    }

    stmt.clearBatch();
}

// ---------------------------------------------------------------------------
// update(...) overloads: all delegate to the full-argument implementation below.
// ---------------------------------------------------------------------------

@SafeVarargs
public final int update(final String sql, final Object... parameters) throws UncheckedSQLException {
    return update(sql, StatementSetter.DEFAULT, parameters);
}

@SafeVarargs
public final int update(final String sql, final StatementSetter statementSetter, final Object... parameters) throws UncheckedSQLException {
    return update(sql, statementSetter, null, parameters);
}

@SafeVarargs
public final int update(final String sql, final JdbcSettings jdbcSettings, final Object... parameters) throws UncheckedSQLException {
    return update(sql, StatementSetter.DEFAULT, jdbcSettings, parameters);
}

@SafeVarargs
public final int update(final String sql, final StatementSetter statementSetter, final JdbcSettings jdbcSettings, final Object... parameters)
        throws UncheckedSQLException {
    return update(null, sql, statementSetter, jdbcSettings, parameters);
}

@SafeVarargs
public final int update(final Connection conn, final String sql, final Object... parameters) throws UncheckedSQLException {
    return update(conn, sql, StatementSetter.DEFAULT, parameters);
}

@SafeVarargs
public final int update(final Connection conn, final String sql, final StatementSetter statementSetter, final Object... parameters)
        throws UncheckedSQLException {
    return update(conn, sql, statementSetter, null, parameters);
}

@SafeVarargs
public final int update(final Connection conn, final String sql, final JdbcSettings jdbcSettings, final Object... parameters) throws UncheckedSQLException {
    return update(conn, sql, StatementSetter.DEFAULT, jdbcSettings, parameters);
}

/**
 * Executes an UPDATE/DELETE and returns the affected-row count. On success with an entity
 * parameter, the bound properties are marked clean on its DirtyMarker.
 *
 * @see #batchUpdate(Connection, String, StatementSetter, JdbcSettings, Object[])
 */
@SuppressWarnings("deprecation")
@SafeVarargs
public final int update(final Connection conn, final String sql, StatementSetter statementSetter, JdbcSettings jdbcSettings, final Object... parameters)
        throws UncheckedSQLException {
    final NamedSQL namedSQL = getNamedSQL(sql);
    statementSetter = checkStatementSetter(namedSQL, statementSetter);
    jdbcSettings = checkJdbcSettings(jdbcSettings, namedSQL, _sqlMapper.getAttrs(sql));

    DataSource ds = null;
    Connection localConn = null;
    PreparedStatement stmt = null;

    try {
        ds = getDataSource(namedSQL.getParameterizedSQL(), parameters, jdbcSettings);

        localConn = getConnection(conn, ds, jdbcSettings, SQLOperation.UPDATE);

        stmt = prepareStatement(ds, localConn, namedSQL, statementSetter, jdbcSettings, false, false, parameters);

        final int result = executeUpdate(namedSQL, stmt);

        if (isEntityOrMapParameter(namedSQL, parameters)) {
            if (parameters[0] instanceof DirtyMarker) {
                // The named parameters were just persisted — clear their dirty flags.
                ((DirtyMarker) parameters[0]).markDirty(namedSQL.getNamedParameters(), false);
            }
        }

        return result;
    } catch (SQLException e) {
        String msg = AbacusException.getErrorMsg(e) + ". [SQL] " + namedSQL.getNamedSQL();
        throw new UncheckedSQLException(msg, e);
    } finally {
        close(stmt);
        close(localConn, conn, ds);
    }
}

/**
 * Runs the prepared update.
 *
 * @throws AbacusException if this executor is read-only
 */
protected int executeUpdate(final NamedSQL namedSQL, final PreparedStatement stmt) throws SQLException {
    if (_isReadOnly) {
        throw new AbacusException("This SQL Executor is configured for read-only");
    }

    return stmt.executeUpdate();
}

// ---------------------------------------------------------------------------
// batchUpdate(...) overloads: all delegate to the full-argument implementation.
// ---------------------------------------------------------------------------

public int batchUpdate(final String sql, final List<?> parametersList) throws UncheckedSQLException {
    return batchUpdate(sql, StatementSetter.DEFAULT, parametersList);
}

public int batchUpdate(final String sql, final StatementSetter statementSetter, final List<?> parametersList) throws UncheckedSQLException {
    return batchUpdate(sql, statementSetter, null, parametersList);
}

public int batchUpdate(final String sql, final JdbcSettings jdbcSettings, final List<?> parametersList) throws UncheckedSQLException {
    return batchUpdate(sql, StatementSetter.DEFAULT, jdbcSettings, parametersList);
}

public int batchUpdate(final String sql, final StatementSetter statementSetter, final JdbcSettings jdbcSettings, final List<?> parametersList)
        throws UncheckedSQLException {
    return batchUpdate(null, sql, statementSetter, jdbcSettings, parametersList);
}

public int batchUpdate(final Connection conn, final String sql, final List<?> parametersList) throws UncheckedSQLException {
    return batchUpdate(conn, sql, StatementSetter.DEFAULT, parametersList);
}

public int batchUpdate(final Connection conn, final String sql, final StatementSetter statementSetter, final List<?> parametersList)
        throws UncheckedSQLException {
    return batchUpdate(conn, sql, statementSetter, null, parametersList);
}

public int batchUpdate(final Connection conn, final String sql, final JdbcSettings jdbcSettings, final List<?> parametersList) throws UncheckedSQLException {
    return batchUpdate(conn, sql, StatementSetter.DEFAULT, jdbcSettings, parametersList);
}

/**
 * @see #batchUpdate(Connection, String, StatementSetter, JdbcSettings, Object[])
 */
@SuppressWarnings("deprecation") public int batchUpdate(final Connection conn, final String sql, StatementSetter statementSetter, JdbcSettings jdbcSettings, final List<?> parametersList) throws UncheckedSQLException { final NamedSQL namedSQL = getNamedSQL(sql); statementSetter = checkStatementSetter(namedSQL, statementSetter); jdbcSettings = checkJdbcSettings(jdbcSettings, namedSQL, _sqlMapper.getAttrs(sql)); final int len = parametersList.size(); final int batchSize = getBatchSize(jdbcSettings); DataSource ds = null; Connection localConn = null; PreparedStatement stmt = null; int originalIsolationLevel = 0; boolean autoCommit = true; try { ds = getDataSource(namedSQL.getParameterizedSQL(), parametersList, jdbcSettings); localConn = getConnection(conn, ds, jdbcSettings, SQLOperation.UPDATE); try { originalIsolationLevel = localConn.getTransactionIsolation(); autoCommit = localConn.getAutoCommit(); } catch (SQLException e) { close(localConn, conn, ds); throw new UncheckedSQLException(e); } if ((conn == null) && (len > batchSize)) { localConn.setAutoCommit(false); setIsolationLevel(jdbcSettings, localConn); } stmt = prepareStatement(ds, localConn, namedSQL, statementSetter, jdbcSettings, false, true, parametersList); int result = 0; if (len <= batchSize) { for (int i = 0; i < len; i++) { statementSetter.setParameters(namedSQL, stmt, N.asArray(parametersList.get(i))); stmt.addBatch(); } result += executeBatchUpdate(namedSQL, stmt); } else { int num = 0; for (int i = 0; i < len; i++) { statementSetter.setParameters(namedSQL, stmt, N.asArray(parametersList.get(i))); stmt.addBatch(); num++; if ((num % batchSize) == 0) { result += executeBatchUpdate(namedSQL, stmt); } } if ((num % batchSize) > 0) { result += executeBatchUpdate(namedSQL, stmt); } } if ((conn == null) && (len > batchSize) && autoCommit == true) { localConn.commit(); } if (N.firstNonNull(parametersList).orNull() instanceof DirtyMarker) { for (Object parameters : parametersList) { ((DirtyMarker) 
parameters).markDirty(namedSQL.getNamedParameters(), false); } } return result; } catch (SQLException e) { if ((conn == null) && (len > batchSize) && autoCommit == true) { if (logger.isWarnEnabled()) { logger.warn("Trying to roll back ..."); } try { localConn.rollback(); if (logger.isWarnEnabled()) { logger.warn("succeeded to roll back"); } } catch (SQLException e1) { logger.error("Failed to roll back", e1); } } String msg = AbacusException.getErrorMsg(e) + ". [SQL] " + namedSQL.getNamedSQL(); throw new UncheckedSQLException(msg, e); } finally { if ((conn == null) && (len > batchSize)) { try { localConn.setAutoCommit(autoCommit); localConn.setTransactionIsolation(originalIsolationLevel); } catch (SQLException e) { logger.error("Failed to reset AutoCommit", e); } } close(stmt); close(localConn, conn, ds); } } protected int executeBatchUpdate(final NamedSQL namedSQL, final PreparedStatement stmt) throws SQLException { if (_isReadOnly) { throw new AbacusException("This SQL Executor is configured for read-only"); } final int[] results = stmt.executeBatch(); stmt.clearBatch(); if ((results == null) || (results.length == 0)) { return 0; } int sum = 0; for (int i = 0; i < results.length; i++) { sum += results[i]; } return sum; } // // mess up. To uncomment this method, also need to modify getNamingPolicy/setNamingPolicy in JdbcSettings. // int update(final EntityId entityId, final Map<String, Object> props) { // return update(null, entityId, props); // } // // // mess up. To uncomment this method, also need to modify getNamingPolicy/setNamingPolicy in JdbcSettings. 
// int update(final Connection conn, final EntityId entityId, final Map<String, Object> props) { // final Pair2 pair = generateUpdateSQL(entityId, props); // // return update(conn, sp.sql, sp.parameters); // } // // private Pair2 generateUpdateSQL(final EntityId entityId, final Map<String, Object> props) { // final Condition cond = EntityManagerUtil.entityId2Condition(entityId); // final NamingPolicy namingPolicy = _jdbcSettings.getNamingPolicy(); // // if (namingPolicy == null) { // return NE.update(entityId.entityName()).set(props).where(cond).pair(); // } // // switch (namingPolicy) { // case LOWER_CASE_WITH_UNDERSCORE: { // return NE.update(entityId.entityName()).set(props).where(cond).pair(); // } // // case UPPER_CASE_WITH_UNDERSCORE: { // return NE2.update(entityId.entityName()).set(props).where(cond).pair(); // } // // case CAMEL_CASE: { // return NE3.update(entityId.entityName()).set(props).where(cond).pair(); // } // // default: // throw new IllegalArgumentException("Unsupported naming policy"); // } // } // // // mess up. To uncomment this method, also need to modify getNamingPolicy/setNamingPolicy in JdbcSettings. // int delete(final EntityId entityId) { // return delete(null, entityId); // } // // // mess up. To uncomment this method, also need to modify getNamingPolicy/setNamingPolicy in JdbcSettings. 
// int delete(final Connection conn, final EntityId entityId) { // final Pair2 pair = generateDeleteSQL(entityId); // // return update(conn, sp.sql, sp.parameters); // } // // private Pair2 generateDeleteSQL(final EntityId entityId) { // final Condition cond = EntityManagerUtil.entityId2Condition(entityId); // final NamingPolicy namingPolicy = _jdbcSettings.getNamingPolicy(); // // if (namingPolicy == null) { // return NE.deleteFrom(entityId.entityName()).where(cond).pair(); // } // // switch (namingPolicy) { // case LOWER_CASE_WITH_UNDERSCORE: { // return NE.deleteFrom(entityId.entityName()).where(cond).pair(); // } // // case UPPER_CASE_WITH_UNDERSCORE: { // return NE2.deleteFrom(entityId.entityName()).where(cond).pair(); // } // // case CAMEL_CASE: { // return NE3.deleteFrom(entityId.entityName()).where(cond).pair(); // } // // default: // throw new IllegalArgumentException("Unsupported naming policy"); // } // } // // // mess up. To uncomment this method, also need to modify getNamingPolicy/setNamingPolicy in JdbcSettings. // boolean exists(final EntityId entityId) { // return exists(null, entityId); // } // // // mess up. To uncomment this method, also need to modify getNamingPolicy/setNamingPolicy in JdbcSettings. // boolean exists(final Connection conn, final EntityId entityId) { // final Pair2 pair = generateQuerySQL(entityId, NE._1_list); // // return query(conn, sp.sql, StatementSetter.DEFAULT, EXISTS_RESULT_SET_EXTRACTOR, null, sp.parameters); // } @SafeVarargs public final boolean exists(final String sql, final Object... parameters) { return exists(null, sql, parameters); } @SafeVarargs public final boolean exists(final Connection conn, final String sql, final Object... parameters) { return query(conn, sql, StatementSetter.DEFAULT, EXISTS_RESULT_SET_EXTRACTOR, null, parameters); } /** * * @param sql * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query. 
     *            DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
     * @return
     * @deprecated may be misused and it's inefficient.
     */
    @Deprecated
    @SafeVarargs
    public final int count(final String sql, final Object... parameters) {
        return count(null, sql, parameters);
    }

    /**
     *
     * @param conn
     * @param sql
     * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
     *            DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
     * @return the value produced by {@code COUNT_RESULT_SET_EXTRACTOR}
     * @deprecated may be misused and it's inefficient.
     */
    @Deprecated
    @SafeVarargs
    public final int count(final Connection conn, final String sql, final Object... parameters) {
        return query(conn, sql, StatementSetter.DEFAULT, COUNT_RESULT_SET_EXTRACTOR, null, parameters);
    }

    /**
     *
     * @param targetClass
     * @param sql
     * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
     *            DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
     * @return an {@code Optional} holding the single matched record, or an empty one if no record matches
     * @throws DuplicatedResultException if two or more records are found.
     */
    @SafeVarargs
    public final <T> Optional<T> get(final Class<T> targetClass, final String sql, final Object... parameters) throws DuplicatedResultException {
        return Optional.ofNullable(gett(targetClass, sql, parameters));
    }

    /**
     *
     * @param targetClass
     * @param sql
     * @param statementSetter
     * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
     *            DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
     * @return
     * @throws DuplicatedResultException if two or more records are found.
     */
    @SafeVarargs
    public final <T> Optional<T> get(final Class<T> targetClass, final String sql, final StatementSetter statementSetter, final Object... parameters)
            throws DuplicatedResultException {
        return Optional.ofNullable(gett(targetClass, sql, statementSetter, parameters));
    }

    /**
     *
     * @param targetClass
     * @param sql
     * @param jdbcSettings
     * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
     *            DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
     * @return an {@code Optional} holding the single matched record, or an empty one if no record matches
     * @throws DuplicatedResultException if two or more records are found.
     */
    @SafeVarargs
    public final <T> Optional<T> get(final Class<T> targetClass, final String sql, final JdbcSettings jdbcSettings, final Object... parameters)
            throws DuplicatedResultException {
        return Optional.ofNullable(gett(targetClass, sql, jdbcSettings, parameters));
    }

    /**
     *
     * @param targetClass
     * @param sql
     * @param statementSetter
     * @param jdbcSettings
     * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
     *            DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
     * @return an {@code Optional} holding the single matched record, or an empty one if no record matches
     * @throws DuplicatedResultException if two or more records are found.
     */
    @SafeVarargs
    public final <T> Optional<T> get(final Class<T> targetClass, final String sql, final StatementSetter statementSetter, final JdbcSettings jdbcSettings,
            final Object... parameters) throws DuplicatedResultException {
        return Optional.ofNullable(gett(targetClass, sql, statementSetter, jdbcSettings, parameters));
    }

    /**
     *
     * @param targetClass
     * @param conn
     * @param sql
     * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
     *            DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
     * @return an {@code Optional} holding the single matched record, or an empty one if no record matches
     * @throws DuplicatedResultException if two or more records are found.
     */
    @SafeVarargs
    public final <T> Optional<T> get(final Class<T> targetClass, final Connection conn, final String sql, final Object... parameters)
            throws DuplicatedResultException {
        return Optional.ofNullable(gett(targetClass, conn, sql, parameters));
    }

    /**
     *
     * @param targetClass
     * @param conn
     * @param sql
     * @param statementSetter
     * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
     *            DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
     * @return an {@code Optional} holding the single matched record, or an empty one if no record matches
     * @throws DuplicatedResultException if two or more records are found.
     */
    @SafeVarargs
    public final <T> Optional<T> get(final Class<T> targetClass, final Connection conn, final String sql, final StatementSetter statementSetter,
            final Object... parameters) throws DuplicatedResultException {
        return Optional.ofNullable(gett(targetClass, conn, sql, statementSetter, parameters));
    }

    /**
     *
     * @param targetClass
     * @param conn
     * @param sql
     * @param jdbcSettings
     * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
     *            DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
     * @return an {@code Optional} holding the single matched record, or an empty one if no record matches
     * @throws DuplicatedResultException if two or more records are found.
     */
    @SafeVarargs
    public final <T> Optional<T> get(final Class<T> targetClass, final Connection conn, final String sql, final JdbcSettings jdbcSettings, final Object... parameters)
            throws DuplicatedResultException {
        return Optional.ofNullable(gett(targetClass, conn, sql, jdbcSettings, parameters));
    }

    /**
     *
     * @param targetClass
     * @param conn
     * @param sql
     * @param statementSetter
     * @param jdbcSettings
     * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
     *            DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
     * @return an {@code Optional} holding the single matched record, or an empty one if no record matches
     * @throws DuplicatedResultException if two or more records are found.
     */
    @SafeVarargs
    // NOTE(review): jdbcSettings lacks the "final" modifier that sibling overloads use -- harmless, but inconsistent.
    public final <T> Optional<T> get(final Class<T> targetClass, final Connection conn, final String sql, final StatementSetter statementSetter,
            JdbcSettings jdbcSettings, final Object... parameters) throws DuplicatedResultException {
        return Optional.ofNullable(gett(targetClass, conn, sql, statementSetter, jdbcSettings, parameters));
    }

    /**
     *
     * @param sql
     * @param rowMapper
     * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
     *            DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
     * @return an {@code Optional} holding the single matched record, or an empty one if no record matches
     * @throws DuplicatedResultException if two or more records are found.
     */
    @SafeVarargs
    public final <T> Optional<T> get(final String sql, final JdbcUtil.RowMapper<T> rowMapper, final Object... parameters) throws DuplicatedResultException {
        return Optional.ofNullable(gett(sql, rowMapper, parameters));
    }

    /**
     *
     * @param sql
     * @param statementSetter
     * @param rowMapper
     * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
     *            DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
* @return * @throws DuplicatedResultException if two or more records are found. */ public final <T> Optional<T> get(final String sql, final StatementSetter statementSetter, final JdbcUtil.RowMapper<T> rowMapper, final Object... parameters) throws DuplicatedResultException { return Optional.ofNullable(gett(sql, statementSetter, rowMapper, parameters)); } /** * * @param sql * @param rowMapper * @param jdbcSettings * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query. * DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters. * @return * @throws DuplicatedResultException if two or more records are found. */ public final <T> Optional<T> get(final String sql, final JdbcUtil.RowMapper<T> rowMapper, final JdbcSettings jdbcSettings, final Object... parameters) throws DuplicatedResultException { return Optional.ofNullable(gett(sql, rowMapper, jdbcSettings, parameters)); } /** * * @param sql * @param statementSetter * @param rowMapper * @param jdbcSettings * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query. * DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters. * @return * @throws DuplicatedResultException if two or more records are found. */ @SafeVarargs public final <T> Optional<T> get(final String sql, final StatementSetter statementSetter, final JdbcUtil.RowMapper<T> rowMapper, final JdbcSettings jdbcSettings, final Object... 
parameters) throws DuplicatedResultException { return Optional.ofNullable(gett(sql, statementSetter, rowMapper, jdbcSettings, parameters)); } /** * * @param conn * @param sql * @param rowMapper * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query. * DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters. * @return * @throws DuplicatedResultException if two or more records are found. */ @SafeVarargs public final <T> Optional<T> get(final Connection conn, final String sql, final JdbcUtil.RowMapper<T> rowMapper, final Object... parameters) throws DuplicatedResultException { return Optional.ofNullable(gett(conn, sql, rowMapper, parameters)); } /** * * @param conn * @param sql * @param statementSetter * @param rowMapper * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query. * DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters. * @return * @throws DuplicatedResultException if two or more records are found. */ @SafeVarargs public final <T> Optional<T> get(final Connection conn, final String sql, final StatementSetter statementSetter, final JdbcUtil.RowMapper<T> rowMapper, final Object... parameters) { return Optional.ofNullable(gett(conn, sql, statementSetter, rowMapper, parameters)); } /** * * @param conn * @param sql * @param rowMapper * @param jdbcSettings * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query. * DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters. * @return * @throws DuplicatedResultException if two or more records are found. 
     */
    @SafeVarargs
    // NOTE(review): jdbcSettings lacks "final" here, unlike most sibling overloads.
    public final <T> Optional<T> get(final Connection conn, final String sql, final JdbcUtil.RowMapper<T> rowMapper, JdbcSettings jdbcSettings,
            final Object... parameters) throws DuplicatedResultException {
        return Optional.ofNullable(gett(conn, sql, rowMapper, jdbcSettings, parameters));
    }

    /**
     *
     * @param conn
     * @param sql
     * @param statementSetter
     * @param rowMapper
     * @param jdbcSettings
     * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
     *            DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
     * @return an {@code Optional} holding the single matched record, or an empty one if no record matches
     * @throws DuplicatedResultException if two or more records are found.
     */
    @SafeVarargs
    public final <T> Optional<T> get(final Connection conn, final String sql, final StatementSetter statementSetter, final JdbcUtil.RowMapper<T> rowMapper,
            final JdbcSettings jdbcSettings, final Object... parameters) throws DuplicatedResultException {
        return Optional.ofNullable(gett(conn, sql, statementSetter, rowMapper, jdbcSettings, parameters));
    }

    /**
     *
     * @param targetClass
     * @param sql
     * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
     *            DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
     * @return the single matched record, or {@code null} if no record matches
     * @throws DuplicatedResultException if two or more records are found.
     */
    @SafeVarargs
    public final <T> T gett(final Class<T> targetClass, final String sql, final Object... parameters) throws DuplicatedResultException {
        return gett(targetClass, sql, StatementSetter.DEFAULT, parameters);
    }

    /**
     *
     * @param targetClass
     * @param sql
     * @param statementSetter
     * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
     *            DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
     * @return the single matched record, or {@code null} if no record matches
     * @throws DuplicatedResultException if two or more records are found.
     */
    @SafeVarargs
    public final <T> T gett(final Class<T> targetClass, final String sql, final StatementSetter statementSetter, final Object... parameters)
            throws DuplicatedResultException {
        return gett(targetClass, sql, statementSetter, null, parameters);
    }

    /**
     *
     * @param targetClass
     * @param sql
     * @param jdbcSettings
     * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
     *            DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
     * @return the single matched record, or {@code null} if no record matches
     * @throws DuplicatedResultException if two or more records are found.
     */
    @SafeVarargs
    public final <T> T gett(final Class<T> targetClass, final String sql, final JdbcSettings jdbcSettings, final Object... parameters)
            throws DuplicatedResultException {
        return gett(targetClass, sql, StatementSetter.DEFAULT, jdbcSettings, parameters);
    }

    /**
     *
     * @param targetClass
     * @param sql
     * @param statementSetter
     * @param jdbcSettings
     * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
     *            DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
     * @return the single matched record, or {@code null} if no record matches
     * @throws DuplicatedResultException if two or more records are found.
     */
    @SafeVarargs
    public final <T> T gett(final Class<T> targetClass, final String sql, final StatementSetter statementSetter, final JdbcSettings jdbcSettings,
            final Object... parameters) throws DuplicatedResultException {
        return gett(targetClass, null, sql, statementSetter, jdbcSettings, parameters);
    }

    /**
     *
     * @param targetClass
     * @param conn
     * @param sql
     * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
     *            DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
     * @return the single matched record, or {@code null} if no record matches
     * @throws DuplicatedResultException if two or more records are found.
     */
    @SafeVarargs
    public final <T> T gett(final Class<T> targetClass, final Connection conn, final String sql, final Object... parameters) throws DuplicatedResultException {
        return gett(targetClass, conn, sql, StatementSetter.DEFAULT, parameters);
    }

    /**
     *
     * @param targetClass
     * @param conn
     * @param sql
     * @param statementSetter
     * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
     *            DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
     * @return the single matched record, or {@code null} if no record matches
     * @throws DuplicatedResultException if two or more records are found.
     */
    @SafeVarargs
    public final <T> T gett(final Class<T> targetClass, final Connection conn, final String sql, final StatementSetter statementSetter,
            final Object... parameters) throws DuplicatedResultException {
        return gett(targetClass, conn, sql, statementSetter, null, parameters);
    }

    /**
     *
     * @param targetClass
     * @param conn
     * @param sql
     * @param jdbcSettings
     * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
     *            DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
     * @return the single matched record, or {@code null} if no record matches
     * @throws DuplicatedResultException if two or more records are found.
     */
    @SafeVarargs
    public final <T> T gett(final Class<T> targetClass, final Connection conn, final String sql, final JdbcSettings jdbcSettings, final Object... parameters)
            throws DuplicatedResultException {
        return gett(targetClass, conn, sql, StatementSetter.DEFAULT, jdbcSettings, parameters);
    }

    /**
     *
     * @param targetClass
     * @param conn
     * @param sql
     * @param statementSetter
     * @param jdbcSettings
     * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
     *            DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
     * @return the single matched record, or {@code null} if no record matches
     * @throws DuplicatedResultException if two or more records are found.
     */
    @SuppressWarnings("unchecked")
    @SafeVarargs
    public final <T> T gett(final Class<T> targetClass, final Connection conn, final String sql, final StatementSetter statementSetter,
            JdbcSettings jdbcSettings, final Object... parameters) throws DuplicatedResultException {
        N.checkArgNotNull(targetClass, "targetClass");

        // Adapt the class-based mapping to the row-mapper based overload: each row is mapped
        // through a BiRowMapper built once for targetClass, fed with the result set's column labels.
        final JdbcUtil.RowMapper<T> rowMapper = new JdbcUtil.RowMapper<T>() {
            private final BiRowMapper<T> biRowMapper = BiRowMapper.to(targetClass);

            @Override
            public T apply(ResultSet rs) throws SQLException {
                return biRowMapper.apply(rs, JdbcUtil.getColumnLabelList(rs));
            }
        };

        return gett(conn, sql, statementSetter, rowMapper, jdbcSettings, parameters);
    }

    /**
     *
     * @param sql
     * @param rowMapper
     * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
     *            DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
     * @return the single matched record, or {@code null} if no record matches
     * @throws DuplicatedResultException if two or more records are found.
     */
    @SafeVarargs
    public final <T> T gett(final String sql, final JdbcUtil.RowMapper<T> rowMapper, final Object... parameters) throws DuplicatedResultException {
        return gett(sql, StatementSetter.DEFAULT, rowMapper, parameters);
    }

    /**
     *
     * @param sql
     * @param statementSetter
     * @param rowMapper
     * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
     *            DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
     * @return the single matched record, or {@code null} if no record matches
     * @throws DuplicatedResultException if two or more records are found.
     */
    @SafeVarargs
    public final <T> T gett(final String sql, final StatementSetter statementSetter, final JdbcUtil.RowMapper<T> rowMapper, final Object... parameters)
            throws DuplicatedResultException {
        return gett(sql, statementSetter, rowMapper, null, parameters);
    }

    /**
     *
     * @param sql
     * @param rowMapper
     * @param jdbcSettings
     * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
     *            DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
     * @return the single matched record, or {@code null} if no record matches
     * @throws DuplicatedResultException if two or more records are found.
     */
    @SafeVarargs
    public final <T> T gett(final String sql, final JdbcUtil.RowMapper<T> rowMapper, final JdbcSettings jdbcSettings, final Object... parameters)
            throws DuplicatedResultException {
        return gett(sql, StatementSetter.DEFAULT, rowMapper, jdbcSettings, parameters);
    }

    /**
     *
     * @param sql
     * @param statementSetter
     * @param rowMapper
     * @param jdbcSettings
     * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
     *            DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
     * @return the single matched record, or {@code null} if no record matches
     * @throws DuplicatedResultException if two or more records are found.
*/ @SafeVarargs public final <T> T gett(final String sql, final StatementSetter statementSetter, final JdbcUtil.RowMapper<T> rowMapper, final JdbcSettings jdbcSettings, final Object... parameters) throws DuplicatedResultException { return gett(null, sql, statementSetter, rowMapper, jdbcSettings, parameters); } /** * * @param conn * @param sql * @param rowMapper * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query. * DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters. * @return * @throws DuplicatedResultException if two or more records are found. */ @SafeVarargs public final <T> T gett(final Connection conn, final String sql, final JdbcUtil.RowMapper<T> rowMapper, final Object... parameters) throws DuplicatedResultException { return gett(conn, sql, StatementSetter.DEFAULT, rowMapper, parameters); } /** * * @param conn * @param sql * @param statementSetter * @param rowMapper * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query. * DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters. * @return * @throws DuplicatedResultException if two or more records are found. */ public final <T> T gett(final Connection conn, final String sql, final StatementSetter statementSetter, final JdbcUtil.RowMapper<T> rowMapper, final Object... parameters) throws DuplicatedResultException { return gett(conn, sql, statementSetter, rowMapper, null, parameters); } /** * * @param conn * @param sql * @param rowMapper * @param jdbcSettings * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query. 
* DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters. * @return * @throws DuplicatedResultException if two or more records are found. */ public final <T> T gett(final Connection conn, final String sql, final JdbcUtil.RowMapper<T> rowMapper, JdbcSettings jdbcSettings, final Object... parameters) throws DuplicatedResultException { return gett(conn, sql, StatementSetter.DEFAULT, rowMapper, jdbcSettings, parameters); } /** *v * @param conn * @param sql * @param statementSetter * @param rowMapper * @param jdbcSettings * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query. * DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters. * @return * @throws DuplicatedResultException if two or more records are found. */ @SuppressWarnings("unchecked") @SafeVarargs public final <T> T gett(final Connection conn, final String sql, final StatementSetter statementSetter, final JdbcUtil.RowMapper<T> rowMapper, JdbcSettings jdbcSettings, final Object... parameters) throws DuplicatedResultException { N.checkArgNotNull(rowMapper, "rowMapper"); final ResultExtractor<T> resultExtractor = new ResultExtractor<T>() { @Override public T extractData(ResultSet rs, final JdbcSettings jdbcSettings) throws SQLException { int offset = jdbcSettings.getOffset(); if (offset > 0) { JdbcUtil.skip(rs, offset); } T result = null; if (rs.next()) { result = Objects.requireNonNull(rowMapper.apply(rs)); if (rs.next()) { throw new DuplicatedResultException("More than one records found by sql: " + sql); } } return result; } }; return query(conn, sql, statementSetter, resultExtractor, jdbcSettings, parameters); } @SafeVarargs public final <T> Optional<T> findFirst(final Class<T> targetClass, final String sql, final Object... 
parameters) { return findFirst(targetClass, sql, StatementSetter.DEFAULT, parameters); } @SafeVarargs public final <T> Optional<T> findFirst(final Class<T> targetClass, final String sql, final StatementSetter statementSetter, final Object... parameters) { return findFirst(targetClass, sql, statementSetter, null, parameters); } @SafeVarargs public final <T> Optional<T> findFirst(final Class<T> targetClass, final String sql, final JdbcSettings jdbcSettings, final Object... parameters) { return findFirst(targetClass, sql, StatementSetter.DEFAULT, jdbcSettings, parameters); } @SafeVarargs public final <T> Optional<T> findFirst(final Class<T> targetClass, final String sql, final StatementSetter statementSetter, final JdbcSettings jdbcSettings, final Object... parameters) { return findFirst(targetClass, null, sql, statementSetter, jdbcSettings, parameters); } @SafeVarargs public final <T> Optional<T> findFirst(final Class<T> targetClass, final Connection conn, final String sql, final Object... parameters) { return findFirst(targetClass, conn, sql, StatementSetter.DEFAULT, parameters); } public final <T> Optional<T> findFirst(final Class<T> targetClass, final Connection conn, final String sql, final StatementSetter statementSetter, final Object... parameters) { return findFirst(targetClass, conn, sql, statementSetter, null, parameters); } public final <T> Optional<T> findFirst(final Class<T> targetClass, final Connection conn, final String sql, final JdbcSettings jdbcSettings, final Object... parameters) { return findFirst(targetClass, conn, sql, StatementSetter.DEFAULT, jdbcSettings, parameters); } /** * Just fetch the result in the 1st row. {@code null} is returned if no result is found. This method will try to * convert the column value to the type of mapping entity property if the mapping entity property is not assignable * from column value. * * Remember to add {@code limit} condition if big result will be returned by the query. 
 *
 * @param targetClass
 * @param conn
 * @param sql
 * @param statementSetter
 * @param jdbcSettings
 * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
 *            DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
 * @return an {@code Optional} holding the first mapped row, or an empty {@code Optional} if no row is returned.
 */
@SuppressWarnings("unchecked")
@SafeVarargs
public final <T> Optional<T> findFirst(final Class<T> targetClass, final Connection conn, final String sql,
        final StatementSetter statementSetter, final JdbcSettings jdbcSettings, final Object... parameters) {
    N.checkArgNotNull(targetClass, "targetClass");

    // Bridge the class-based mapping to a RowMapper: column labels are resolved
    // from the ResultSet and handed to the BiRowMapper generated for targetClass.
    final JdbcUtil.RowMapper<T> rowMapper = new JdbcUtil.RowMapper<T>() {
        private final BiRowMapper<T> biRowMapper = BiRowMapper.to(targetClass);

        @Override
        public T apply(ResultSet rs) throws SQLException {
            return biRowMapper.apply(rs, JdbcUtil.getColumnLabelList(rs));
        }
    };

    return findFirst(conn, sql, statementSetter, rowMapper, jdbcSettings, parameters);
}

// Convenience overloads: each fills in defaults (StatementSetter.DEFAULT, null
// JdbcSettings and/or null Connection) and delegates to the full findFirst below.

@SafeVarargs
public final <T> Optional<T> findFirst(final String sql, final JdbcUtil.RowMapper<T> rowMapper, final Object... parameters) {
    return findFirst(sql, StatementSetter.DEFAULT, rowMapper, parameters);
}

@SafeVarargs
public final <T> Optional<T> findFirst(final String sql, final StatementSetter statementSetter, final JdbcUtil.RowMapper<T> rowMapper,
        final Object... parameters) {
    return findFirst(sql, statementSetter, rowMapper, null, parameters);
}

@SafeVarargs
public final <T> Optional<T> findFirst(final String sql, final JdbcUtil.RowMapper<T> rowMapper, final JdbcSettings jdbcSettings,
        final Object... parameters) {
    return findFirst(sql, StatementSetter.DEFAULT, rowMapper, jdbcSettings, parameters);
}

@SafeVarargs
public final <T> Optional<T> findFirst(final String sql, final StatementSetter statementSetter, final JdbcUtil.RowMapper<T> rowMapper,
        final JdbcSettings jdbcSettings, final Object... parameters) {
    return findFirst(null, sql, statementSetter, rowMapper, jdbcSettings, parameters);
}

@SafeVarargs
public final <T> Optional<T> findFirst(final Connection conn, final String sql, final JdbcUtil.RowMapper<T> rowMapper,
        final Object... parameters) {
    return findFirst(conn, sql, StatementSetter.DEFAULT, rowMapper, parameters);
}

public final <T> Optional<T> findFirst(final Connection conn, final String sql, final StatementSetter statementSetter,
        final JdbcUtil.RowMapper<T> rowMapper, final Object... parameters) {
    return findFirst(conn, sql, statementSetter, rowMapper, null, parameters);
}

public final <T> Optional<T> findFirst(final Connection conn, final String sql, final JdbcUtil.RowMapper<T> rowMapper,
        final JdbcSettings jdbcSettings, final Object... parameters) {
    return findFirst(conn, sql, StatementSetter.DEFAULT, rowMapper, jdbcSettings, parameters);
}

/**
 * Remember to add {@code limit} condition if big result will be returned by the query.
 *
 * @param conn
 * @param sql
 * @param statementSetter
 * @param rowMapper
 * @param jdbcSettings
 * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
 *            DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
 * @return
 */
@SuppressWarnings("unchecked")
@SafeVarargs
public final <T> Optional<T> findFirst(final Connection conn, final String sql, final StatementSetter statementSetter,
        final JdbcUtil.RowMapper<T> rowMapper, final JdbcSettings jdbcSettings, final Object... parameters) {
    N.checkArgNotNull(rowMapper, "rowMapper");

    // Skip jdbcSettings.getOffset() rows, then map the next row if present.
    // A null mapping result is rejected via Objects.requireNonNull so that an
    // empty Optional always means "no row", never "row mapped to null".
    final ResultExtractor<T> resultExtractor = new ResultExtractor<T>() {
        @Override
        public T extractData(ResultSet rs, final JdbcSettings jdbcSettings) throws SQLException {
            int offset = jdbcSettings.getOffset();

            if (offset > 0) {
                JdbcUtil.skip(rs, offset);
            }

            return rs.next() ? Objects.requireNonNull(rowMapper.apply(rs)) : null;
        }
    };

    return Optional.ofNullable(query(conn, sql, statementSetter, resultExtractor, jdbcSettings, parameters));
}

// list(...) overloads mapping each result row to an instance of targetClass.
// Each delegates toward the full (targetClass, conn, sql, statementSetter,
// jdbcSettings, parameters) variant defined further below.

@SafeVarargs
public final <T> List<T> list(final Class<T> targetClass, final String sql, final Object... parameters) {
    return list(targetClass, sql, StatementSetter.DEFAULT, parameters);
}

@SafeVarargs
public final <T> List<T> list(final Class<T> targetClass, final String sql, final StatementSetter statementSetter,
        final Object... parameters) {
    return list(targetClass, sql, statementSetter, null, parameters);
}

@SafeVarargs
public final <T> List<T> list(final Class<T> targetClass, final String sql, final JdbcSettings jdbcSettings,
        final Object... parameters) {
    return list(targetClass, sql, StatementSetter.DEFAULT, jdbcSettings, parameters);
}

@SafeVarargs
public final <T> List<T> list(final Class<T> targetClass, final String sql, final StatementSetter statementSetter,
        final JdbcSettings jdbcSettings, final Object... parameters) {
    return list(targetClass, null, sql, statementSetter, jdbcSettings, parameters);
}

@SafeVarargs
public final <T> List<T> list(final Class<T> targetClass, final Connection conn, final String sql, final Object... parameters) {
    return list(targetClass, conn, sql, StatementSetter.DEFAULT, parameters);
}

public final <T> List<T> list(final Class<T> targetClass, final Connection conn, final String sql,
        final StatementSetter statementSetter, final Object... parameters) {
    return list(targetClass, conn, sql, statementSetter, null, parameters);
}

public final <T> List<T> list(final Class<T> targetClass, final Connection conn, final String sql,
        final JdbcSettings jdbcSettings, final Object...
parameters) {
    return list(targetClass, conn, sql, StatementSetter.DEFAULT, jdbcSettings, parameters);
}

/**
 * @param targetClass
 * @param conn
 * @param sql
 * @param statementSetter
 * @param jdbcSettings
 * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
 *            DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
 * @return
 */
@SuppressWarnings("unchecked")
@SafeVarargs
public final <T> List<T> list(final Class<T> targetClass, final Connection conn, final String sql,
        final StatementSetter statementSetter, final JdbcSettings jdbcSettings, final Object... parameters) {
    // Delegate to the BiRowMapper-based core with a mapper generated for targetClass.
    return list(conn, sql, statementSetter, BiRowMapper.to(targetClass), jdbcSettings, parameters);
}

// list(...) overloads taking an explicit BiRowMapper; each fills in defaults
// and delegates to the full (conn, sql, statementSetter, rowMapper,
// jdbcSettings, parameters) implementation at the bottom of this group.

@SafeVarargs
public final <T> List<T> list(final String sql, final JdbcUtil.BiRowMapper<T> rowMapper, final Object... parameters) {
    return list(sql, StatementSetter.DEFAULT, rowMapper, parameters);
}

@SafeVarargs
public final <T> List<T> list(final String sql, final StatementSetter statementSetter, final JdbcUtil.BiRowMapper<T> rowMapper,
        final Object... parameters) {
    return list(sql, statementSetter, rowMapper, null, parameters);
}

@SafeVarargs
public final <T> List<T> list(final String sql, final JdbcUtil.BiRowMapper<T> rowMapper, final JdbcSettings jdbcSettings,
        final Object... parameters) {
    return list(sql, StatementSetter.DEFAULT, rowMapper, jdbcSettings, parameters);
}

@SafeVarargs
public final <T> List<T> list(final String sql, final StatementSetter statementSetter, final JdbcUtil.BiRowMapper<T> rowMapper,
        final JdbcSettings jdbcSettings, final Object... parameters) {
    return list(null, sql, statementSetter, rowMapper, jdbcSettings, parameters);
}

@SafeVarargs
public final <T> List<T> list(final Connection conn, final String sql, final JdbcUtil.BiRowMapper<T> rowMapper,
        final Object... parameters) {
    return list(conn, sql, StatementSetter.DEFAULT, rowMapper, parameters);
}

public final <T> List<T> list(final Connection conn, final String sql, final StatementSetter statementSetter,
        final JdbcUtil.BiRowMapper<T> rowMapper, final Object... parameters) {
    return list(conn, sql, statementSetter, rowMapper, null, parameters);
}

public final <T> List<T> list(final Connection conn, final String sql, final JdbcUtil.BiRowMapper<T> rowMapper,
        final JdbcSettings jdbcSettings, final Object... parameters) {
    return list(conn, sql, StatementSetter.DEFAULT, rowMapper, jdbcSettings, parameters);
}

/**
 * @param conn
 * @param sql
 * @param statementSetter
 * @param rowMapper
 * @param jdbcSettings
 * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
 *            DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
 * @return
 */
@SuppressWarnings("unchecked")
@SafeVarargs
public final <T> List<T> list(final Connection conn, final String sql, final StatementSetter statementSetter,
        final JdbcUtil.BiRowMapper<T> rowMapper, final JdbcSettings jdbcSettings, final Object... parameters) {
    N.checkArgNotNull(rowMapper);

    // Honors jdbcSettings.getOffset()/getCount(): skip 'offset' rows, then map
    // at most 'count' rows with the supplied BiRowMapper.
    final ResultExtractor<List<T>> resultExtractor = new ResultExtractor<List<T>>() {
        @Override
        public List<T> extractData(ResultSet rs, final JdbcSettings jdbcSettings) throws SQLException {
            int offset = jdbcSettings.getOffset();
            int count = jdbcSettings.getCount();

            if (offset > 0) {
                JdbcUtil.skip(rs, offset);
            }

            // Cap the initial capacity (count may be Integer.MAX_VALUE).
            final List<T> result = new ArrayList<>(N.min(count, 16));
            final List<String> columnLabels = JdbcUtil.getColumnLabelList(rs);

            while (count-- > 0 && rs.next()) {
                result.add(rowMapper.apply(rs, columnLabels));
            }

            return result;
        }
    };

    return query(conn, sql, statementSetter, resultExtractor, jdbcSettings, parameters);
}

/**
 * Execute the query in one or more data sources specified by {@code jdbcSettings} and merge the results.
 * It's designed for partition.
 *
 * @param targetClass
 * @param sql
 * @param jdbcSettings
 * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
 *            DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
 * @return
 */
@SafeVarargs
public final <T> List<T> listAll(final Class<T> targetClass, final String sql, final JdbcSettings jdbcSettings,
        final Object... parameters) {
    return listAll(targetClass, sql, StatementSetter.DEFAULT, jdbcSettings, parameters);
}

/**
 * Execute the query in one or more data sources specified by {@code jdbcSettings} and merge the results.
 * It's designed for partition.
 *
 * @param targetClass
 * @param sql
 * @param statementSetter
 * @param jdbcSettings
 * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
 *            DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
 * @return
 */
@SafeVarargs
public final <T> List<T> listAll(final Class<T> targetClass, final String sql, final StatementSetter statementSetter,
        final JdbcSettings jdbcSettings, final Object... parameters) {
    return listAll(sql, statementSetter, BiRowMapper.to(targetClass), jdbcSettings, parameters);
}

/**
 * Execute the query in one or more data sources specified by {@code jdbcSettings} and merge the results.
 * It's designed for partition.
 *
 * @param targetClass
 * @param sqls
 * @param jdbcSettings
 * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
 *            DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
 * @return
 */
@SafeVarargs
public final <T> List<T> listAll(final Class<T> targetClass, final List<String> sqls, final JdbcSettings jdbcSettings,
        final Object... parameters) {
    return listAll(targetClass, sqls, null, jdbcSettings, parameters);
}

/**
 * Execute the query in one or more data sources specified by {@code jdbcSettings} and merge the results.
 * It's designed for partition.
 *
 * @param targetClass
 * @param sqls
 * @param statementSetter
 * @param jdbcSettings
 * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
 *            DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
 * @return
 */
@SafeVarargs
public final <T> List<T> listAll(final Class<T> targetClass, final List<String> sqls, final StatementSetter statementSetter,
        final JdbcSettings jdbcSettings, final Object... parameters) {
    return listAll(sqls, statementSetter, BiRowMapper.to(targetClass), jdbcSettings, parameters);
}

/**
 * Execute the query in one or more data sources specified by {@code jdbcSettings} and merge the results.
 * It's designed for partition.
 *
 * @param sql
 * @param rowMapper
 * @param jdbcSettings
 * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
 *            DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
 * @return
 */
@SafeVarargs
public final <T> List<T> listAll(final String sql, final JdbcUtil.BiRowMapper<T> rowMapper, final JdbcSettings jdbcSettings,
        final Object... parameters) {
    return listAll(sql, StatementSetter.DEFAULT, rowMapper, jdbcSettings, parameters);
}

/**
 * Execute the query in one or more data sources specified by {@code jdbcSettings} and merge the results.
 * It's designed for partition.
 *
 * @param sql
 * @param statementSetter
 * @param rowMapper
 * @param jdbcSettings
 * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
 *            DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
 * @return
 */
@SafeVarargs
public final <T> List<T> listAll(final String sql, final StatementSetter statementSetter, final JdbcUtil.BiRowMapper<T> rowMapper,
        final JdbcSettings jdbcSettings, final Object... parameters) {
    // offset/count are not supported for partitioned (xxxAll) queries.
    checkJdbcSettingsForAllQuery(jdbcSettings);

    // No partition data sources configured: fall through to a plain single-source list.
    if (jdbcSettings == null || N.isNullOrEmpty(jdbcSettings.getQueryWithDataSources())) {
        return list(sql, statementSetter, rowMapper, jdbcSettings, parameters);
    }

    final Collection<String> dss = jdbcSettings.getQueryWithDataSources();
    List<List<T>> resultList = null;

    if (jdbcSettings.isQueryInParallel()) {
        // One copied JdbcSettings per data source, queried in parallel.
        resultList = Stream.of(dss).map(new Function<String, JdbcSettings>() {
            @Override
            public JdbcSettings apply(String ds) {
                final JdbcSettings newJdbcSettings = jdbcSettings.copy();
                newJdbcSettings.setQueryWithDataSources(null);
                newJdbcSettings.setQueryWithDataSource(ds);
                return newJdbcSettings;
            }
        }).parallel(dss.size()).map(new Function<JdbcSettings, List<T>>() {
            @Override
            public List<T> apply(JdbcSettings newJdbcSettings) {
                return list(sql, statementSetter, rowMapper, newJdbcSettings, parameters);
            }
        }).toList();
    } else {
        // Sequential: reuse a single copied settings object, retargeting it per data source.
        final JdbcSettings newJdbcSettings = jdbcSettings.copy();
        newJdbcSettings.setQueryWithDataSources(null);
        resultList = new ArrayList<>(dss.size());

        for (String ds : dss) {
            newJdbcSettings.setQueryWithDataSource(ds);
            resultList.add(list(sql, statementSetter, rowMapper, newJdbcSettings, parameters));
        }
    }

    return N.concat(resultList);
}

// Guard shared by the xxxAll methods: partitioned queries merge results from
// several sources, so per-query offset/count would be ambiguous and is rejected.
private void checkJdbcSettingsForAllQuery(JdbcSettings jdbcSettings) {
    if (jdbcSettings != null && (jdbcSettings.getOffset() != 0 || jdbcSettings.getCount() != Integer.MAX_VALUE)) {
        throw new IllegalArgumentException("Can't set 'offset' or 'count' for findAll/queryAll/streamAll methods");
    }
}

/**
 * Execute one or more queries in one or more data sources specified by {@code jdbcSettings} and merge the results.
 * It's designed for partition.
 *
 * @param sqls
 * @param rowMapper
 * @param jdbcSettings
 * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
 *            DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
 * @return
 */
@SafeVarargs
public final <T> List<T> listAll(final List<String> sqls, final JdbcUtil.BiRowMapper<T> rowMapper, final JdbcSettings jdbcSettings,
        final Object... parameters) {
    return listAll(sqls, null, rowMapper, jdbcSettings, parameters);
}

/**
 * Execute one or more queries in one or more data sources specified by {@code jdbcSettings} and merge the results.
 * It's designed for partition.
 *
 * @param sqls
 * @param statementSetter
 * @param rowMapper
 * @param jdbcSettings
 * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
 *            DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
 * @return
 */
@SafeVarargs
public final <T> List<T> listAll(final List<String> sqls, final StatementSetter statementSetter,
        final JdbcUtil.BiRowMapper<T> rowMapper, final JdbcSettings jdbcSettings, final Object... parameters) {
    if (sqls.size() == 1) {
        return listAll(sqls.get(0), statementSetter, rowMapper, jdbcSettings, parameters);
    }

    List<List<T>> resultList = null;

    if (jdbcSettings != null && jdbcSettings.isQueryInParallel()) {
        // Run each SQL (itself possibly fanned out across data sources) in parallel.
        resultList = Stream.of(sqls).parallel(sqls.size()).map(new Function<String, List<T>>() {
            @Override
            public List<T> apply(String sql) {
                return listAll(sql, statementSetter, rowMapper, jdbcSettings, parameters);
            }
        }).toList();
    } else {
        resultList = new ArrayList<>(sqls.size());

        for (String sql : sqls) {
            resultList.add(listAll(sql, statementSetter, rowMapper, jdbcSettings, parameters));
        }
    }

    return N.concat(resultList);
}

/**
 * @see SQLExecutor#queryForSingleResult(Class, Connection, String, Object...).
 */
@SafeVarargs
public final OptionalBoolean queryForBoolean(final String sql, final Object...
parameters) {
    return query(sql, StatementSetter.DEFAULT, SINGLE_BOOLEAN_EXTRACTOR, null, parameters);
}

// Typed single-value query shortcuts: each runs the query with the default
// statement setter and extracts the first column of the first row via the
// corresponding SINGLE_*_EXTRACTOR constant.

/**
 * @see SQLExecutor#queryForSingleResult(Class, Connection, String, Object...).
 */
@SafeVarargs
public final OptionalChar queryForChar(final String sql, final Object... parameters) {
    return query(sql, StatementSetter.DEFAULT, SINGLE_CHAR_EXTRACTOR, null, parameters);
}

/**
 * @see SQLExecutor#queryForSingleResult(Class, Connection, String, Object...).
 */
@SafeVarargs
public final OptionalByte queryForByte(final String sql, final Object... parameters) {
    return query(sql, StatementSetter.DEFAULT, SINGLE_BYTE_EXTRACTOR, null, parameters);
}

/**
 * @see SQLExecutor#queryForSingleResult(Class, Connection, String, Object...).
 */
@SafeVarargs
public final OptionalShort queryForShort(final String sql, final Object... parameters) {
    return query(sql, StatementSetter.DEFAULT, SINGLE_SHORT_EXTRACTOR, null, parameters);
}

/**
 * @see SQLExecutor#queryForSingleResult(Class, Connection, String, Object...).
 */
@SafeVarargs
public final OptionalInt queryForInt(final String sql, final Object... parameters) {
    return query(sql, StatementSetter.DEFAULT, SINGLE_INT_EXTRACTOR, null, parameters);
}

/**
 * @see SQLExecutor#queryForSingleResult(Class, Connection, String, Object...).
 */
@SafeVarargs
public final OptionalLong queryForLong(final String sql, final Object... parameters) {
    return query(sql, StatementSetter.DEFAULT, SINGLE_LONG_EXTRACTOR, null, parameters);
}

/**
 * @see SQLExecutor#queryForSingleResult(Class, Connection, String, Object...).
 */
@SafeVarargs
public final OptionalFloat queryForFloat(final String sql, final Object... parameters) {
    return query(sql, StatementSetter.DEFAULT, SINGLE_FLOAT_EXTRACTOR, null, parameters);
}

/**
 * @see SQLExecutor#queryForSingleResult(Class, Connection, String, Object...).
 */
@SafeVarargs
public final OptionalDouble queryForDouble(final String sql, final Object... parameters) {
    return query(sql, StatementSetter.DEFAULT, SINGLE_DOUBLE_EXTRACTOR, null, parameters);
}

/**
 * @see SQLExecutor#queryForSingleResult(Class, Connection, String, Object...).
 */
@SafeVarargs
public final Nullable<BigDecimal> queryForBigDecimal(final String sql, final Object... parameters) {
    return query(sql, StatementSetter.DEFAULT, SINGLE_BIG_DECIMAL_EXTRACTOR, null, parameters);
}

/**
 * @see SQLExecutor#queryForSingleResult(Class, Connection, String, Object...).
 */
@SafeVarargs
public final Nullable<String> queryForString(final String sql, final Object... parameters) {
    return query(sql, StatementSetter.DEFAULT, SINGLE_STRING_EXTRACTOR, null, parameters);
}

/**
 * @see SQLExecutor#queryForSingleResult(Class, String, Object...).
 */
@SafeVarargs
public final Nullable<Date> queryForDate(final String sql, final Object... parameters) {
    return query(sql, StatementSetter.DEFAULT, SINGLE_DATE_EXTRACTOR, null, parameters);
}

/**
 * @see SQLExecutor#queryForSingleResult(Class, String, Object...).
 */
@SafeVarargs
public final Nullable<Time> queryForTime(final String sql, final Object... parameters) {
    return query(sql, StatementSetter.DEFAULT, SINGLE_TIME_EXTRACTOR, null, parameters);
}

/**
 * @see SQLExecutor#queryForSingleResult(Class, String, Object...).
 */
@SafeVarargs
public final Nullable<Timestamp> queryForTimestamp(final String sql, final Object... parameters) {
    return query(sql, StatementSetter.DEFAULT, SINGLE_TIMESTAMP_EXTRACTOR, null, parameters);
}

// queryForSingleResult overloads: fill in defaults and delegate to the full
// (targetClass, conn, sql, statementSetter, jdbcSettings, parameters) variant.

@SafeVarargs
public final <V> Nullable<V> queryForSingleResult(final Class<V> targetClass, final String sql, final Object... parameters) {
    return queryForSingleResult(targetClass, sql, StatementSetter.DEFAULT, parameters);
}

@SafeVarargs
public final <V> Nullable<V> queryForSingleResult(final Class<V> targetClass, final String sql,
        final StatementSetter statementSetter, final Object... parameters) {
    return queryForSingleResult(targetClass, sql, statementSetter, null, parameters);
}

@SafeVarargs
public final <V> Nullable<V> queryForSingleResult(final Class<V> targetClass, final String sql, final JdbcSettings jdbcSettings,
        final Object... parameters) {
    return queryForSingleResult(targetClass, sql, StatementSetter.DEFAULT, jdbcSettings, parameters);
}

@SafeVarargs
public final <V> Nullable<V> queryForSingleResult(final Class<V> targetClass, final String sql,
        final StatementSetter statementSetter, final JdbcSettings jdbcSettings, final Object... parameters) {
    return queryForSingleResult(targetClass, null, sql, statementSetter, jdbcSettings, parameters);
}

@SafeVarargs
public final <V> Nullable<V> queryForSingleResult(final Class<V> targetClass, final Connection conn, final String sql,
        final Object... parameters) {
    return queryForSingleResult(targetClass, conn, sql, StatementSetter.DEFAULT, parameters);
}

public final <V> Nullable<V> queryForSingleResult(final Class<V> targetClass, final Connection conn, final String sql,
        final StatementSetter statementSetter, final Object... parameters) {
    return queryForSingleResult(targetClass, conn, sql, statementSetter, null, parameters);
}

public final <V> Nullable<V> queryForSingleResult(final Class<V> targetClass, final Connection conn, final String sql,
        final JdbcSettings jdbcSettings, final Object... parameters) {
    return queryForSingleResult(targetClass, conn, sql, StatementSetter.DEFAULT, jdbcSettings, parameters);
}

/**
 * Returns a {@code Nullable} describing the value in the first row/column if it exists, otherwise return an empty {@code Nullable}.
 * <br />
 *
 * Special note for type conversion for {@code boolean} or {@code Boolean} type: {@code true} is returned if the
 * {@code String} value of the target column is {@code "true"}, case insensitive. or it's an integer with value > 0.
 * Otherwise, {@code false} is returned.
 *
 * Remember to add {@code limit} condition if big result will be returned by the query.
 *
 * @param targetClass
 *            set result type to avoid the NullPointerException if result is null and T is primitive type
 *            "int, long. short ... char, boolean..".
 * @param conn
 * @param sql
 * @param statementSetter
 * @param jdbcSettings
 * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
 *            DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
 */
@SuppressWarnings("unchecked")
@SafeVarargs
public final <V> Nullable<V> queryForSingleResult(final Class<V> targetClass, final Connection conn, final String sql,
        final StatementSetter statementSetter, final JdbcSettings jdbcSettings, final Object... parameters) {
    return query(conn, sql, statementSetter, createSingleResultExtractor(targetClass), jdbcSettings, parameters);
}

// Cache of single-result extractors, keyed by target class, so repeated
// queryForSingleResult calls reuse the same stateless extractor instance.
private final ObjectPool<Class<?>, ResultExtractor<Nullable<?>>> singleResultExtractorPool = new ObjectPool<>(64);

// Returns (creating and caching on first use) an extractor that converts the
// first column of the first row to targetClass, or Nullable.empty() if no row.
private <V> ResultExtractor<Nullable<V>> createSingleResultExtractor(final Class<V> targetClass) {
    @SuppressWarnings("rawtypes")
    ResultExtractor result = singleResultExtractorPool.get(targetClass);

    if (result == null) {
        result = new ResultExtractor<Nullable<V>>() {
            @Override
            public Nullable<V> extractData(final ResultSet rs, final JdbcSettings jdbcSettings) throws SQLException {
                JdbcUtil.skip(rs, jdbcSettings.getOffset());

                if (rs.next()) {
                    return Nullable.of(N.convert(JdbcUtil.getColumnValue(rs, 1), targetClass));
                }

                return Nullable.empty();
            }
        };

        singleResultExtractorPool.put(targetClass, result);
    }

    return result;
}

/**
 * @param targetClass
 * @param sql
 * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
 *            DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
 * @return
 * @throws DuplicatedResultException if two or more records are found.
 */
@SafeVarargs
public final <V> Nullable<V> queryForUniqueResult(final Class<V> targetClass, final String sql, final Object... parameters)
        throws DuplicatedResultException {
    return queryForUniqueResult(targetClass, sql, StatementSetter.DEFAULT, parameters);
}

/**
 * @param targetClass
 * @param sql
 * @param statementSetter
 * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
 *            DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
 * @return
 * @throws DuplicatedResultException if two or more records are found.
 */
@SafeVarargs
public final <V> Nullable<V> queryForUniqueResult(final Class<V> targetClass, final String sql,
        final StatementSetter statementSetter, final Object... parameters) throws DuplicatedResultException {
    return queryForUniqueResult(targetClass, sql, statementSetter, null, parameters);
}

/**
 * @param targetClass
 * @param sql
 * @param jdbcSettings
 * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
 *            DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
 * @return
 * @throws DuplicatedResultException if two or more records are found.
 */
@SafeVarargs
public final <V> Nullable<V> queryForUniqueResult(final Class<V> targetClass, final String sql, final JdbcSettings jdbcSettings,
        final Object... parameters) throws DuplicatedResultException {
    return queryForUniqueResult(targetClass, sql, StatementSetter.DEFAULT, jdbcSettings, parameters);
}

/**
 * @param targetClass
 * @param sql
 * @param statementSetter
 * @param jdbcSettings
 * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
 *            DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
 * @return
 * @throws DuplicatedResultException if two or more records are found.
 */
@SafeVarargs
public final <V> Nullable<V> queryForUniqueResult(final Class<V> targetClass, final String sql,
        final StatementSetter statementSetter, final JdbcSettings jdbcSettings, final Object... parameters)
        throws DuplicatedResultException {
    return queryForUniqueResult(targetClass, null, sql, statementSetter, jdbcSettings, parameters);
}

/**
 * @param targetClass
 * @param conn
 * @param sql
 * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
 *            DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
 * @return
 * @throws DuplicatedResultException if two or more records are found.
 */
@SafeVarargs
public final <V> Nullable<V> queryForUniqueResult(final Class<V> targetClass, final Connection conn, final String sql,
        final Object... parameters) throws DuplicatedResultException {
    return queryForUniqueResult(targetClass, conn, sql, StatementSetter.DEFAULT, parameters);
}

/**
 * @param targetClass
 * @param conn
 * @param sql
 * @param statementSetter
 * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
 *            DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
 * @return
 * @throws DuplicatedResultException if two or more records are found.
 */
public final <V> Nullable<V> queryForUniqueResult(final Class<V> targetClass, final Connection conn, final String sql,
        final StatementSetter statementSetter, final Object... parameters) throws DuplicatedResultException {
    return queryForUniqueResult(targetClass, conn, sql, statementSetter, null, parameters);
}

/**
 * @param targetClass
 * @param conn
 * @param sql
 * @param jdbcSettings
 * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
 *            DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
 * @return
 * @throws DuplicatedResultException if two or more records are found.
 */
public final <V> Nullable<V> queryForUniqueResult(final Class<V> targetClass, final Connection conn, final String sql,
        final JdbcSettings jdbcSettings, final Object... parameters) throws DuplicatedResultException {
    return queryForUniqueResult(targetClass, conn, sql, StatementSetter.DEFAULT, jdbcSettings, parameters);
}

/**
 * Returns a {@code Nullable} describing the value in the first row/column if it exists, otherwise return an empty {@code Nullable}.
 * And throws {@code DuplicatedResultException} if more than one record found.
 * <br />
 *
 * Special note for type conversion for {@code boolean} or {@code Boolean} type: {@code true} is returned if the
 * {@code String} value of the target column is {@code "true"}, case insensitive. or it's an integer with value > 0.
 * Otherwise, {@code false} is returned.
 *
 * Remember to add {@code limit} condition if big result will be returned by the query.
 *
 * @param targetClass
 *            set result type to avoid the NullPointerException if result is null and T is primitive type
 *            "int, long. short ... char, boolean..".
 * @param conn
 * @param sql
 * @param statementSetter
 * @param jdbcSettings
 * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
 *            DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
 * @throws DuplicatedResultException if more than one record found.
 */
@SuppressWarnings("unchecked")
@SafeVarargs
public final <V> Nullable<V> queryForUniqueResult(final Class<V> targetClass, final Connection conn, final String sql,
        final StatementSetter statementSetter, final JdbcSettings jdbcSettings, final Object... parameters)
        throws DuplicatedResultException {
    return query(conn, sql, statementSetter, createUniqueResultExtractor(targetClass), jdbcSettings, parameters);
}

// Cache of unique-result extractors, keyed by target class (same idea as
// singleResultExtractorPool, but the extractor rejects a second row).
private final ObjectPool<Class<?>, ResultExtractor<Nullable<?>>> uniqueResultExtractorPool = new ObjectPool<>(64);

// Returns (creating and caching on first use) an extractor that converts the
// first column of the first row to targetClass and throws
// DuplicatedResultException when a second row exists.
private <V> ResultExtractor<Nullable<V>> createUniqueResultExtractor(final Class<V> targetClass) {
    @SuppressWarnings("rawtypes")
    ResultExtractor result = uniqueResultExtractorPool.get(targetClass);

    if (result == null) {
        result = new ResultExtractor<Nullable<V>>() {
            @Override
            public Nullable<V> extractData(final ResultSet rs, final JdbcSettings jdbcSettings) throws SQLException {
                JdbcUtil.skip(rs, jdbcSettings.getOffset());

                if (rs.next()) {
                    final Nullable<V> result = Nullable.of(N.convert(JdbcUtil.getColumnValue(rs, 1), targetClass));

                    // NOTE(review): duplication is only checked when the first value is
                    // present — a null first value short-circuits the second rs.next().
                    if (result.isPresent() && rs.next()) {
                        throw new DuplicatedResultException("At least two results found: "
                                + Strings.concat(result.get(), ", ", N.convert(JdbcUtil.getColumnValue(rs, 1), targetClass)));
                    }

                    return result;
                }

                return Nullable.empty();
            }
        };

        uniqueResultExtractorPool.put(targetClass, result);
    }

    return result;
}

@SafeVarargs
public
final DataSet query(final String sql, final Object... parameters) {
    // Delegates to the StatementSetter overload with the default setter.
    return query(sql, StatementSetter.DEFAULT, parameters);
}

/** Executes the query with the specified {@code StatementSetter}; result returned as a {@code DataSet}. */
@SafeVarargs
public final DataSet query(final String sql, final StatementSetter statementSetter, final Object... parameters) {
    return query(sql, statementSetter, (JdbcSettings) null, parameters);
}

/** Executes the query with the specified {@code JdbcSettings}; result returned as a {@code DataSet}. */
@SafeVarargs
public final DataSet query(final String sql, final JdbcSettings jdbcSettings, final Object... parameters) {
    return query(sql, StatementSetter.DEFAULT, jdbcSettings, parameters);
}

/** Executes the query, extracting the result with {@code ResultExtractor.DATA_SET}. */
@SafeVarargs
public final DataSet query(final String sql, final StatementSetter statementSetter, final JdbcSettings jdbcSettings, final Object... parameters) {
    return query(sql, statementSetter, ResultExtractor.DATA_SET, jdbcSettings, parameters);
}

/** Executes the query, converting the {@code ResultSet} with the specified {@code ResultExtractor}. */
@SafeVarargs
public final <T> T query(final String sql, final ResultExtractor<T> resultExtractor, final Object... parameters) {
    return query(sql, StatementSetter.DEFAULT, resultExtractor, parameters);
}

/** Executes the query with the specified {@code StatementSetter} and {@code ResultExtractor}. */
@SafeVarargs
public final <T> T query(final String sql, final StatementSetter statementSetter, final ResultExtractor<T> resultExtractor, final Object... parameters) {
    return query(sql, statementSetter, resultExtractor, null, parameters);
}

/** Executes the query with the specified {@code ResultExtractor} and {@code JdbcSettings}. */
@SafeVarargs
public final <T> T query(final String sql, final ResultExtractor<T> resultExtractor, final JdbcSettings jdbcSettings, final Object... parameters) {
    return query(sql, StatementSetter.DEFAULT, resultExtractor, jdbcSettings, parameters);
}

/**
 * Remember to close the <code>ResultSet</code>, <code>Statement</code> and <code>Connection</code> if the return type <code>T</code> is <code>ResultSet</code> or <code>RowIterator</code>.
 *
 * If <code>T</code> is <code>RowIterator</code>, call <code>rowIterator.close()</code> to close <code>ResultSet</code>, <code>Statement</code> and <code>Connection</code>.
 * <br />
 * If <code>T</code> is <code>ResultSet</code>, call below codes to close <code>ResultSet</code>, <code>Statement</code> and <code>Connection</code>.
 *
 * <pre>
 * <code>
 * Connection conn = null;
 * Statement stmt = null;
 *
 * try {
 *     stmt = rs.getStatement();
 *     conn = stmt.getConnection();
 * } catch (SQLException e) {
 *     // TODO.
 * } finally {
 *     JdbcUtil.closeQuietly(rs, stmt, conn);
 * }
 * </code>
 * </pre>
 *
 * @param sql
 * @param statementSetter
 * @param resultExtractor
 * @param jdbcSettings
 * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
 *     DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
 * @return
 */
@SafeVarargs
public final <T> T query(final String sql, final StatementSetter statementSetter, final ResultExtractor<T> resultExtractor, final JdbcSettings jdbcSettings,
        final Object... parameters) {
    // Null Connection: a connection is obtained internally from the selected DataSource.
    return query(null, sql, statementSetter, resultExtractor, jdbcSettings, parameters);
}

/** Executes the query on the given {@code Connection}; result returned as a {@code DataSet}. */
@SafeVarargs
public final DataSet query(final Connection conn, final String sql, final Object... parameters) {
    return query(conn, sql, StatementSetter.DEFAULT, parameters);
}

/** Executes the query on the given {@code Connection} with the specified {@code StatementSetter}. */
@SafeVarargs
public final DataSet query(final Connection conn, final String sql, final StatementSetter statementSetter, final Object... parameters) {
    return query(conn, sql, statementSetter, (JdbcSettings) null, parameters);
}

/** Executes the query on the given {@code Connection} with the specified {@code JdbcSettings}. */
@SafeVarargs
public final DataSet query(final Connection conn, final String sql, final JdbcSettings jdbcSettings, final Object... parameters) {
    return query(conn, sql, StatementSetter.DEFAULT, jdbcSettings, parameters);
}

/** Executes the query on the given {@code Connection}, extracting the result with {@code ResultExtractor.DATA_SET}. */
@SafeVarargs
public final DataSet query(final Connection conn, final String sql, final StatementSetter statementSetter, final JdbcSettings jdbcSettings,
        final Object... parameters) {
    return query(conn, sql, statementSetter, ResultExtractor.DATA_SET, jdbcSettings, parameters);
}

/** Executes the query on the given {@code Connection} with the specified {@code ResultExtractor}. */
@SafeVarargs
public final <T> T query(final Connection conn, final String sql, final ResultExtractor<T> resultExtractor, final Object... parameters) {
    return query(conn, sql, StatementSetter.DEFAULT, resultExtractor, parameters);
}

/** Executes the query on the given {@code Connection} with the specified {@code StatementSetter} and {@code ResultExtractor}. */
@SafeVarargs
public final <T> T query(final Connection conn, final String sql, final StatementSetter statementSetter, final ResultExtractor<T> resultExtractor,
        final Object... parameters) {
    return query(conn, sql, statementSetter, resultExtractor, null, parameters);
}

/** Executes the query on the given {@code Connection} with the specified {@code ResultExtractor} and {@code JdbcSettings}. */
@SafeVarargs
public final <T> T query(final Connection conn, final String sql, final ResultExtractor<T> resultExtractor, final JdbcSettings jdbcSettings,
        final Object... parameters) {
    return query(conn, sql, StatementSetter.DEFAULT, resultExtractor, jdbcSettings, parameters);
}

/**
 * Remember to close the <code>ResultSet</code>, <code>Statement</code> and <code>Connection</code> if the return type <code>T</code> is <code>ResultSet</code> or <code>RowIterator</code>.
 *
 * If <code>T</code> is <code>RowIterator</code>, call <code>rowIterator.close()</code> to close <code>ResultSet</code>, <code>Statement</code> and <code>Connection</code>.
 * <br />
 * If <code>T</code> is <code>ResultSet</code>, call below codes to close <code>ResultSet</code>, <code>Statement</code> and <code>Connection</code>.
 *
 * <pre>
 * <code>
 * Connection conn = null;
 * Statement stmt = null;
 *
 * try {
 *     stmt = rs.getStatement();
 *     conn = stmt.getConnection();
 * } catch (SQLException e) {
 *     // TODO.
 * } finally {
 *     JdbcUtil.closeQuietly(rs, stmt, conn);
 * }
 * </code>
 * </pre>
 *
 * @param conn
 * @param sql
 * @param statementSetter
 * @param resultExtractor
 * @param jdbcSettings
 * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
 *     DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
* @return
 */
@SafeVarargs
public final <T> T query(final Connection conn, final String sql, final StatementSetter statementSetter, final ResultExtractor<T> resultExtractor,
        final JdbcSettings jdbcSettings, final Object... parameters) {
    // Adapts the public ResultExtractor to the internal ResultSetExtractor interface; targetClass/namedSQL are ignored by the adapter.
    return query(null, conn, sql, statementSetter, new ResultSetExtractor<T>() {
        @Override
        public T extractData(Class<?> targetClass, NamedSQL namedSQL, ResultSet rs, final JdbcSettings jdbcSettings) throws SQLException {
            return resultExtractor.extractData(rs, jdbcSettings);
        }
    }, jdbcSettings, parameters);
}

/**
 * Core query implementation: resolves the named SQL, selects a data source/connection, prepares and executes the
 * statement, and extracts the result with the given {@code ResultSetExtractor}.
 *
 * @param targetClass passed through to the extractor; may be {@code null}.
 * @param conn outer connection; when {@code null} a connection is obtained (and later released) internally.
 * @param sql raw or named SQL.
 * @param statementSetter may be {@code null}; normalized by {@code checkStatementSetter}.
 * @param resultExtractor may be {@code null}; normalized by {@code checkResultSetExtractor}.
 * @param jdbcSettings may be {@code null}; merged with SQL-mapper attributes by {@code checkJdbcSettings}.
 * @param parameters query parameters.
 * @return the extracted result.
 * @throws UncheckedSQLException wrapping any {@code SQLException}, with the named SQL appended to the message.
 * @throws UnsupportedOperationException if the extractor returns a raw {@code ResultSet} but no outer connection was supplied
 *     (the resources could never be released by the caller in that case).
 */
protected <T> T query(final Class<T> targetClass, final Connection conn, final String sql, StatementSetter statementSetter,
        ResultSetExtractor<T> resultExtractor, JdbcSettings jdbcSettings, final Object... parameters) {
    final NamedSQL namedSQL = getNamedSQL(sql);
    statementSetter = checkStatementSetter(namedSQL, statementSetter);
    resultExtractor = checkResultSetExtractor(namedSQL, resultExtractor);
    jdbcSettings = checkJdbcSettings(jdbcSettings, namedSQL, _sqlMapper.getAttrs(sql));

    T result = null;
    DataSource ds = null;
    Connection localConn = null;
    PreparedStatement stmt = null;
    ResultSet rs = null;

    try {
        ds = getDataSource(namedSQL.getParameterizedSQL(), parameters, jdbcSettings);
        localConn = getConnection(conn, ds, jdbcSettings, SQLOperation.SELECT);
        stmt = prepareStatement(ds, localConn, namedSQL, statementSetter, jdbcSettings, false, false, parameters);

        // Default to forward-only fetching unless the settings specify a direction (-1 means unset here).
        if (jdbcSettings == null || jdbcSettings.getFetchDirection() == -1) {
            stmt.setFetchDirection(ResultSet.FETCH_FORWARD);
        }

        rs = stmt.executeQuery();

        result = resultExtractor.extractData(targetClass, namedSQL, rs, jdbcSettings);
    } catch (SQLException e) {
        String msg = AbacusException.getErrorMsg(e) + ". [SQL] " + namedSQL.getNamedSQL();
        throw new UncheckedSQLException(msg, e);
    } finally {
        if (result instanceof ResultSet) {
            if (conn == null) {
                // Internally-obtained connection: releasing it would invalidate the returned ResultSet, so
                // clean up and reject the call instead.
                try {
                    close(rs, stmt);
                } finally {
                    close(localConn, conn, ds);
                }

                throw new UnsupportedOperationException(
                        "The return type of 'ResultSetExtractor' can't be 'ResultSet' when Connection is not specified as input parameter.");
            }
            // delay. (Caller owns the outer connection and must close the ResultSet/Statement itself.)
        } else {
            try {
                close(rs, stmt);
            } finally {
                close(localConn, conn, ds);
            }
        }
    }

    return result;
}

/**
 * Execute the query in one or more data sources specified by {@code jdbcSettings} and merge the results.
 * It's designed for partition.
 *
 * @param sql
 * @param jdbcSettings
 * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
 *     DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
 * @return
 */
@SafeVarargs
public final DataSet queryAll(final String sql, final JdbcSettings jdbcSettings, final Object... parameters) {
    return queryAll(sql, StatementSetter.DEFAULT, jdbcSettings, parameters);
}

/**
 * Execute the query in one or more data sources specified by {@code jdbcSettings} and merge the results.
 * It's designed for partition.
 *
 * @param sql
 * @param statementSetter
 * @param jdbcSettings
 * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
 *     DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
 * @return
 */
@SafeVarargs
public final DataSet queryAll(final String sql, final StatementSetter statementSetter, final JdbcSettings jdbcSettings, final Object...
parameters) {
    checkJdbcSettingsForAllQuery(jdbcSettings);

    // No multi-datasource fan-out requested: plain single query.
    if (jdbcSettings == null || N.isNullOrEmpty(jdbcSettings.getQueryWithDataSources())) {
        return query(sql, statementSetter, jdbcSettings, parameters);
    }

    final Collection<String> dss = jdbcSettings.getQueryWithDataSources();

    if (jdbcSettings.isQueryInParallel()) {
        // Each data source gets its own JdbcSettings copy (fan-out list cleared, single target set),
        // then the per-datasource queries run in parallel and the DataSets are merged.
        final List<DataSet> resultList = Stream.of(dss).map(new Function<String, JdbcSettings>() {
            @Override
            public JdbcSettings apply(String ds) {
                final JdbcSettings newJdbcSettings = jdbcSettings.copy();
                newJdbcSettings.setQueryWithDataSources(null);
                newJdbcSettings.setQueryWithDataSource(ds);
                return newJdbcSettings;
            }
        }).parallel(dss.size()).map(new Function<JdbcSettings, DataSet>() {
            @Override
            public DataSet apply(JdbcSettings newJdbcSettings) {
                return query(sql, statementSetter, newJdbcSettings, parameters);
            }
        }).toList();

        return DataSetUtil.merge(resultList);
    } else {
        // Sequential path reuses a single settings copy, retargeting it per data source.
        final JdbcSettings newJdbcSettings = jdbcSettings.copy();
        newJdbcSettings.setQueryWithDataSources(null);
        final List<DataSet> resultList = new ArrayList<>(dss.size());

        for (String ds : dss) {
            newJdbcSettings.setQueryWithDataSource(ds);
            resultList.add(query(sql, statementSetter, newJdbcSettings, parameters));
        }

        return DataSetUtil.merge(resultList);
    }
}

/**
 * Execute one or more queries in one or more data sources specified by {@code jdbcSettings} and merge the results.
 * It's designed for partition.
 *
 * @param sqls
 * @param jdbcSettings
 * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
 *     DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
 * @return
 */
@SafeVarargs
public final DataSet queryAll(final List<String> sqls, final JdbcSettings jdbcSettings, final Object...
parameters) {
    // NOTE: delegates with a null StatementSetter (normalized downstream), unlike the single-sql
    // overload which passes StatementSetter.DEFAULT.
    return queryAll(sqls, null, jdbcSettings, parameters);
}

/**
 * Execute one or more queries in one or more data sources specified by {@code jdbcSettings} and merge the results.
 * It's designed for partition.
 *
 * @param sqls
 * @param statementSetter
 * @param jdbcSettings
 * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
 *     DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
 * @return
 */
@SafeVarargs
public final DataSet queryAll(final List<String> sqls, final StatementSetter statementSetter, final JdbcSettings jdbcSettings, final Object... parameters) {
    if (sqls.size() == 1) {
        return queryAll(sqls.get(0), statementSetter, jdbcSettings, parameters);
    }

    if (jdbcSettings != null && jdbcSettings.isQueryInParallel()) {
        final List<DataSet> resultList = Stream.of(sqls).parallel(sqls.size()).map(new Function<String, DataSet>() {
            @Override
            public DataSet apply(String sql) {
                return queryAll(sql, statementSetter, jdbcSettings, parameters);
            }
        }).toList();

        return DataSetUtil.merge(resultList);
    } else {
        final List<DataSet> resultList = new ArrayList<>(sqls.size());

        for (String sql : sqls) {
            resultList.add(queryAll(sql, statementSetter, jdbcSettings, parameters));
        }

        return DataSetUtil.merge(resultList);
    }
}

/**
 * Lazy execution, lazy fetch. The query execution and record fetching only happen when a terminal operation of the stream is called.
 *
 * @param targetClass
 * @param sql
 * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
 *     DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
 * @return
 */
@SafeVarargs
public final <T> Stream<T> stream(final Class<T> targetClass, final String sql, final Object...
parameters) {
    return stream(targetClass, sql, StatementSetter.DEFAULT, parameters);
}

/**
 * Lazy execution, lazy fetch. The query execution and record fetching only happen when a terminal operation of the stream is called.
 *
 * @param targetClass
 * @param sql
 * @param statementSetter
 * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
 *     DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
 * @return
 */
@SafeVarargs
public final <T> Stream<T> stream(final Class<T> targetClass, final String sql, final StatementSetter statementSetter, final Object... parameters) {
    return stream(targetClass, sql, statementSetter, null, parameters);
}

/**
 * Lazy execution, lazy fetch. The query execution and record fetching only happen when a terminal operation of the stream is called.
 *
 * NOTE(review): unlike the sibling overloads this one is not annotated {@code @SafeVarargs} — confirm whether that is intentional.
 *
 * @param targetClass
 * @param sql
 * @param jdbcSettings
 * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
 *     DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
 * @return
 */
public final <T> Stream<T> stream(final Class<T> targetClass, final String sql, final JdbcSettings jdbcSettings, final Object... parameters) {
    return stream(targetClass, sql, StatementSetter.DEFAULT, jdbcSettings, parameters);
}

/**
 * Lazy execution, lazy fetch. The query execution and record fetching only happen when a terminal operation of the stream is called.
 *
 * @param sql
 * @param statementSetter
 * @param jdbcSettings
 * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
 *     DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
 * @return
 */
@SafeVarargs
public final <T> Stream<T> stream(final Class<T> targetClass, final String sql, final StatementSetter statementSetter, final JdbcSettings jdbcSettings,
        final Object... parameters) {
    // Rows are mapped to targetClass instances via a BiRowMapper.
    return stream(sql, statementSetter, BiRowMapper.to(targetClass), jdbcSettings, parameters);
}

/**
 * Lazy execution, lazy fetch. The query execution and record fetching only happen when a terminal operation of the stream is called.
 *
 * @param sql
 * @param rowMapper
 * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
 *     DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
 * @return
 */
@SafeVarargs
public final <T> Stream<T> stream(final String sql, final JdbcUtil.BiRowMapper<T> rowMapper, final Object... parameters) {
    return stream(sql, StatementSetter.DEFAULT, rowMapper, parameters);
}

/**
 * Lazy execution, lazy fetch. The query execution and record fetching only happen when a terminal operation of the stream is called.
 *
 * @param sql
 * @param statementSetter
 * @param rowMapper
 * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
 *     DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
 * @return
 */
@SafeVarargs
public final <T> Stream<T> stream(final String sql, final StatementSetter statementSetter, final JdbcUtil.BiRowMapper<T> rowMapper,
        final Object... parameters) {
    return stream(sql, statementSetter, rowMapper, null, parameters);
}

/**
 * Lazy execution, lazy fetch. The query execution and record fetching only happen when a terminal operation of the stream is called.
*
 * @param sql
 * @param rowMapper
 * @param jdbcSettings
 * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
 *     DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
 * @return
 */
@SafeVarargs
public final <T> Stream<T> stream(final String sql, final JdbcUtil.BiRowMapper<T> rowMapper, final JdbcSettings jdbcSettings, final Object... parameters) {
    return stream(sql, StatementSetter.DEFAULT, rowMapper, jdbcSettings, parameters);
}

/**
 * Lazy execution, lazy fetch. The query execution and record fetching only happen when a terminal operation of the stream is called.
 *
 * The query is not executed until the first element (or count/skip) is requested; the underlying
 * {@code ResultSet}/{@code Connection} stay open until the stream is closed.
 *
 * @param sql
 * @param statementSetter
 * @param rowMapper
 * @param jdbcSettings
 * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
 *     DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
 * @return
 */
@SafeVarargs
public final <T> Stream<T> stream(final String sql, final StatementSetter statementSetter, final JdbcUtil.BiRowMapper<T> rowMapper,
        final JdbcSettings jdbcSettings, final Object... parameters) {
    // The Supplier defers the whole query (connection, statement, execution) until first use.
    final ObjIteratorEx<T> lazyIter = ObjIteratorEx.of(new Supplier<ObjIteratorEx<T>>() {
        private ObjIteratorEx<T> internalIter = null;

        @Override
        public ObjIteratorEx<T> get() {
            if (internalIter == null) {
                final Connection inputConn = null;
                final JdbcSettings newJdbcSettings = jdbcSettings == null ? _jdbcSettings.copy() : jdbcSettings.copy();

                // Offset/count are applied by the iterator itself, so the actual query runs unbounded.
                final int offset = newJdbcSettings.getOffset();
                final int count = newJdbcSettings.getCount();
                newJdbcSettings.setOffset(0);
                newJdbcSettings.setCount(Integer.MAX_VALUE);

                final boolean streamTransactionIndependent = newJdbcSettings.streamTransactionIndependent();
                final NamedSQL namedSQL = NamedSQL.parse(sql);
                final DataSource ds = getDataSource(namedSQL.getParameterizedSQL(), parameters, newJdbcSettings);
                // Transaction-independent streams take a fresh connection; otherwise the
                // transaction-aware lookup may reuse an in-flight transaction's connection.
                final Connection localConn = streamTransactionIndependent ? getConnection(ds)
                        : getConnection(inputConn, ds, newJdbcSettings, SQLOperation.SELECT);
                final ResultSet rs = SQLExecutor.this.query(localConn, sql, statementSetter, RESULT_SET_EXTRACTOR, newJdbcSettings, parameters);

                internalIter = new ObjIteratorEx<T>() {
                    private boolean skipped = false;    // whether the initial offset has been consumed
                    private boolean hasNext = false;    // a row has been fetched but not yet returned
                    private int cnt = 0;                // rows consumed, bounded by 'count'
                    private List<String> columnLabels = null;

                    @Override
                    public boolean hasNext() {
                        if (skipped == false) {
                            skip();
                        }

                        if (hasNext == false) {
                            try {
                                if (cnt++ < count && rs.next()) {
                                    hasNext = true;
                                }
                            } catch (SQLException e) {
                                throw new UncheckedSQLException(e);
                            }
                        }

                        return hasNext;
                    }

                    @Override
                    public T next() {
                        if (hasNext() == false) {
                            throw new NoSuchElementException();
                        }

                        try {
                            final T result = rowMapper.apply(rs, columnLabels);
                            hasNext = false;
                            return result;
                        } catch (SQLException e) {
                            throw new UncheckedSQLException(e);
                        }
                    }

                    @Override
                    public void skip(long n) {
                        N.checkArgNotNegative(n, "n");

                        if (skipped == false) {
                            skip();
                        }

                        // A pre-fetched row counts toward the skip.
                        final long m = hasNext ? n - 1 : n;
                        hasNext = false;

                        try {
                            JdbcUtil.skip(rs, Math.min(m, count - cnt));
                        } catch (SQLException e) {
                            throw new UncheckedSQLException(e);
                        }
                    }

                    @Override
                    public long count() {
                        if (skipped == false) {
                            skip();
                        }

                        long result = hasNext ? 1 : 0;
                        hasNext = false;

                        try {
                            while (cnt++ < count && rs.next()) {
                                result++;
                            }
                        } catch (SQLException e) {
                            throw new UncheckedSQLException(e);
                        }

                        return result;
                    }

                    @Override
                    public void close() {
                        try {
                            JdbcUtil.closeQuietly(rs, true, false);
                        } finally {
                            if (streamTransactionIndependent) {
                                JdbcUtil.closeQuietly(localConn);
                            } else {
                                SQLExecutor.this.close(localConn, inputConn, ds);
                            }
                        }
                    }

                    // One-time setup: capture column labels and advance past the requested offset.
                    private void skip() {
                        if (skipped == false) {
                            skipped = true;

                            try {
                                columnLabels = JdbcUtil.getColumnLabelList(rs);

                                if (offset > 0) {
                                    JdbcUtil.skip(rs, offset);
                                }
                            } catch (SQLException e) {
                                throw new UncheckedSQLException(e);
                            }
                        }
                    }
                };
            }

            return internalIter;
        }
    });

    // Ensure the JDBC resources are released when the Stream is closed.
    return Stream.of(lazyIter).onClose(new Runnable() {
        @Override
        public void run() {
            lazyIter.close();
        }
    });
}

/**
 * Execute the query in one or more data sources specified by {@code jdbcSettings} and merge the results.
 * It's designed for partition.
 *
 * @param targetClass
 * @param sql
 * @param jdbcSettings
 * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
 *     DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
 * @return
 */
@SafeVarargs
public final <T> Stream<T> streamAll(final Class<T> targetClass, final String sql, final JdbcSettings jdbcSettings, final Object... parameters) {
    return streamAll(targetClass, sql, StatementSetter.DEFAULT, jdbcSettings, parameters);
}

/**
 * Execute the query in one or more data sources specified by {@code jdbcSettings} and merge the results.
 * It's designed for partition.
 *
 * @param sql
 * @param statementSetter
 * @param jdbcSettings
 * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
 *     DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
* @return
 */
@SafeVarargs
public final <T> Stream<T> streamAll(final Class<T> targetClass, final String sql, final StatementSetter statementSetter, final JdbcSettings jdbcSettings,
        final Object... parameters) {
    return streamAll(sql, statementSetter, BiRowMapper.to(targetClass), jdbcSettings, parameters);
}

/**
 * Execute the query in one or more data sources specified by {@code jdbcSettings} and merge the results.
 * It's designed for partition.
 *
 * @param sql
 * @param rowMapper
 * @param jdbcSettings
 * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
 *     DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
 * @return
 */
@SafeVarargs
public final <T> Stream<T> streamAll(final String sql, final JdbcUtil.BiRowMapper<T> rowMapper, final JdbcSettings jdbcSettings, final Object... parameters) {
    return streamAll(sql, StatementSetter.DEFAULT, rowMapper, jdbcSettings, parameters);
}

/**
 * Execute the query in one or more data sources specified by {@code jdbcSettings} and merge the results.
 * It's designed for partition.
 *
 * @param sql
 * @param statementSetter
 * @param jdbcSettings
 * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
 *     DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
 * @return
 */
@SafeVarargs
public final <T> Stream<T> streamAll(final String sql, final StatementSetter statementSetter, final JdbcUtil.BiRowMapper<T> rowMapper,
        final JdbcSettings jdbcSettings, final Object...
parameters) {
    checkJdbcSettingsForAllQuery(jdbcSettings);

    // No fan-out requested: plain single-datasource stream.
    if (jdbcSettings == null || N.isNullOrEmpty(jdbcSettings.getQueryWithDataSources())) {
        return stream(sql, statementSetter, rowMapper, jdbcSettings, parameters);
    }

    final Collection<String> dss = jdbcSettings.getQueryWithDataSources();

    // One retargeted settings copy per data source; optionally parallelized, then flattened.
    return Stream.of(dss).map(new Function<String, JdbcSettings>() {
        @Override
        public JdbcSettings apply(String ds) {
            return jdbcSettings.copy().setQueryWithDataSources(null).setQueryWithDataSource(ds);
        }
    }).__(new Function<Stream<JdbcSettings>, Stream<JdbcSettings>>() {
        @Override
        public Stream<JdbcSettings> apply(Stream<JdbcSettings> s) {
            return jdbcSettings.isQueryInParallel() ? s.parallel(dss.size()) : s;
        }
    }).flatMap(new Function<JdbcSettings, Stream<T>>() {
        @Override
        public Stream<T> apply(JdbcSettings newJdbcSettings) {
            return stream(sql, statementSetter, rowMapper, newJdbcSettings, parameters);
        }
    });
}

/**
 * Execute one or more queries in one or more data sources specified by {@code jdbcSettings} and merge the results.
 * It's designed for partition.
 *
 * @param targetClass
 * @param sqls
 * @param jdbcSettings
 * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
 *     DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
 * @return
 */
@SafeVarargs
public final <T> Stream<T> streamAll(final Class<T> targetClass, final List<String> sqls, final JdbcSettings jdbcSettings, final Object... parameters) {
    return streamAll(targetClass, sqls, null, jdbcSettings, parameters);
}

/**
 * Execute one or more queries in one or more data sources specified by {@code jdbcSettings} and merge the results.
 * It's designed for partition.
 *
 * @param sqls
 * @param statementSetter
 * @param jdbcSettings
 * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
 *     DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
 * @return
 */
@SafeVarargs
public final <T> Stream<T> streamAll(final Class<T> targetClass, final List<String> sqls, final StatementSetter statementSetter,
        final JdbcSettings jdbcSettings, final Object... parameters) {
    return streamAll(sqls, statementSetter, BiRowMapper.to(targetClass), jdbcSettings, parameters);
}

/**
 * Execute one or more queries in one or more data sources specified by {@code jdbcSettings} and merge the results.
 * It's designed for partition.
 *
 * @param sqls
 * @param rowMapper
 * @param jdbcSettings
 * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
 *     DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
 * @return
 */
@SafeVarargs
public final <T> Stream<T> streamAll(final List<String> sqls, final JdbcUtil.BiRowMapper<T> rowMapper, final JdbcSettings jdbcSettings,
        final Object... parameters) {
    return streamAll(sqls, null, rowMapper, jdbcSettings, parameters);
}

/**
 * Execute one or more queries in one or more data sources specified by {@code jdbcSettings} and merge the results.
 * It's designed for partition.
 *
 * @param sqls
 * @param statementSetter
 * @param rowMapper
 * @param jdbcSettings
 * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
 *     DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
* @return
 */
@SafeVarargs
public final <T> Stream<T> streamAll(final List<String> sqls, final StatementSetter statementSetter, final JdbcUtil.BiRowMapper<T> rowMapper,
        final JdbcSettings jdbcSettings, final Object... parameters) {
    if (sqls.size() == 1) {
        return streamAll(sqls.get(0), statementSetter, rowMapper, jdbcSettings, parameters);
    }

    final boolean isQueryInParallel = jdbcSettings != null && jdbcSettings.isQueryInParallel();

    // One streamAll per sql, optionally parallelized, flattened into a single stream.
    return Stream.of(sqls).__(new Function<Stream<String>, Stream<String>>() {
        @Override
        public Stream<String> apply(Stream<String> s) {
            return isQueryInParallel ? s.parallel(sqls.size()) : s;
        }
    }).flatMap(new Function<String, Stream<T>>() {
        @Override
        public Stream<T> apply(String sql) {
            return streamAll(sql, statementSetter, rowMapper, jdbcSettings, parameters);
        }
    });
}

/** Executes the sql with the specified parameters on an internally-obtained connection. */
public final void execute(final String sql, final Object... parameters) throws UncheckedSQLException {
    execute(null, sql, parameters);
}

/**
 * Execute the sql with the specified parameters.
 *
 * @param conn outer connection; when {@code null} a connection is obtained and released internally.
 * @param sql
 * @param parameters it can be {@code Object[]/List} for (named) parameterized query, or {@code Map<String, Object>/Entity} for named parameterized query.
 *     DO NOT use primitive array {@code boolean[]/char[]/byte[]/short[]/int[]/long[]/float[]/double[]} for passing multiple parameters.
 *
 * @see java.sql.PreparedStatement#execute()
 */
@SafeVarargs
public final void execute(final Connection conn, final String sql, final Object...
parameters) throws UncheckedSQLException {
    final NamedSQL namedSQL = getNamedSQL(sql);
    final StatementSetter statementSetter = checkStatementSetter(namedSQL, null);
    final JdbcSettings jdbcSettings = checkJdbcSettings(null, namedSQL, _sqlMapper.getAttrs(sql));
    // The SQL operation type drives connection selection (e.g. transaction routing).
    final SQLOperation sqlOperation = JdbcUtil.getSQLOperation(namedSQL.getParameterizedSQL());

    DataSource ds = null;
    Connection localConn = null;
    PreparedStatement stmt = null;

    try {
        ds = getDataSource(namedSQL.getParameterizedSQL(), parameters, jdbcSettings);
        localConn = getConnection(conn, ds, jdbcSettings, sqlOperation);
        stmt = prepareStatement(ds, localConn, namedSQL, statementSetter, jdbcSettings, false, false, parameters);

        stmt.execute();
    } catch (SQLException e) {
        String msg = AbacusException.getErrorMsg(e) + ". [SQL] " + namedSQL.getNamedSQL();
        throw new UncheckedSQLException(msg, e);
    } finally {
        close(stmt);
        close(localConn, conn, ds);
    }
}

/**
 * Refer to {@code beginTransaction(IsolationLevel, boolean, JdbcSettings)}.
 *
 * @return
 * @see #beginTransaction(IsolationLevel, boolean, JdbcSettings)
 */
public SQLTransaction beginTransaction() {
    return beginTransaction(IsolationLevel.DEFAULT);
}

/**
 * Refer to {@code beginTransaction(IsolationLevel, boolean, JdbcSettings)}.
 *
 * @param isolationLevel
 * @return
 * @see #beginTransaction(IsolationLevel, boolean, JdbcSettings)
 */
public SQLTransaction beginTransaction(final IsolationLevel isolationLevel) {
    return beginTransaction(isolationLevel, false);
}

/**
 * Refer to {@code beginTransaction(IsolationLevel, boolean, JdbcSettings)}.
 *
 * @param forUpdateOnly
 * @return
 * @see #beginTransaction(IsolationLevel, boolean, JdbcSettings)
 */
public SQLTransaction beginTransaction(final boolean forUpdateOnly) {
    return beginTransaction(IsolationLevel.DEFAULT, forUpdateOnly);
}

/**
 * Refer to {@code beginTransaction(IsolationLevel, boolean, JdbcSettings)}.
*
 * @param isolationLevel
 * @param forUpdateOnly
 * @return
 * @see #beginTransaction(IsolationLevel, boolean, JdbcSettings)
 */
public SQLTransaction beginTransaction(IsolationLevel isolationLevel, boolean forUpdateOnly) {
    return beginTransaction(isolationLevel, forUpdateOnly, null);
}

/**
 * If this method is called where a transaction is started by {@code JdbcUtil.beginTransaction} or in {@code Spring} with the same {@code DataSource} in the same thread,
 * the {@code Connection} started the Transaction will be used here.
 * That's to say the transaction started by {@code JdbcUtil.beginTransaction} or in {@code Spring} will have the final control on commit/roll back over the {@code Connection}.
 * <br />
 * Otherwise a {@code Connection} directly from the specified {@code DataSource}(Connection pool) will be borrowed and used.
 * <br />
 * Transactions started by {@code SQLExecutor.beginTransaction} won't be shared by {@code JdbcUtil.beginTransaction} or Spring.
 *
 * <br />
 * <br />
 *
 * The connection opened in the transaction will be automatically closed after the transaction is committed or rolled back.
 * DON'T close it again by calling the close method.
 * <br />
 * <br />
 * The transaction will be shared across the instances of {@code SQLExecutor/JdbcUtil.Dao/CrudDao} by the methods called in the same thread with same {@code DataSource}.
 *
 * <br />
 * <br />
 *
 * The general programming way with SQLExecutor is to execute sql scripts(generated by SQLBuilder) with array/list/map/entity by calling (batch)insert/update/delete/query/... methods.
 * if transaction is required, it can be started:
 *
 * <pre>
 * <code>
 * final SQLTransaction tran = sqlExecutor.beginTransaction(IsolationLevel.READ_COMMITTED);
 * try {
 *     // sqlExecutor.insert(...);
 *     // sqlExecutor.update(...);
 *     // sqlExecutor.query(...);
 *
 *     tran.commit();
 * } finally {
 *     // The connection will be automatically closed after the transaction is committed or rolled back.
 *     tran.rollbackIfNotCommitted();
 * }
 * </code>
 * </pre>
 *
 *
 * @param isolationLevel
 * @param forUpdateOnly
 * @param jdbcSettings
 * @return
 */
public SQLTransaction beginTransaction(final IsolationLevel isolationLevel, final boolean forUpdateOnly, final JdbcSettings jdbcSettings) {
    N.checkArgNotNull(isolationLevel, "isolationLevel");

    final IsolationLevel isolation = isolationLevel == IsolationLevel.DEFAULT ? _defaultIsolationLevel : isolationLevel;
    // Route to a named data source only when one is specified; otherwise the executor's default.
    final DataSource ds = jdbcSettings != null && jdbcSettings.getQueryWithDataSource() != null
            ? getDataSource(N.EMPTY_STRING, N.EMPTY_OBJECT_ARRAY, jdbcSettings) : _ds;

    // Reuse an existing transaction on this DataSource/thread if one was started by
    // SQLExecutor or JdbcUtil; only create a new one when neither exists.
    SQLTransaction tran = SQLTransaction.getTransaction(ds, CreatedBy.SQL_EXECUTOR);

    if (tran == null) {
        tran = SQLTransaction.getTransaction(ds, CreatedBy.JDBC_UTIL);
    }

    if (tran == null) {
        Connection conn = null;
        boolean noException = false;

        try {
            conn = getConnection(ds);
            tran = new SQLTransaction(ds, conn, isolation, CreatedBy.SQL_EXECUTOR, true);
            tran.incrementAndGetRef(isolation, forUpdateOnly);
            noException = true;
        } catch (SQLException e) {
            throw new UncheckedSQLException(e);
        } finally {
            // Release the borrowed connection if transaction setup failed.
            if (noException == false) {
                close(conn, ds);
            }
        }

        logger.info("Create a new SQLTransaction(id={})", tran.id());
        SQLTransaction.putTransaction(tran);
    } else {
        logger.info("Reusing the existing SQLTransaction(id={})", tran.id());
        tran.incrementAndGetRef(isolation, forUpdateOnly);
    }

    return tran;
}

/** Returns a table-backed sequence with default start value 0 and buffer size 1000. */
public DBSequence getDBSequence(final String tableName, final String seqName) {
    return new DBSequence(this, tableName, seqName, 0, 1000);
}

/**
 * Supports global sequence by db table.
* * @param tableName * @param seqName * @param startVal * @param seqBufferSize the numbers to allocate/reserve from database table when cached numbers are used up. * @return */ public DBSequence getDBSequence(final String tableName, final String seqName, final long startVal, final int seqBufferSize) { return new DBSequence(this, tableName, seqName, startVal, seqBufferSize); } /** * Supports global lock by db table * * @param tableName * @return */ public DBLock getDBLock(final String tableName) { return new DBLock(this, tableName); } public boolean doesTableExist(final String tableName) { Connection conn = getConnection(); try { return JdbcUtil.doesTableExist(conn, tableName); } finally { closeConnection(conn); } } /** * Returns {@code true} if succeed to create table, otherwise {@code false} is returned. * * @param tableName * @param schema * @return */ public boolean createTableIfNotExists(final String tableName, final String schema) { Connection conn = getConnection(); try { return JdbcUtil.createTableIfNotExists(conn, tableName, schema); } finally { closeConnection(conn); } } /** * Returns {@code true} if succeed to drop table, otherwise {@code false} is returned. 
*
     * @param tableName
     * @return
     */
    public boolean dropTableIfExists(final String tableName) {
        Connection conn = getConnection();

        try {
            return JdbcUtil.dropTableIfExists(conn, tableName);
        } finally {
            closeConnection(conn);
        }
    }

    /**
     * Returns the column names of the specified table, cached per table after the first lookup.
     *
     * @param tableName
     * @return an immutable list of column names.
     */
    public List<String> getColumnNameList(final String tableName) {
        List<String> columnNameList = _tableColumnNamePool.get(tableName);

        if (columnNameList == null) {
            Connection conn = getConnection();

            try {
                columnNameList = ImmutableList.of(JdbcUtil.getColumnNameList(conn, tableName));
                // NOTE(review): check-then-act on the pool; a concurrent miss may look the table up twice, which is harmless
                // as long as _tableColumnNamePool is a concurrent map — confirm its declaration.
                _tableColumnNamePool.put(tableName, columnNameList);
            } catch (SQLException e) {
                throw new UncheckedSQLException(e);
            } finally {
                closeConnection(conn);
            }
        }

        return columnNameList;
    }

    // Borrows a connection from this executor's primary data source.
    public Connection getConnection() {
        return getConnection(_ds);
    }

    // Returns a connection previously obtained via getConnection() to the primary data source.
    public void closeConnection(final Connection conn) {
        close(conn, _ds);
    }

    /**
     * Resolves the data source to run the given sql against. Without a data source manager/selector configured,
     * only the primary data source exists and asking for a named one is an error; otherwise the configured
     * selector picks one, honoring {@code jdbcSettings.getQueryWithDataSource()} when set.
     */
    protected DataSource getDataSource(final String sql, final Object[] parameters, final JdbcSettings jdbcSettings) {
        if (_dsm == null || _dss == null) {
            if ((jdbcSettings != null) && (jdbcSettings.getQueryWithDataSource() != null || N.notNullOrEmpty(jdbcSettings.getQueryWithDataSources()))) {
                throw new IllegalArgumentException(
                        "No data source is available with name: " + (jdbcSettings.getQueryWithDataSource() != null ? jdbcSettings.getQueryWithDataSource()
                                : N.toString(jdbcSettings.getQueryWithDataSources())));
            }

            return _ds;
        } else {
            if ((jdbcSettings == null) || (jdbcSettings.getQueryWithDataSource() == null)) {
                return _dss.select(_dsm, null, sql, parameters, null);
            } else {
                return _dss.select(_dsm, null, sql, parameters, N.asProps(QUERY_WITH_DATA_SOURCE, jdbcSettings.getQueryWithDataSource()));
            }
        }
    }

    /**
     * Same resolution as {@code getDataSource(String, Object[], JdbcSettings)}, for parameters supplied as a list.
     */
    protected DataSource getDataSource(final String sql, final List<?> parametersList, final JdbcSettings jdbcSettings) {
        if (_dsm == null || _dss == null) {
            if ((jdbcSettings != null) && (jdbcSettings.getQueryWithDataSource() != null || N.notNullOrEmpty(jdbcSettings.getQueryWithDataSources()))) {
                throw new IllegalArgumentException(
                        "No data source is available with name: " + (jdbcSettings.getQueryWithDataSource() != null ? jdbcSettings.getQueryWithDataSource()
                                : N.toString(jdbcSettings.getQueryWithDataSources())));
            }

            return _ds;
        } else {
            if ((jdbcSettings == null) || (jdbcSettings.getQueryWithDataSource() == null)) {
                return _dss.select(_dsm, null, sql, parametersList, null);
            } else {
                return _dss.select(_dsm, null, sql, parametersList, N.asProps(QUERY_WITH_DATA_SOURCE, jdbcSettings.getQueryWithDataSource()));
            }
        }
    }

    /**
     * Picks the connection for one SQL operation: an explicitly supplied connection wins; otherwise the connection
     * of an active transaction on {@code ds} is used — except that a SELECT skips a transaction opened with
     * {@code forUpdateOnly}; failing those, a fresh connection is borrowed from the data source.
     */
    protected Connection getConnection(final Connection inputConn, final DataSource ds, final JdbcSettings jdbcSettings, final SQLOperation op) {
        if (inputConn != null) {
            return inputConn;
        }

        SQLTransaction tran = SQLTransaction.getTransaction(ds, CreatedBy.SQL_EXECUTOR);

        if (tran == null) {
            tran = SQLTransaction.getTransaction(ds, CreatedBy.JDBC_UTIL);
        }

        if (tran != null && (tran.isForUpdateOnly() == false || op != SQLOperation.SELECT)) {
            return tran.connection();
        }

        return getConnection(ds);
    }

    protected Connection getConnection(final DataSource ds) {
        return JdbcUtil.getConnection(ds);
    }

    /**
     * Builds a {@link PreparedStatement}: lets the data source's slice selector rewrite the SQL when applicable,
     * logs it, prepares it with the configured settings, and binds the (non-batch) parameters.
     */
    protected PreparedStatement prepareStatement(final DataSource ds, final Connection localConn, final NamedSQL namedSQL,
            final StatementSetter statementSetter, final JdbcSettings jdbcSettings, final boolean autoGeneratedKeys, final boolean isBatch,
            final Object... parameters) throws SQLException {
        String sql = namedSQL.getParameterizedSQL();

        if (ds instanceof com.landawn.abacus.DataSource) {
            final com.landawn.abacus.DataSource ds2 = (com.landawn.abacus.DataSource) ds;

            // For batch calls, parameters[0] holds the whole list of parameter sets.
            if (isBatch) {
                sql = ds2.getSliceSelector().select(null, sql, (List<?>) parameters[0], null);
            } else {
                sql = ds2.getSliceSelector().select(null, sql, parameters, null);
            }
        }

        logSQL(sql, jdbcSettings, parameters);

        final PreparedStatement stmt = prepareStatement(localConn, sql, autoGeneratedKeys, jdbcSettings);

        setParameters(namedSQL, stmt, statementSetter, isBatch, parameters);

        return stmt;
    }

    /**
     * Prepares a statement applying {@code jdbcSettings}: generated-key configuration takes precedence over
     * result-set type/concurrency/holdability; afterwards fetch size, max rows, max field size, fetch direction
     * and query timeout are set only when explicitly configured ({@code -1} means "leave the driver default").
     */
    protected PreparedStatement prepareStatement(final Connection conn, String sql, final boolean autoGeneratedKeys, final JdbcSettings jdbcSettings)
            throws SQLException {
        PreparedStatement stmt = null;

        if (jdbcSettings == null) {
            stmt = conn.prepareStatement(sql, autoGeneratedKeys ? Statement.RETURN_GENERATED_KEYS : Statement.NO_GENERATED_KEYS);
        } else {
            if (N.notNullOrEmpty(jdbcSettings.getReturnedColumnIndexes())) {
                // if (jdbcSettings.getReturnedColumnIndexes().length != 1) {
                //     throw new IllegalArgumentException("only 1 generated key is supported At present");
                // }

                stmt = conn.prepareStatement(sql, jdbcSettings.getReturnedColumnIndexes());
            } else if (N.notNullOrEmpty(jdbcSettings.getReturnedColumnNames())) {
                // if (jdbcSettings.getReturnedColumnNames().length != 1) {
                //     throw new IllegalArgumentException("only 1 generated key is supported At present");
                // }

                stmt = conn.prepareStatement(sql, jdbcSettings.getReturnedColumnNames());
            } else if (jdbcSettings.isAutoGeneratedKeys() || autoGeneratedKeys) {
                stmt = conn.prepareStatement(sql, Statement.RETURN_GENERATED_KEYS);
            } else if ((jdbcSettings.getResultSetType() != -1) || (jdbcSettings.getResultSetConcurrency() != -1)
                    || (jdbcSettings.getResultSetHoldability() != -1)) {
                // Fall back to the library defaults for whichever of the three values is unset.
                int resultSetType = (jdbcSettings.getResultSetType() == -1) ? JdbcSettings.DEFAULT_RESULT_SET_TYPE : jdbcSettings.getResultSetType();
                int resultSetConcurrency = (jdbcSettings.getResultSetConcurrency() == -1) ? JdbcSettings.DEFAULT_RESULT_SET_CONCURRENCY
                        : jdbcSettings.getResultSetConcurrency();

                if (jdbcSettings.getResultSetHoldability() != -1) {
                    stmt = conn.prepareStatement(sql, resultSetType, resultSetConcurrency, jdbcSettings.getResultSetHoldability());
                } else {
                    stmt = conn.prepareStatement(sql, resultSetType, resultSetConcurrency);
                }
            } else {
                stmt = conn.prepareStatement(sql);
            }

            if (jdbcSettings.getFetchSize() != -1) {
                stmt.setFetchSize(jdbcSettings.getFetchSize());
            }

            if (jdbcSettings.getMaxRows() != -1) {
                stmt.setMaxRows(jdbcSettings.getMaxRows());
            }

            if (jdbcSettings.getMaxFieldSize() != -1) {
                stmt.setMaxFieldSize(jdbcSettings.getMaxFieldSize());
            }

            if (jdbcSettings.getFetchDirection() != -1) {
                stmt.setFetchDirection(jdbcSettings.getFetchDirection());
            }

            if (jdbcSettings.getQueryTimeout() != -1) {
                stmt.setQueryTimeout(jdbcSettings.getQueryTimeout());
            }
        }

        return stmt;
    }

    // protected CallableStatement prepareCallableStatement(final DataSource ds, final Connection localConn, final NamedSQL namedSQL,
    //         final StatementSetter statementSetter, final JdbcSettings jdbcSettings, final boolean autoGeneratedKeys, final boolean isBatch,
    //         final Object... parameters) throws SQLException {
    //     String sql = namedSQL.getPureSQL();
    //
    //     if (isBatch) {
    //         sql = ds.getSliceSelector().select(null, sql, (List<?>) parameters[0], null);
    //     } else {
    //         sql = ds.getSliceSelector().select(null, sql, parameters, null);
    //     }
    //
    //     logSQL(sql, jdbcSettings, parameters);
    //
    //     final CallableStatement stmt = prepareCallableStatement(localConn, sql, jdbcSettings);
    //
    //     setParameters(namedSQL, stmt, statementSetter, isBatch, parameters);
    //
    //     return stmt;
    // }
    //
    // protected CallableStatement prepareCallableStatement(final Connection conn, String sql, final JdbcSettings jdbcSettings) throws SQLException {
    //     CallableStatement stmt = null;
    //
    //     if (jdbcSettings == null) {
    //         stmt = conn.prepareCall(sql);
    //     } else {
    //         if ((jdbcSettings.getResultSetType() != -1) || (jdbcSettings.getResultSetConcurrency() != -1) || (jdbcSettings.getResultSetHoldability() != -1)) {
    //             int resultSetType = (jdbcSettings.getResultSetType() == -1) ? JdbcSettings.DEFAULT_RESULT_SET_TYPE : jdbcSettings.getResultSetType();
    //
    //             int resultSetConcurrency = (jdbcSettings.getResultSetConcurrency() == -1) ? JdbcSettings.DEFAULT_RESULT_SET_CONCURRENCY
    //                     : jdbcSettings.getResultSetConcurrency();
    //
    //             if (jdbcSettings.getResultSetHoldability() != -1) {
    //                 stmt = conn.prepareCall(sql, resultSetType, resultSetConcurrency, jdbcSettings.getResultSetHoldability());
    //             } else {
    //                 stmt = conn.prepareCall(sql, resultSetType, resultSetConcurrency);
    //             }
    //         } else {
    //             stmt = conn.prepareCall(sql);
    //         }
    //
    //         if (jdbcSettings.getFetchSize() != -1) {
    //             stmt.setFetchSize(jdbcSettings.getFetchSize());
    //         }
    //
    //         if (jdbcSettings.getMaxRows() != -1) {
    //             stmt.setMaxRows(jdbcSettings.getMaxRows());
    //         }
    //
    //         if (jdbcSettings.getMaxFieldSize() != -1) {
    //             stmt.setMaxFieldSize(jdbcSettings.getMaxFieldSize());
    //         }
    //
    //         if (jdbcSettings.getFetchDirection() != -1) {
    //             stmt.setFetchDirection(jdbcSettings.getFetchDirection());
    //         }
    //
    //         if (jdbcSettings.getQueryTimeout() != -1) {
    //             stmt.setQueryTimeout(jdbcSettings.getQueryTimeout());
    //         }
    //     }
    //     return stmt;
    // }

    // Binds the given parameters to the statement. Binding is skipped when there is nothing to bind, and for
    // batch execution — NOTE(review): batch parameter sets appear to be bound by the batch path; confirm there.
    protected void setParameters(final NamedSQL namedSQL, final PreparedStatement stmt, final StatementSetter statementSetter, final boolean isBatch,
            final Object... parameters) throws SQLException {
        if (isBatch || N.isNullOrEmpty(parameters)) {
            // ignore
        } else {
            statementSetter.setParameters(namedSQL, stmt, parameters);
        }
    }

    // Logs the SQL at INFO level when enabled via jdbcSettings, optionally with its parameter values appended.
    protected void logSQL(String sql, final JdbcSettings jdbcSettings, final Object... 
parameters) {
        if ((jdbcSettings != null) && (jdbcSettings.isLogSQL() || jdbcSettings.isLogSQLWithParameters()) && logger.isInfoEnabled()) {
            if (jdbcSettings.isLogSQLWithParameters()) {
                logger.info(sql + " {" + StringUtil.join(parameters, ", ") + "}");
            } else {
                logger.info(sql);
            }
        }
    }

    protected void close(final ResultSet rs) {
        JdbcUtil.closeQuietly(rs);
    }

    protected void close(final PreparedStatement stmt) {
        JdbcUtil.closeQuietly(stmt);
    }

    protected void close(final ResultSet rs, final PreparedStatement stmt) {
        JdbcUtil.closeQuietly(rs, stmt);
    }

    /**
     * Releases {@code localConn} unless it was supplied by the caller or belongs to an active transaction.
     */
    protected void close(final Connection localConn, final Connection inputConn, final DataSource ds) {
        // A caller-supplied connection is owned by the caller; never close it here.
        if (inputConn == null) {
            SQLTransaction tran = SQLTransaction.getTransaction(ds, CreatedBy.SQL_EXECUTOR);

            if (tran == null) {
                tran = SQLTransaction.getTransaction(ds, CreatedBy.JDBC_UTIL);
            }

            if (tran != null && tran.connection() == localConn) {
                // ignore. The transaction owns this connection; it is released on commit/rollback.
            } else {
                close(localConn, ds);
            }
        }
    }

    protected void close(final Connection conn, final DataSource ds) {
        JdbcUtil.releaseConnection(conn, ds);
    }

    /**
     * Close the underline data source and, in all cases, the data source manager if one is configured.
     *
     * @throws IOException
     */
    @Override
    public void close() throws IOException {
        try {
            if (_ds != null && _ds instanceof com.landawn.abacus.DataSource) {
                final com.landawn.abacus.DataSource ds = (com.landawn.abacus.DataSource) _ds;

                if (ds.isClosed() == false) {
                    ds.close();
                }
            }
        } finally {
            // Close the manager even if closing the primary data source failed.
            if (_dsm != null && _dsm.isClosed() == false) {
                _dsm.close();
            }
        }
    }

    // A negative/unset batch size falls back to the library default.
    protected int getBatchSize(final JdbcSettings jdbcSettings) {
        return ((jdbcSettings == null) || (jdbcSettings.getBatchSize() < 0)) ? JdbcSettings.DEFAULT_BATCH_SIZE : jdbcSettings.getBatchSize();
    }

    protected StatementSetter checkStatementSetter(final NamedSQL namedSQL, StatementSetter statementSetter) {
        if (statementSetter == null) {
            statementSetter = StatementSetter.DEFAULT;
        }

        return statementSetter;
    }

    @SuppressWarnings("unchecked")
    protected <T> ResultSetExtractor<T> checkResultSetExtractor(final NamedSQL namedSQL, ResultSetExtractor<T> resultExtractor) {
        if (resultExtractor == null) {
            resultExtractor = (ResultSetExtractor<T>) ResultExtractor.DATA_SET;
        }

        return resultExtractor;
    }

    /**
     * Merges per-SQL attributes from the SQL mapper into the given settings (or the executor-wide defaults when
     * {@code jdbcSettings} is null) and validates offset/count. NOTE(review): when no attributes apply, the
     * executor-wide settings instance itself may be returned uncopied.
     */
    protected JdbcSettings checkJdbcSettings(final JdbcSettings jdbcSettings, final NamedSQL namedSQL, final Map<String, String> attrs) {
        JdbcSettings newJdbcSettings = null;

        if (jdbcSettings == null) {
            newJdbcSettings = setJdbcSettingsForNamedSQL(_jdbcSettings, namedSQL, attrs);
        } else {
            newJdbcSettings = setJdbcSettingsForNamedSQL(jdbcSettings, namedSQL, attrs);
        }

        if ((newJdbcSettings.getOffset() < 0) || (newJdbcSettings.getCount() < 0)) {
            throw new IllegalArgumentException("offset or count can't be less than 0: " + newJdbcSettings.getOffset() + ", " + newJdbcSettings.getCount());
        }

        return newJdbcSettings;
    }

    /**
     * Applies SQLMapper attributes (batch size, fetch size, result set type, timeout) on a copy of the given
     * settings; returns the settings unchanged when there are no attributes to apply.
     */
    protected JdbcSettings setJdbcSettingsForNamedSQL(JdbcSettings jdbcSettings, final NamedSQL namedSQL, final Map<String, String> attrs) {
        if ((namedSQL == null) || N.isNullOrEmpty(attrs)) {
            return jdbcSettings;
        } else {
            // Copy before mutating so the caller's (possibly shared) settings are untouched.
            jdbcSettings = jdbcSettings.copy();

            String attr = attrs.get(SQLMapper.BATCH_SIZE);

            if (attr != null) {
                jdbcSettings.setBatchSize(N.parseInt(attr));
            }

            attr = attrs.get(SQLMapper.FETCH_SIZE);

            if (attr != null) {
                jdbcSettings.setFetchSize(N.parseInt(attr));
            }

            attr = attrs.get(SQLMapper.RESULT_SET_TYPE);

            if (attr != null) {
                Integer resultSetType = SQLMapper.RESULT_SET_TYPE_MAP.get(attr);

                if (resultSetType == null) {
                    throw new IllegalArgumentException("Result set type: '" + attr + "' is not supported");
                }

                jdbcSettings.setResultSetType(resultSetType);
            }

            attr = attrs.get(SQLMapper.TIMEOUT);

            if (attr != null) {
                jdbcSettings.setQueryTimeout(N.parseInt(attr));
            }

            return jdbcSettings;
        }
    }

    /**
     * Resolves the given sql (or sql-mapper key) to a parsed {@link NamedSQL}, preferring the configured mapper.
     */
    protected NamedSQL getNamedSQL(final String sql) {
        N.checkArgNotNull(sql, "sql");

        NamedSQL namedSQL = null;

        if (_sqlMapper != null) {
            namedSQL = _sqlMapper.get(sql);
        }

        if (namedSQL == null) {
            namedSQL = NamedSQL.parse(sql);
        }

        return namedSQL;
    }

    /**
     *
     * @param sql should be prepared sql because it will be cached.
     * @param rs
     * @return
     * @throws SQLException
     */
    protected static List<String> getColumnLabelList(final String sql, final ResultSet rs) throws SQLException {
        List<String> labelList = N.notNullOrEmpty(sql) ? _sqlColumnLabelPool.get(sql) : null;

        if (labelList == null) {
            labelList = ImmutableList.of(JdbcUtil.getColumnLabelList(rs));

            // Only short SQLs are cached; when the cache is full, ~25% of its entries are evicted first.
            if (N.notNullOrEmpty(sql) && sql.length() <= CACHED_SQL_LENGTH) {
                if (_sqlColumnLabelPool.size() >= SQL_CACHE_SIZE) {
                    final List<String> tmp = new ArrayList<>(_sqlColumnLabelPool.keySet());
                    Maps.removeKeys(_sqlColumnLabelPool, tmp.subList(0, (int) (tmp.size() * 0.25)));
                }

                _sqlColumnLabelPool.put(sql, labelList);
            }
        }

        return labelList;
    }

    // Returns true only when the sql uses named parameters and exactly one non-null Map or entity argument is passed.
    protected static boolean isEntityOrMapParameter(final NamedSQL namedSQL, final Object... 
parameters) {
        if (N.isNullOrEmpty(namedSQL.getNamedParameters())) {
            return false;
        }

        if (N.isNullOrEmpty(parameters) || (parameters.length != 1) || (parameters[0] == null)) {
            return false;
        }

        if (parameters[0] instanceof Map || (ClassUtil.isEntity(parameters[0].getClass()))) {
            return true;
        }

        return false;
    }

    /**
     * Entity-centric CRUD facade bound to one entity class and this executor.
     *
     * @param <T> the entity type.
     * @param <ID> the id type; for a composed id this may be an entity or a {@code Map}.
     *
     * @see {@link com.landawn.abacus.annotation.ReadOnly}
     * @see {@link com.landawn.abacus.annotation.ReadOnlyId}
     * @see {@link com.landawn.abacus.annotation.NonUpdatable}
     * @see {@link com.landawn.abacus.annotation.Transient}
     * @see {@link com.landawn.abacus.annotation.Table}
     * @see {@link com.landawn.abacus.annotation.Column}
     *
     * @see <a href="http://docs.oracle.com/javase/8/docs/api/java/sql/Connection.html">http://docs.oracle.com/javase/8/docs/api/java/sql/Connection.html</a>
     * @see <a href="http://docs.oracle.com/javase/8/docs/api/java/sql/Statement.html">http://docs.oracle.com/javase/8/docs/api/java/sql/Statement.html</a>
     * @see <a href="http://docs.oracle.com/javase/8/docs/api/java/sql/PreparedStatement.html">http://docs.oracle.com/javase/8/docs/api/java/sql/PreparedStatement.html</a>
     * @see <a href="http://docs.oracle.com/javase/8/docs/api/java/sql/ResultSet.html">http://docs.oracle.com/javase/8/docs/api/java/sql/ResultSet.html</a>
     */
    public static final class Mapper<T, ID> {

        static final List<String> EXISTS_SELECT_PROP_NAMES = ImmutableList.of(NSC._1);
        static final List<String> COUNT_SELECT_PROP_NAMES = ImmutableList.of(NSC.COUNT_ALL);

        // Entity metadata, resolved once in the constructor.
        private final Class<T> targetClass;
        private final Type<T> targetType;
        private final List<String> propNameList;
        private final Set<String> propNameSet;
        private final List<String> defaultSelectPropNameList;
        private final String idPropName; // first declared id property
        private final List<String> idPropNameList;
        private final Set<String> idPropNameSet;
        private final SQLExecutor sqlExecutor;
        private final NamingPolicy namingPolicy;
        private final Condition idCond; // WHERE condition covering all id properties

        // Pre-built parameterized SQL for the common by-id operations.
        private final String sql_exists_by_id;
        private final String sql_get_by_id;
        private final String sql_insert_with_id;
        private final String sql_insert_without_id;
        private final String sql_update_by_id;
        private final String sql_delete_by_id;

        private final AsyncMapper<T, ID> asyncMapper;

        // TODO cache more sqls to improve performance.
        Mapper(final Class<T> targetClass, final SQLExecutor sqlExecutor, final NamingPolicy namingPolicy) {
            this.sqlExecutor = sqlExecutor;
            this.namingPolicy = namingPolicy;

            @SuppressWarnings("deprecation")
            final List<String> idPropNames = ClassUtil.getIdFieldNames(targetClass, false);

            N.checkArgNotNullOrEmpty(idPropNames,
                    "Target class: " + ClassUtil.getCanonicalClassName(targetClass) + " must at least has one id property annotated by @Id or @ReadOnlyId on field or class");

            // N.checkArgument(idPropNames.size() == 1, "Only one id is supported at present. But Entity class {} has {} ids: {}", targetClass, idPropNames.size(),
            //         idPropNames);

            this.targetClass = targetClass;
            this.targetType = N.typeOf(targetClass);
            this.propNameList = ImmutableList.copyOf(ClassUtil.getPropGetMethodList(targetClass).keySet());
            this.propNameSet = ImmutableSet.of(N.newLinkedHashSet(ClassUtil.getPropGetMethodList(targetClass).keySet()));
            this.idPropName = idPropNames.get(0);
            this.idPropNameList = ImmutableList.copyOf(idPropNames);
            this.idPropNameSet = ImmutableSet.copyOf(idPropNames);
            this.defaultSelectPropNameList = ImmutableList.copyOf(SQLBuilder.getSelectPropNamesByClass(targetClass, false, null));

            // Single id -> one equality; composed id -> AND of one equality per id property.
            Condition cond = null;

            if (idPropNameList.size() == 1) {
                cond = CF.eq(idPropName);
            } else {
                final And and = CF.and();

                for (String idName : idPropNameList) {
                    and.add(CF.eq(idName));
                }

                cond = and;
            }

            this.idCond = cond;

            this.sql_exists_by_id = this.prepareQuery(SQLBuilder._1_list, idCond).sql;
            this.sql_get_by_id = this.prepareQuery(defaultSelectPropNameList, idCond).sql;
            this.sql_insert_with_id = this.prepareInsertSql(SQLBuilder.getInsertPropNamesByClass(targetClass, null));
            this.sql_insert_without_id = this.prepareInsertSql(SQLBuilder.getInsertPropNamesByClass(targetClass, idPropNameSet));
            this.sql_update_by_id = this.prepareUpdateSql(SQLBuilder.getUpdatePropNamesByClass(targetClass, idPropNameSet));
            this.sql_delete_by_id = this.prepareDelete(idCond).sql;

            this.asyncMapper = new AsyncMapper<T, ID>(this, sqlExecutor._asyncExecutor);
        }

        // Package-private accessors over the resolved metadata.
        Class<T> targetClass() {
            return targetClass;
        }

        Type<T> targetType() {
            return targetType;
        }

        List<String> idPropNameList() {
            return idPropNameList;
        }

        Set<String> idPropNameSet() {
            return idPropNameSet;
        }

        List<String> propNameList() {
            return propNameList;
        }

        Set<String> propNameSet() {
            return propNameSet;
        }

        // Asynchronous view over the same mapper.
        public AsyncMapper<T, ID> async() {
            return asyncMapper;
        }

        /**
         *
         * @param id which could be {@code Number}/{@code String}... or {@code Entity}/{@code Map} for composed id.
         * @return
         */
        public boolean exists(final ID id) {
            checkId(id);

            return sqlExecutor.queryForInt(sql_exists_by_id, id).orElse(0) > 0;
        }

        public boolean exists(final Condition whereCause) {
            return exists(null, whereCause);
        }

        public boolean exists(final Connection conn, final ID id) {
            checkId(id);

            return sqlExecutor.queryForSingleResult(int.class, conn, sql_exists_by_id, id).orElse(0) > 0;
        }

        public boolean exists(final Connection conn, final Condition whereCause) {
            // Limit 1: existence only needs the first matching row.
            final SP sp = prepareQuery(EXISTS_SELECT_PROP_NAMES, whereCause, 1);

            return sqlExecutor.exists(conn, sp.sql, sp.parameters.toArray());
        }

        public int count(final Condition whereCause) {
            return count(null, whereCause);
        }

        public int count(final Connection conn, final Condition whereCause) {
            final SP sp = prepareQuery(COUNT_SELECT_PROP_NAMES, whereCause);

            return sqlExecutor.count(conn, sp.sql, sp.parameters.toArray());
        }

        /**
         *
         * @param id which could be {@code Number}/{@code String}... or {@code Entity}/{@code Map} for composed id.
         * @return
         * @throws DuplicatedResultException if two or more records are found.
*/
public Optional<T> get(final ID id) throws DuplicatedResultException {
    return Optional.ofNullable(gett(id));
}

// /**
//  *
//  * @param id which could be {@code Number}/{@code String}... or {@code Entity}/{@code Map} for composed id.
//  * @param selectPropNames
//  * @return
//  * @deprecated replaced by {@code get(id, Arrays.asList(selectPropNames)}
//  */
// @Deprecated
// @SafeVarargs
// public final Optional<T> get(final ID id, final String... selectPropNames) {
//     return Optional.ofNullable(gett(id, selectPropNames));
// }

/**
 *
 * @param id which could be {@code Number}/{@code String}... or {@code Entity}/{@code Map} for composed id.
 * @param selectPropNames
 * @return
 * @throws DuplicatedResultException if two or more records are found.
 */
public Optional<T> get(final ID id, final Collection<String> selectPropNames) throws DuplicatedResultException {
    return Optional.ofNullable(gett(id, selectPropNames));
}

/**
 *
 * @param conn
 * @param id which could be {@code Number}/{@code String}... or {@code Entity}/{@code Map} for composed id.
 * @param selectPropNames
 * @return
 * @throws DuplicatedResultException if two or more records are found.
 */
public Optional<T> get(final Connection conn, final ID id, final Collection<String> selectPropNames) throws DuplicatedResultException {
    return Optional.ofNullable(gett(conn, id, selectPropNames));
}

/**
 * Same as {@code get} but returns the bare entity (or {@code null}) instead of an {@code Optional}.
 *
 * @param id which could be {@code Number}/{@code String}... or {@code Entity}/{@code Map} for composed id.
 * @return
 * @throws DuplicatedResultException if two or more records are found.
 */
public T gett(final ID id) throws DuplicatedResultException {
    return gett(id, (Collection<String>) null);
}

// /**
//  *
//  * @param id which could be {@code Number}/{@code String}... or {@code Entity}/{@code Map} for composed id.
//  * @param selectPropNames
//  * @return
//  * @deprecated replaced by {@code gett(id, Arrays.asList(selectPropNames)}
//  */
// @Deprecated
// @SafeVarargs
// public final T gett(final ID id, final String... selectPropNames) {
//     return gett(id, Arrays.asList(selectPropNames));
// }

/**
 *
 * @param id which could be {@code Number}/{@code String}... or {@code Entity}/{@code Map} for composed id.
 * @param selectPropNames
 * @return
 * @throws DuplicatedResultException if two or more records are found.
 */
public T gett(final ID id, final Collection<String> selectPropNames) throws DuplicatedResultException {
    return gett(null, id, selectPropNames);
}

/**
 *
 * @param conn
 * @param id which could be {@code Number}/{@code String}... or {@code Entity}/{@code Map} for composed id.
 * @param selectPropNames
 * @return
 * @throws DuplicatedResultException if two or more records are found.
 */
public T gett(final Connection conn, final ID id, final Collection<String> selectPropNames) throws DuplicatedResultException {
    checkId(id);

    // Default property list hits the pre-built cached SQL; an explicit list builds a fresh query.
    if (N.isNullOrEmpty(selectPropNames)) {
        return sqlExecutor.gett(targetClass, conn, sql_get_by_id, id);
    } else {
        final SP sp = prepareQuery(selectPropNames, idCond);

        return sqlExecutor.gett(targetClass, conn, sp.sql, id);
    }
}

/**
 *
 * @param ids
 * @return
 */
public List<T> batchGet(final List<ID> ids) {
    return batchGet(ids, (Collection<String>) null);
}

/**
 *
 * @param ids
 * @param selectPropNames
 * @return
 */
public List<T> batchGet(final List<ID> ids, final Collection<String> selectPropNames) {
    return batchGet(ids, selectPropNames, JdbcSettings.DEFAULT_BATCH_SIZE);
}

/**
 *
 * @param ids
 * @param selectPropNames
 * @param batchSize
 * @return
 */
public List<T> batchGet(final List<ID> ids, final Collection<String> selectPropNames, final int batchSize) {
    return batchGet(null, ids, selectPropNames, batchSize);
}

/**
 * Loads the entities for the given ids in chunks of at most {@code batchSize}.
 *
 * @param conn
 * @param ids
 * @param selectPropNames
 * @param batchSize
 * @return
 */
public List<T> batchGet(final Connection conn, final List<ID> ids, final Collection<String> selectPropNames, final int batchSize) {
    N.checkArgPositive(batchSize, "batchSize");

    if (N.isNullOrEmpty(ids)) {
        return new ArrayList<>();
    }

    N.checkArgument(idPropNameList.size() > 1
|| !(ids.get(0) instanceof Map || isEntity(ids.get(0))), "Input 'ids' can not be Maps or entities for single id ");

    final List<T> entities = new ArrayList<>(ids.size());

    if (idPropNameList.size() == 1) {
        // Rewrite the cached "WHERE id = ?" query into "WHERE id IN (?, ?, ...)".
        String sql = prepareQuery(selectPropNames, idCond).sql;
        sql = sql.substring(0, sql.lastIndexOf('=')) + "IN ";

        if (ids.size() >= batchSize) {
            // Full chunks share one IN-clause with exactly batchSize placeholders.
            final Joiner joiner = Joiner.with(", ", "(", ")").reuseCachedBuffer(true);

            for (int i = 0; i < batchSize; i++) {
                joiner.append('?');
            }

            String inSQL = sql + joiner.toString();

            for (int i = 0, to = ids.size() - batchSize; i <= to; i += batchSize) {
                entities.addAll(sqlExecutor.list(targetClass, conn, inSQL, null, null, ids.subList(i, i + batchSize).toArray()));
            }
        }

        if (ids.size() % batchSize != 0) {
            // Remainder chunk gets its own, smaller IN-clause.
            final int remaining = ids.size() % batchSize;
            final Joiner joiner = Joiner.with(", ", "(", ")").reuseCachedBuffer(true);

            for (int i = 0; i < remaining; i++) {
                joiner.append('?');
            }

            String inSQL = sql + joiner.toString();
            entities.addAll(sqlExecutor.list(targetClass, conn, inSQL, null, null, ids.subList(ids.size() - remaining, ids.size()).toArray()));
        }
    } else {
        // Composed id: each chunk is queried via a condition built by CF.eqAndOr over the id values.
        final boolean isMap = ids.get(0) instanceof Map;

        if (ids.size() >= batchSize) {
            for (int i = 0, to = ids.size() - batchSize; i <= to; i += batchSize) {
                if (isMap) {
                    entities.addAll(list(CF.eqAndOr((List<Map<String, ?>>) ids.subList(i, i + batchSize))));
                } else {
                    entities.addAll(list(CF.eqAndOr(ids.subList(i, i + batchSize), idPropNameList)));
                }
            }
        }

        if (ids.size() % batchSize != 0) {
            final int remaining = ids.size() % batchSize;

            if (isMap) {
                entities.addAll(list(CF.eqAndOr((List<Map<String, ?>>) ids.subList(ids.size() - remaining, ids.size()))));
            } else {
                entities.addAll(list(CF.eqAndOr(ids.subList(ids.size() - remaining, ids.size()), idPropNameList)));
            }
        }
    }

    return entities;
}

// findFirst overloads: all variants delegate to findFirst(conn, selectPropNames, [mapper,] whereCause, jdbcSettings).
public Optional<T> findFirst(final Condition whereCause) {
    return findFirst((Collection<String>) null, whereCause);
}

public Optional<T> findFirst(final Collection<String> selectPropNames, final Condition whereCause) {
    return findFirst(selectPropNames, whereCause, null);
}

public Optional<T> findFirst(final Collection<String> selectPropNames, final Condition whereCause, final JdbcSettings jdbcSettings) {
    return findFirst(null, selectPropNames, whereCause, jdbcSettings);
}

public Optional<T> findFirst(final Connection conn, final Condition whereCause) {
    return findFirst(conn, null, whereCause);
}

public Optional<T> findFirst(final Connection conn, final Collection<String> selectPropNames, final Condition whereCause) {
    return findFirst(conn, selectPropNames, whereCause, null);
}

// Terminal entity variant: builds the query and runs it through the executor.
public Optional<T> findFirst(final Connection conn, final Collection<String> selectPropNames, final Condition whereCause,
        final JdbcSettings jdbcSettings) {
    final SP sp = prepareQuery(selectPropNames, whereCause);

    return sqlExecutor.findFirst(targetClass, conn, sp.sql, StatementSetter.DEFAULT, jdbcSettings, sp.parameters.toArray());
}

public <R> Optional<R> findFirst(String selectPropName, final JdbcUtil.RowMapper<R> rowMapper, final Condition whereCause) {
    return findFirst(selectPropName, rowMapper, whereCause, null);
}

public <R> Optional<R> findFirst(String selectPropName, final JdbcUtil.RowMapper<R> rowMapper, final Condition whereCause,
        final JdbcSettings jdbcSettings) {
    return findFirst(null, selectPropName, rowMapper, whereCause, jdbcSettings);
}

public <R> Optional<R> findFirst(final Connection conn, String selectPropName, final JdbcUtil.RowMapper<R> rowMapper, final Condition whereCause) {
    return findFirst(conn, selectPropName, rowMapper, whereCause, null);
}

public <R> Optional<R> findFirst(final Connection conn, String selectPropName, final JdbcUtil.RowMapper<R> rowMapper, final Condition whereCause,
        final JdbcSettings jdbcSettings) {
    return findFirst(conn, Arrays.asList(selectPropName), rowMapper, whereCause, jdbcSettings);
}

public <R> Optional<R> findFirst(String selectPropName, final JdbcUtil.BiRowMapper<R> rowMapper, final Condition whereCause) {
    return 
findFirst(selectPropName, rowMapper, whereCause, null);
}

public <R> Optional<R> findFirst(String selectPropName, final JdbcUtil.BiRowMapper<R> rowMapper, final Condition whereCause,
        final JdbcSettings jdbcSettings) {
    return findFirst(null, selectPropName, rowMapper, whereCause, jdbcSettings);
}

public <R> Optional<R> findFirst(final Connection conn, String selectPropName, final JdbcUtil.BiRowMapper<R> rowMapper, final Condition whereCause) {
    return findFirst(conn, selectPropName, rowMapper, whereCause, null);
}

public <R> Optional<R> findFirst(final Connection conn, String selectPropName, final JdbcUtil.BiRowMapper<R> rowMapper, final Condition whereCause,
        final JdbcSettings jdbcSettings) {
    return findFirst(conn, Arrays.asList(selectPropName), rowMapper, whereCause, jdbcSettings);
}

public <R> Optional<R> findFirst(Collection<String> selectPropNames, final JdbcUtil.RowMapper<R> rowMapper, final Condition whereCause) {
    return findFirst(selectPropNames, rowMapper, whereCause, null);
}

public <R> Optional<R> findFirst(Collection<String> selectPropNames, final JdbcUtil.RowMapper<R> rowMapper, final Condition whereCause,
        final JdbcSettings jdbcSettings) {
    return findFirst(null, selectPropNames, rowMapper, whereCause, jdbcSettings);
}

public <R> Optional<R> findFirst(final Connection conn, Collection<String> selectPropNames, final JdbcUtil.RowMapper<R> rowMapper,
        final Condition whereCause) {
    return findFirst(conn, selectPropNames, rowMapper, whereCause, null);
}

// Terminal RowMapper variant: builds the query and runs it through the executor.
public <R> Optional<R> findFirst(final Connection conn, Collection<String> selectPropNames, final JdbcUtil.RowMapper<R> rowMapper,
        final Condition whereCause, final JdbcSettings jdbcSettings) {
    final SP sp = prepareQuery(selectPropNames, whereCause);

    return sqlExecutor.findFirst(conn, sp.sql, StatementSetter.DEFAULT, rowMapper, jdbcSettings, sp.parameters.toArray());
}

public <R> Optional<R> findFirst(Collection<String> selectPropNames, final JdbcUtil.BiRowMapper<R> rowMapper, final Condition whereCause) {
    return findFirst(selectPropNames, rowMapper, whereCause, null);
}

public <R> Optional<R> findFirst(Collection<String> selectPropNames, final JdbcUtil.BiRowMapper<R> rowMapper, final Condition whereCause,
        final JdbcSettings jdbcSettings) {
    return findFirst(null, selectPropNames, rowMapper, whereCause, jdbcSettings);
}

public <R> Optional<R> findFirst(final Connection conn, Collection<String> selectPropNames, final JdbcUtil.BiRowMapper<R> rowMapper,
        final Condition whereCause) {
    return findFirst(conn, selectPropNames, rowMapper, whereCause, null);
}

// Terminal BiRowMapper variant: maps only the first row (after any configured offset) through the row mapper.
public <R> Optional<R> findFirst(final Connection conn, Collection<String> selectPropNames, final JdbcUtil.BiRowMapper<R> rowMapper,
        final Condition whereCause, final JdbcSettings jdbcSettings) {
    final SP sp = prepareQuery(selectPropNames, whereCause);

    final ResultExtractor<R> resultExtractor = new ResultExtractor<R>() {
        @Override
        public R extractData(ResultSet rs, final JdbcSettings jdbcSettings) throws SQLException {
            int offset = jdbcSettings.getOffset();

            if (offset > 0) {
                JdbcUtil.skip(rs, offset);
            }

            // requireNonNull: a mapper returning null would be indistinguishable from "no row found".
            return rs.next() ? Objects.requireNonNull(rowMapper.apply(rs, JdbcUtil.getColumnLabelList(rs))) : null;
        }
    };

    return Optional.ofNullable(sqlExecutor.query(conn, sp.sql, StatementSetter.DEFAULT, resultExtractor, jdbcSettings, sp.parameters.toArray()));
}

// list overloads: all variants delegate to list(conn, selectPropNames, [mapper,] whereCause, jdbcSettings).
public List<T> list(final Condition whereCause) {
    return list((Collection<String>) null, whereCause);
}

public List<T> list(final Collection<String> selectPropNames, final Condition whereCause) {
    return list(selectPropNames, whereCause, null);
}

public List<T> list(final Collection<String> selectPropNames, final Condition whereCause, final JdbcSettings jdbcSettings) {
    return list(null, selectPropNames, whereCause, jdbcSettings);
}

public List<T> list(final Connection conn, final Condition whereCause) {
    return list(conn, null, whereCause);
}

public List<T> list(final Connection conn, final Collection<String> selectPropNames, final Condition whereCause) {
    return list(conn, selectPropNames, whereCause, null);
}

// Terminal entity variant: builds the query and runs it through the executor.
public List<T> list(final Connection conn, final Collection<String> selectPropNames, final Condition whereCause, final JdbcSettings jdbcSettings) {
    final SP sp = prepareQuery(selectPropNames, whereCause);

    return sqlExecutor.list(targetClass, conn, sp.sql, StatementSetter.DEFAULT, jdbcSettings, sp.parameters.toArray());
}

public <R> List<R> list(String selectPropName, final JdbcUtil.RowMapper<R> rowMapper, final Condition whereCause) {
    return list(selectPropName, rowMapper, whereCause, null);
}

public <R> List<R> list(String selectPropName, final JdbcUtil.RowMapper<R> rowMapper, final Condition whereCause, final JdbcSettings jdbcSettings) {
    return list(null, selectPropName, rowMapper, whereCause, jdbcSettings);
}

public <R> List<R> list(final Connection conn, String selectPropName, final JdbcUtil.RowMapper<R> rowMapper, final Condition whereCause) {
    return list(conn, selectPropName, rowMapper, whereCause, null);
}

public <R> List<R> list(final Connection conn, String selectPropName, final JdbcUtil.RowMapper<R> rowMapper, final 
Condition whereCause, final JdbcSettings jdbcSettings) {
    return list(conn, Arrays.asList(selectPropName), rowMapper, whereCause, jdbcSettings);
}

// Single-property overloads with a BiRowMapper.
public <R> List<R> list(String selectPropName, final JdbcUtil.BiRowMapper<R> rowMapper, final Condition whereCause) {
    return list(selectPropName, rowMapper, whereCause, null);
}

public <R> List<R> list(String selectPropName, final JdbcUtil.BiRowMapper<R> rowMapper, final Condition whereCause, final JdbcSettings jdbcSettings) {
    return list(null, selectPropName, rowMapper, whereCause, jdbcSettings);
}

public <R> List<R> list(final Connection conn, String selectPropName, final JdbcUtil.BiRowMapper<R> rowMapper, final Condition whereCause) {
    return list(conn, selectPropName, rowMapper, whereCause, null);
}

public <R> List<R> list(final Connection conn, String selectPropName, final JdbcUtil.BiRowMapper<R> rowMapper, final Condition whereCause,
        final JdbcSettings jdbcSettings) {
    return list(conn, Arrays.asList(selectPropName), rowMapper, whereCause, jdbcSettings);
}

// Multi-property overloads with a RowMapper.
public <R> List<R> list(Collection<String> selectPropNames, final JdbcUtil.RowMapper<R> rowMapper, final Condition whereCause) {
    return list(selectPropNames, rowMapper, whereCause, null);
}

public <R> List<R> list(Collection<String> selectPropNames, final JdbcUtil.RowMapper<R> rowMapper, final Condition whereCause,
        final JdbcSettings jdbcSettings) {
    return list(null, selectPropNames, rowMapper, whereCause, jdbcSettings);
}

public <R> List<R> list(final Connection conn, Collection<String> selectPropNames, final JdbcUtil.RowMapper<R> rowMapper, final Condition whereCause) {
    return list(conn, selectPropNames, rowMapper, whereCause, null);
}

/** Adapts the {@code RowMapper} to a {@code BiRowMapper} (column labels ignored) and delegates. */
public <R> List<R> list(final Connection conn, Collection<String> selectPropNames, final JdbcUtil.RowMapper<R> rowMapper, final Condition whereCause,
        final JdbcSettings jdbcSettings) {
    N.checkArgNotNull(rowMapper);

    // Anonymous adapter; its apply(...) signature continues on the next source line (chunk split).
    final JdbcUtil.BiRowMapper<R> biRowMapper = new JdbcUtil.BiRowMapper<R>() {
        @Override
        public R apply(final ResultSet
                rs, final List<String> columnLabels) throws SQLException {
            return rowMapper.apply(rs);
        }
    };

    return list(conn, selectPropNames, biRowMapper, whereCause, jdbcSettings);
}

// Multi-property overloads with a BiRowMapper.
public <R> List<R> list(Collection<String> selectPropNames, final JdbcUtil.BiRowMapper<R> rowMapper, final Condition whereCause) {
    return list(selectPropNames, rowMapper, whereCause, null);
}

public <R> List<R> list(Collection<String> selectPropNames, final JdbcUtil.BiRowMapper<R> rowMapper, final Condition whereCause,
        final JdbcSettings jdbcSettings) {
    return list(null, selectPropNames, rowMapper, whereCause, jdbcSettings);
}

public <R> List<R> list(final Connection conn, Collection<String> selectPropNames, final JdbcUtil.BiRowMapper<R> rowMapper, final Condition whereCause) {
    return list(conn, selectPropNames, rowMapper, whereCause, null);
}

/** Terminal mapped overload: builds the SQL and maps each row with {@code rowMapper}. */
public <R> List<R> list(final Connection conn, Collection<String> selectPropNames, final JdbcUtil.BiRowMapper<R> rowMapper, final Condition whereCause,
        final JdbcSettings jdbcSettings) {
    final SP sp = prepareQuery(selectPropNames, whereCause);

    return sqlExecutor.list(conn, sp.sql, StatementSetter.DEFAULT, rowMapper, jdbcSettings, sp.parameters.toArray());
}

/**
 * Execute the query in one or more data sources specified by {@code jdbcSettings} and merge the results.
 * It's designed for partition.
 *
 * @param whereCause
 * @param jdbcSettings
 * @return
 * @see SQLExecutor#listAll(Class, String, StatementSetter, JdbcSettings, Object...)
 */
public List<T> listAll(final Condition whereCause, final JdbcSettings jdbcSettings) {
    return listAll(null, whereCause, jdbcSettings);
}

/**
 * Execute the query in one or more data sources specified by {@code jdbcSettings} and merge the results.
 * It's designed for partition.
 *
 * @param selectPropNames
 * @param whereCause
 * @param jdbcSettings
 * @return
 * @see SQLExecutor#listAll(Class, String, StatementSetter, JdbcSettings, Object...
*/
public List<T> listAll(final Collection<String> selectPropNames, final Condition whereCause, final JdbcSettings jdbcSettings) {
    final SP sp = prepareQuery(selectPropNames, whereCause);

    return sqlExecutor.listAll(targetClass, sp.sql, StatementSetter.DEFAULT, jdbcSettings, sp.parameters.toArray());
}

/**
 * Execute the query in one or more data sources specified by {@code jdbcSettings} and merge the results.
 * It's designed for partition. Wraps the single property name and delegates.
 *
 * @param selectPropName
 * @param rowMapper
 * @param whereCause
 * @param jdbcSettings
 * @return
 */
public <R> List<R> listAll(String selectPropName, final JdbcUtil.RowMapper<R> rowMapper, final Condition whereCause, final JdbcSettings jdbcSettings) {
    return listAll(Arrays.asList(selectPropName), rowMapper, whereCause, jdbcSettings);
}

/**
 * Execute the query in one or more data sources specified by {@code jdbcSettings} and merge the results.
 * It's designed for partition.
 *
 * @param selectPropName
 * @param rowMapper
 * @param whereCause
 * @param jdbcSettings
 * @return
 */
public <R> List<R> listAll(String selectPropName, final JdbcUtil.BiRowMapper<R> rowMapper, final Condition whereCause, final JdbcSettings jdbcSettings) {
    return listAll(Arrays.asList(selectPropName), rowMapper, whereCause, jdbcSettings);
}

/**
 * Execute the query in one or more data sources specified by {@code jdbcSettings} and merge the results.
 * It's designed for partition. Adapts the {@code RowMapper} to a {@code BiRowMapper} and delegates.
 *
 * @param selectPropNames
 * @param rowMapper
 * @param whereCause
 * @param jdbcSettings
 * @return
 */
public <R> List<R> listAll(Collection<String> selectPropNames, final JdbcUtil.RowMapper<R> rowMapper, final Condition whereCause,
        final JdbcSettings jdbcSettings) {
    N.checkArgNotNull(rowMapper);

    // Adapter: column labels are ignored by the wrapped RowMapper.
    final JdbcUtil.BiRowMapper<R> biRowMapper = new JdbcUtil.BiRowMapper<R>() {
        @Override
        public R apply(final ResultSet rs, final List<String> columnLabels) throws SQLException {
            return rowMapper.apply(rs);
        }
    };

    return listAll(selectPropNames, biRowMapper, whereCause, jdbcSettings);
}

/**
 * Execute the query in one or more data sources specified by {@code jdbcSettings} and merge the results.
 * It's designed for partition. Terminal mapped overload.
 *
 * @param selectPropNames
 * @param rowMapper
 * @param whereCause
 * @param jdbcSettings
 * @return
 */
public <R> List<R> listAll(Collection<String> selectPropNames, final JdbcUtil.BiRowMapper<R> rowMapper, final Condition whereCause,
        final JdbcSettings jdbcSettings) {
    final SP sp = prepareQuery(selectPropNames, whereCause);

    return sqlExecutor.listAll(sp.sql, StatementSetter.DEFAULT, rowMapper, jdbcSettings, sp.parameters.toArray());
}

/**
 * Lazy execution, lazy fetch. The query execution and record fetching only happen when a terminal operation of the stream is called.
 *
 * @param whereCause
 * @return
 */
public Stream<T> stream(final Condition whereCause) {
    return stream((Collection<String>) null, whereCause);
}

/**
 * Lazy execution, lazy fetch. The query execution and record fetching only happen when a terminal operation of the stream is called.
 *
 * @param selectPropNames
 * @param whereCause
 * @return
 */
public Stream<T> stream(final Collection<String> selectPropNames, final Condition whereCause) {
    return stream(selectPropNames, whereCause, null);
}

/**
 * Lazy execution, lazy fetch. The query execution and record fetching only happen when a terminal operation of the stream is called.
*
 * @param selectPropNames
 * @param whereCause
 * @param jdbcSettings
 * @return
 */
public Stream<T> stream(final Collection<String> selectPropNames, final Condition whereCause, final JdbcSettings jdbcSettings) {
    final SP sp = prepareQuery(selectPropNames, whereCause);

    return sqlExecutor.stream(targetClass, sp.sql, StatementSetter.DEFAULT, jdbcSettings, sp.parameters.toArray());
}

/**
 * Lazy execution, lazy fetch. The query execution and record fetching only happen when a terminal operation of the stream is called.
 *
 * @param selectPropName
 * @param rowMapper
 * @param whereCause
 * @return
 */
public <R> Stream<R> stream(String selectPropName, final JdbcUtil.RowMapper<R> rowMapper, final Condition whereCause) {
    return stream(selectPropName, rowMapper, whereCause, null);
}

/**
 * Lazy execution, lazy fetch. Wraps the single property name and delegates.
 *
 * @param selectPropName
 * @param rowMapper
 * @param whereCause
 * @param jdbcSettings
 * @return
 */
public <R> Stream<R> stream(String selectPropName, final JdbcUtil.RowMapper<R> rowMapper, final Condition whereCause, final JdbcSettings jdbcSettings) {
    return stream(Arrays.asList(selectPropName), rowMapper, whereCause, jdbcSettings);
}

/**
 * Lazy execution, lazy fetch. The query execution and record fetching only happen when a terminal operation of the stream is called.
 *
 * @param selectPropName
 * @param rowMapper
 * @param whereCause
 * @return
 */
public <R> Stream<R> stream(String selectPropName, final JdbcUtil.BiRowMapper<R> rowMapper, final Condition whereCause) {
    return stream(selectPropName, rowMapper, whereCause, null);
}

/**
 * Lazy execution, lazy fetch. Wraps the single property name and delegates.
 *
 * @param selectPropName
 * @param rowMapper
 * @param whereCause
 * @param jdbcSettings
 * @return
 */
public <R> Stream<R> stream(String selectPropName, final JdbcUtil.BiRowMapper<R> rowMapper, final Condition whereCause, final JdbcSettings jdbcSettings) {
    return stream(Arrays.asList(selectPropName), rowMapper, whereCause, jdbcSettings);
}

/**
 * Lazy execution, lazy fetch. The query execution and record fetching only happen when a terminal operation of the stream is called.
 *
 * @param selectPropNames
 * @param rowMapper
 * @param whereCause
 * @return
 */
public <R> Stream<R> stream(Collection<String> selectPropNames, final JdbcUtil.RowMapper<R> rowMapper, final Condition whereCause) {
    return stream(selectPropNames, rowMapper, whereCause, null);
}

/**
 * Lazy execution, lazy fetch. Adapts the {@code RowMapper} to a {@code BiRowMapper} and delegates.
 *
 * @param selectPropNames
 * @param rowMapper
 * @param whereCause
 * @param jdbcSettings
 * @return
 */
public <R> Stream<R> stream(Collection<String> selectPropNames, final JdbcUtil.RowMapper<R> rowMapper, final Condition whereCause,
        final JdbcSettings jdbcSettings) {
    N.checkArgNotNull(rowMapper);

    // Adapter: column labels are ignored by the wrapped RowMapper.
    final JdbcUtil.BiRowMapper<R> biRowMapper = new JdbcUtil.BiRowMapper<R>() {
        @Override
        public R apply(final ResultSet rs, final List<String> columnLabels) throws SQLException {
            return rowMapper.apply(rs);
        }
    };

    return stream(selectPropNames, biRowMapper, whereCause, jdbcSettings);
}

/**
 * Lazy execution, lazy fetch. The query execution and record fetching only happen when a terminal operation of the stream is called.
 *
 * @param selectPropNames
 * @param rowMapper
 * @param whereCause
 * @return
 */
public <R> Stream<R> stream(Collection<String> selectPropNames, final JdbcUtil.BiRowMapper<R> rowMapper, final Condition whereCause) {
    return stream(selectPropNames, rowMapper, whereCause, null);
}

/**
 * Lazy execution, lazy fetch. Terminal mapped overload: the query runs only when a terminal stream operation is called.
 *
 * @param selectPropNames
 * @param rowMapper
 * @param whereCause
 * @param jdbcSettings
 * @return
 */
public <R> Stream<R> stream(Collection<String> selectPropNames, final JdbcUtil.BiRowMapper<R> rowMapper, final Condition whereCause,
        final JdbcSettings jdbcSettings) {
    final SP sp = prepareQuery(selectPropNames, whereCause);

    return sqlExecutor.stream(sp.sql, StatementSetter.DEFAULT, rowMapper, jdbcSettings, sp.parameters.toArray());
}

/**
 * Execute the query in one or more data sources specified by {@code jdbcSettings} and merge the results.
 * It's designed for partition.
 *
 * @param whereCause
 * @param jdbcSettings
 * @return
 * @see SQLExecutor#streamAll(Class, String, StatementSetter, JdbcSettings, Object...)
 */
public Stream<T> streamAll(final Condition whereCause, final JdbcSettings jdbcSettings) {
    return streamAll(null, whereCause, jdbcSettings);
}

/**
 * Execute the query in one or more data sources specified by {@code jdbcSettings} and merge the results.
 * It's designed for partition.
 *
 * @param selectPropNames
 * @param whereCause
 * @param jdbcSettings
 * @return
 * @see SQLExecutor#streamAll(Class, String, StatementSetter, JdbcSettings, Object...)
 */
public Stream<T> streamAll(final Collection<String> selectPropNames, final Condition whereCause, final JdbcSettings jdbcSettings) {
    final SP sp = prepareQuery(selectPropNames, whereCause);

    return sqlExecutor.streamAll(targetClass, sp.sql, StatementSetter.DEFAULT, jdbcSettings, sp.parameters.toArray());
}

/**
 * Execute the query in one or more data sources specified by {@code jdbcSettings} and merge the results.
 * It's designed for partition.
*
 * @param selectPropName
 * @param rowMapper
 * @param whereCause
 * @param jdbcSettings
 * @return
 */
public <R> Stream<R> streamAll(String selectPropName, final JdbcUtil.RowMapper<R> rowMapper, final Condition whereCause, final JdbcSettings jdbcSettings) {
    return streamAll(Arrays.asList(selectPropName), rowMapper, whereCause, jdbcSettings);
}

/**
 * Execute the query in one or more data sources specified by {@code jdbcSettings} and merge the results.
 * It's designed for partition.
 *
 * @param selectPropName
 * @param rowMapper
 * @param whereCause
 * @param jdbcSettings
 * @return
 */
public <R> Stream<R> streamAll(String selectPropName, final JdbcUtil.BiRowMapper<R> rowMapper, final Condition whereCause, final JdbcSettings jdbcSettings) {
    return streamAll(Arrays.asList(selectPropName), rowMapper, whereCause, jdbcSettings);
}

/**
 * Execute the query in one or more data sources specified by {@code jdbcSettings} and merge the results.
 * It's designed for partition. Adapts the {@code RowMapper} to a {@code BiRowMapper} and delegates.
 *
 * @param selectPropNames
 * @param rowMapper
 * @param whereCause
 * @param jdbcSettings
 * @return
 */
public <R> Stream<R> streamAll(Collection<String> selectPropNames, final JdbcUtil.RowMapper<R> rowMapper, final Condition whereCause,
        final JdbcSettings jdbcSettings) {
    N.checkArgNotNull(rowMapper);

    // Adapter: column labels are ignored by the wrapped RowMapper.
    final JdbcUtil.BiRowMapper<R> biRowMapper = new JdbcUtil.BiRowMapper<R>() {
        @Override
        public R apply(final ResultSet rs, final List<String> columnLabels) throws SQLException {
            return rowMapper.apply(rs);
        }
    };

    return streamAll(selectPropNames, biRowMapper, whereCause, jdbcSettings);
}

/**
 * Execute the query in one or more data sources specified by {@code jdbcSettings} and merge the results.
 * It's designed for partition. Terminal mapped overload.
 *
 * @param selectPropNames
 * @param rowMapper
 * @param whereCause
 * @param jdbcSettings
 * @return
 */
public <R> Stream<R> streamAll(Collection<String> selectPropNames, final JdbcUtil.BiRowMapper<R> rowMapper, final Condition whereCause,
        final JdbcSettings jdbcSettings) {
    final SP sp = prepareQuery(selectPropNames, whereCause);

    return sqlExecutor.streamAll(sp.sql, StatementSetter.DEFAULT, rowMapper, jdbcSettings, sp.parameters.toArray());
}

// --- query: materialize the matching rows into a DataSet. All overloads funnel into the terminal
// form below.

public DataSet query(final Condition whereCause) {
    return query((Collection<String>) null, whereCause);
}

public DataSet query(final Collection<String> selectPropNames, final Condition whereCause) {
    return query(selectPropNames, whereCause, null);
}

public DataSet query(final Collection<String> selectPropNames, final Condition whereCause, final JdbcSettings jdbcSettings) {
    return query(null, selectPropNames, whereCause, jdbcSettings);
}

public DataSet query(final Connection conn, final Condition whereCause) {
    return query(conn, null, whereCause);
}

public DataSet query(final Connection conn, final Collection<String> selectPropNames, final Condition whereCause) {
    return query(conn, selectPropNames, whereCause, null);
}

/** Terminal overload; the null ResultExtractor argument selects the executor's default DataSet extraction. */
public DataSet query(final Connection conn, final Collection<String> selectPropNames, final Condition whereCause, final JdbcSettings jdbcSettings) {
    final SP sp = prepareQuery(selectPropNames, whereCause);

    return sqlExecutor.query(conn, sp.sql, StatementSetter.DEFAULT, null, jdbcSettings, sp.parameters.toArray());
}

/**
 * Execute the query in one or more data sources specified by {@code jdbcSettings} and merge the results.
 * It's designed for partition.
 *
 * @param whereCause
 * @param jdbcSettings
 * @return
 * @see SQLExecutor#queryAll(String, StatementSetter, JdbcSettings, Object...
*/
public DataSet queryAll(final Condition whereCause, final JdbcSettings jdbcSettings) {
    return queryAll(null, whereCause, jdbcSettings);
}

/**
 * Execute the query in one or more data sources specified by {@code jdbcSettings} and merge the results.
 * It's designed for partition.
 *
 * @param selectPropNames
 * @param whereCause
 * @param jdbcSettings
 * @return
 * @see SQLExecutor#queryAll(String, StatementSetter, JdbcSettings, Object...)
 */
public DataSet queryAll(final Collection<String> selectPropNames, final Condition whereCause, final JdbcSettings jdbcSettings) {
    final SP sp = prepareQuery(selectPropNames, whereCause);

    return sqlExecutor.queryAll(sp.sql, StatementSetter.DEFAULT, jdbcSettings, sp.parameters.toArray());
}

// --- queryForXxx: single-value convenience queries. Each selects one property, limits the query to
// one row (prepareQuery(..., 1); limit skipped on Oracle/SQL Server) and returns the value wrapped in
// the matching optional/nullable type.

/** @see Mapper#queryForSingleResult(Class, Connection, String, Condition, JdbcSettings) */
public OptionalBoolean queryForBoolean(final String selectPropName, final Condition whereCause) {
    final SP sp = prepareQuery(Arrays.asList(selectPropName), whereCause, 1);

    return sqlExecutor.queryForBoolean(sp.sql, sp.parameters.toArray());
}

/** @see Mapper#queryForSingleResult(Class, Connection, String, Condition, JdbcSettings) */
public OptionalChar queryForChar(final String selectPropName, final Condition whereCause) {
    final SP sp = prepareQuery(Arrays.asList(selectPropName), whereCause, 1);

    return sqlExecutor.queryForChar(sp.sql, sp.parameters.toArray());
}

/** @see Mapper#queryForSingleResult(Class, Connection, String, Condition, JdbcSettings) */
public OptionalByte queryForByte(final String selectPropName, final Condition whereCause) {
    final SP sp = prepareQuery(Arrays.asList(selectPropName), whereCause, 1);

    return sqlExecutor.queryForByte(sp.sql, sp.parameters.toArray());
}

/** @see Mapper#queryForSingleResult(Class, Connection, String, Condition, JdbcSettings) */
public OptionalShort queryForShort(final String selectPropName, final Condition whereCause) {
    final SP sp = prepareQuery(Arrays.asList(selectPropName), whereCause, 1);

    return sqlExecutor.queryForShort(sp.sql, sp.parameters.toArray());
}

/** @see Mapper#queryForSingleResult(Class, Connection, String, Condition, JdbcSettings) */
public OptionalInt queryForInt(final String selectPropName, final Condition whereCause) {
    final SP sp = prepareQuery(Arrays.asList(selectPropName), whereCause, 1);

    return sqlExecutor.queryForInt(sp.sql, sp.parameters.toArray());
}

/** @see Mapper#queryForSingleResult(Class, Connection, String, Condition, JdbcSettings) */
public OptionalLong queryForLong(final String selectPropName, final Condition whereCause) {
    final SP sp = prepareQuery(Arrays.asList(selectPropName), whereCause, 1);

    return sqlExecutor.queryForLong(sp.sql, sp.parameters.toArray());
}

/** @see Mapper#queryForSingleResult(Class, Connection, String, Condition, JdbcSettings) */
public OptionalFloat queryForFloat(final String selectPropName, final Condition whereCause) {
    final SP sp = prepareQuery(Arrays.asList(selectPropName), whereCause, 1);

    return sqlExecutor.queryForFloat(sp.sql, sp.parameters.toArray());
}

/** @see Mapper#queryForSingleResult(Class, Connection, String, Condition, JdbcSettings) */
public OptionalDouble queryForDouble(final String selectPropName, final Condition whereCause) {
    final SP sp = prepareQuery(Arrays.asList(selectPropName), whereCause, 1);

    return sqlExecutor.queryForDouble(sp.sql, sp.parameters.toArray());
}

/** @see Mapper#queryForSingleResult(Class, Connection, String, Condition, JdbcSettings) */
public Nullable<BigDecimal> queryForBigDecimal(final String selectPropName, final Condition whereCause) {
    final SP sp = prepareQuery(Arrays.asList(selectPropName), whereCause, 1);

    return sqlExecutor.queryForBigDecimal(sp.sql, sp.parameters.toArray());
}

/** @see Mapper#queryForSingleResult(Class, Connection, String, Condition, JdbcSettings) */
public Nullable<String> queryForString(final String selectPropName, final Condition whereCause) {
    final SP sp = prepareQuery(Arrays.asList(selectPropName), whereCause, 1);

    return sqlExecutor.queryForString(sp.sql, sp.parameters.toArray());
}

/** @see Mapper#queryForSingleResult(Class, Connection, String, Condition, JdbcSettings) */
public Nullable<java.sql.Date> queryForDate(final String selectPropName, final Condition whereCause) {
    final SP sp = prepareQuery(Arrays.asList(selectPropName), whereCause, 1);

    return sqlExecutor.queryForDate(sp.sql, sp.parameters.toArray());
}

/** @see Mapper#queryForSingleResult(Class, Connection, String, Condition, JdbcSettings) */
public Nullable<java.sql.Time> queryForTime(final String selectPropName, final Condition whereCause) {
    final SP sp = prepareQuery(Arrays.asList(selectPropName), whereCause, 1);

    return sqlExecutor.queryForTime(sp.sql, sp.parameters.toArray());
}

/** @see Mapper#queryForSingleResult(Class, Connection, String, Condition, JdbcSettings) */
public Nullable<java.sql.Timestamp> queryForTimestamp(final String selectPropName, final Condition whereCause) {
    final SP sp = prepareQuery(Arrays.asList(selectPropName), whereCause, 1);

    return sqlExecutor.queryForTimestamp(sp.sql, sp.parameters.toArray());
}

/**
 * @param targetValueClass
 * @param selectPropName
 * @param id
 * @return
 * @see Mapper#queryForSingleResult(Class, Connection, String, Condition,
JdbcSettings)
 */
public <V> Nullable<V> queryForSingleResult(final Class<V> targetValueClass, final String selectPropName, final ID id) {
    // Look up by id; the condition is derived from the id via id2Cond(id, false).
    return queryForSingleResult(targetValueClass, selectPropName, id2Cond(id, false));
}

/**
 * @see Mapper#queryForSingleResult(Class, Connection, String, Condition, JdbcSettings)
 */
public <V> Nullable<V> queryForSingleResult(final Class<V> targetValueClass, final String selectPropName, final Condition whereCause) {
    return queryForSingleResult(targetValueClass, selectPropName, whereCause, null);
}

/**
 * @see Mapper#queryForSingleResult(Class, Connection, String, Condition, JdbcSettings)
 */
public <V> Nullable<V> queryForSingleResult(final Class<V> targetValueClass, final String selectPropName, final Condition whereCause,
        final JdbcSettings jdbcSettings) {
    return queryForSingleResult(targetValueClass, null, selectPropName, whereCause, jdbcSettings);
}

/**
 * @see Mapper#queryForSingleResult(Class, Connection, String, Condition, JdbcSettings)
 */
public <V> Nullable<V> queryForSingleResult(final Class<V> targetValueClass, final Connection conn, final String selectPropName, final ID id) {
    return queryForSingleResult(targetValueClass, conn, selectPropName, id2Cond(id, false));
}

/**
 * @see Mapper#queryForSingleResult(Class, Connection, String, Condition, JdbcSettings)
 */
public <V> Nullable<V> queryForSingleResult(final Class<V> targetValueClass, final Connection conn, final String selectPropName,
        final Condition whereCause) {
    return queryForSingleResult(targetValueClass, conn, selectPropName, whereCause, null);
}

/**
 * Returns a {@code Nullable} describing the value in the first row/column if it exists, otherwise return an empty {@code Nullable}.
 *
 * @param targetValueClass
 * @param conn
 * @param selectPropName
 * @param whereCause
 * @param jdbcSettings
 * @return
 * @see Mapper#queryForSingleResult(Class, Connection, String, Condition, JdbcSettings)
 */
public <V> Nullable<V> queryForSingleResult(final Class<V> targetValueClass, final Connection conn, final String selectPropName,
        final Condition whereCause, final JdbcSettings jdbcSettings) {
    // Only the first matching row is examined (limit 1).
    final SP sp = prepareQuery(Arrays.asList(selectPropName), whereCause, 1);

    return sqlExecutor.queryForSingleResult(targetValueClass, conn, sp.sql, StatementSetter.DEFAULT, jdbcSettings, sp.parameters.toArray());
}

// --- queryForUniqueResult: like queryForSingleResult, but the result must be unique.

/**
 * @param targetValueClass
 * @param selectPropName
 * @param id
 * @return
 * @throws DuplicatedResultException if two or more records are found.
 */
public <V> Nullable<V> queryForUniqueResult(final Class<V> targetValueClass, final String selectPropName, final ID id) throws DuplicatedResultException {
    return queryForUniqueResult(targetValueClass, selectPropName, id2Cond(id, false));
}

/**
 * @param targetValueClass
 * @param selectPropName
 * @param whereCause
 * @return
 * @throws DuplicatedResultException
 */
public <V> Nullable<V> queryForUniqueResult(final Class<V> targetValueClass, final String selectPropName, final Condition whereCause)
        throws DuplicatedResultException {
    return queryForUniqueResult(targetValueClass, selectPropName, whereCause, null);
}

/**
 * @throws DuplicatedResultException if two or more records are found.
 */
public <V> Nullable<V> queryForUniqueResult(final Class<V> targetValueClass, final String selectPropName, final Condition whereCause,
        final JdbcSettings jdbcSettings) throws DuplicatedResultException {
    return queryForUniqueResult(targetValueClass, null, selectPropName, whereCause, jdbcSettings);
}

/**
 * @throws DuplicatedResultException if two or more records are found.
 */
public <V> Nullable<V> queryForUniqueResult(final Class<V> targetValueClass, final Connection conn, final String selectPropName, final ID id)
        throws DuplicatedResultException {
    return queryForUniqueResult(targetValueClass, conn, selectPropName, id2Cond(id, false));
}

/**
 * @throws DuplicatedResultException if two or more records are found.
 */
public <V> Nullable<V> queryForUniqueResult(final Class<V> targetValueClass, final Connection conn, final String selectPropName,
        final Condition whereCause) throws DuplicatedResultException {
    return queryForUniqueResult(targetValueClass, conn, selectPropName, whereCause, null);
}

/**
 * Terminal overload (implementation continues past this chunk).
 *
 * @param targetValueClass
 * @param conn
 * @param selectPropName
 * @param whereCause
 * @param jdbcSettings
 * @return
 * @throws DuplicatedResultException if two or more records are found.
*/ public <V> Nullable<V> queryForUniqueResult(final Class<V> targetValueClass, final Connection conn, final String selectPropName, final Condition whereCause, final JdbcSettings jdbcSettings) throws DuplicatedResultException { final SP sp = prepareQuery(Arrays.asList(selectPropName), whereCause, 1); return sqlExecutor.queryForUniqueResult(targetValueClass, conn, sp.sql, StatementSetter.DEFAULT, jdbcSettings, sp.parameters.toArray()); } private SP prepareQuery(final Collection<String> selectPropNames, final Condition whereCause) { return prepareQuery(selectPropNames, whereCause, 0); } private SP prepareQuery(Collection<String> selectPropNames, final Condition whereCause, final int count) { if (N.isNullOrEmpty(selectPropNames)) { selectPropNames = defaultSelectPropNameList; } SQLBuilder sqlBuilder = null; switch (namingPolicy) { case LOWER_CASE_WITH_UNDERSCORE: if (N.isNullOrEmpty(selectPropNames)) { sqlBuilder = NSC.selectFrom(targetClass).where(whereCause); } else { sqlBuilder = NSC.select(selectPropNames).from(targetClass).where(whereCause); } break; case UPPER_CASE_WITH_UNDERSCORE: if (N.isNullOrEmpty(selectPropNames)) { sqlBuilder = NAC.selectFrom(targetClass).where(whereCause); } else { sqlBuilder = NAC.select(selectPropNames).from(targetClass).where(whereCause); } break; case LOWER_CAMEL_CASE: if (N.isNullOrEmpty(selectPropNames)) { sqlBuilder = NLC.selectFrom(targetClass).where(whereCause); } else { sqlBuilder = NLC.select(selectPropNames).from(targetClass).where(whereCause); } break; default: throw new RuntimeException("Unsupported naming policy: " + namingPolicy); } if (count > 0 && count < Integer.MAX_VALUE) { switch (sqlExecutor.dbVersion()) { case ORACLE: case SQL_SERVER: // Do nothing because limit is not supported. break; default: sqlBuilder.limit(count); } } return sqlBuilder.pair(); } /** * Insert the specified entity into data store, and set back the auto-generated id to the specified entity if there is the auto-generated id. 
* * @param entity * @return the auto-generated id or null if there is no auto-generated id. */ public ID insert(final T entity) { return insert(null, entity); } public ID insert(final T entity, final Collection<String> propNamesToInsert) { return insert(null, entity, propNamesToInsert); } /** * * @param props * @return the auto-generated id or null if there is no auto-generated id. */ public ID insert(final Map<String, Object> props) { return insert(null, props); } public ID insert(final Connection conn, final T entity) { return insert(conn, entity, null); } public ID insert(final Connection conn, final T entity, final Collection<String> propNamesToInsert) { N.checkArgNotNull(entity); final String sql = prepareInsertSql(entity, propNamesToInsert); return sqlExecutor.insert(conn, sql, entity); } /** * @param conn * @param props * @return the auto-generated id or null if there is no auto-generated id. */ public ID insert(final Connection conn, final Map<String, Object> props) { N.checkArgNotNull(props); final String sql = prepareInsertSql(props); return sqlExecutor.insert(conn, sql, props); } public List<ID> batchInsert(final Collection<? extends T> entities) { return batchInsert(entities, JdbcSettings.DEFAULT_BATCH_SIZE); } public List<ID> batchInsert(final Collection<? extends T> entities, final int batchSize) { return batchInsert(entities, batchSize, IsolationLevel.DEFAULT); } public List<ID> batchInsert(final Collection<? extends T> entities, final int batchSize, final IsolationLevel isolationLevel) { return batchInsert(entities, null, batchSize, isolationLevel); } public List<ID> batchInsert(final Collection<? extends T> entities, final Collection<String> propNamesToInsert, final int batchSize, final IsolationLevel isolationLevel) { return batchInsert(null, entities, propNamesToInsert, batchSize, isolationLevel); } public List<ID> batchInsert(final Connection conn, final Collection<? 
extends T> entities) { return batchInsert(conn, entities, JdbcSettings.DEFAULT_BATCH_SIZE); } public List<ID> batchInsert(final Connection conn, final Collection<? extends T> entities, final int batchSize) { return batchInsert(conn, entities, null, batchSize); } /** * Insert All the records by batch operation. And set back auto-generated ids to the specified entities if there are the auto-generated ids. * * @param conn * @param entities which must have the same properties set for insertion. * @param batchSize Default value is 200. * @param entities * @param propNamesToInsert * @param batchSize * @return the auto-generated id list or an empty list if there is no auto-generated id. */ public List<ID> batchInsert(final Connection conn, final Collection<? extends T> entities, final Collection<String> propNamesToInsert, final int batchSize) { return batchInsert(conn, entities, propNamesToInsert, batchSize, IsolationLevel.DEFAULT); } @SuppressWarnings("deprecation") private List<ID> batchInsert(final Connection conn, final Collection<? extends T> entities, final Collection<String> propNamesToInsert, final int batchSize, final IsolationLevel isolationLevel) { N.checkArgPositive(batchSize, "batchSize"); if (N.isNullOrEmpty(entities)) { return new ArrayList<>(); } final T entity = N.firstOrNullIfEmpty(entities); Collection<String> insertingPropNames = null; if (N.isNullOrEmpty(propNamesToInsert) && N.isDirtyMarker(entity.getClass())) { insertingPropNames = new HashSet<>(); for (T e : entities) { insertingPropNames.addAll(((DirtyMarker) e).signedPropNames()); } } else { final boolean isDefaultIdPropValue = JdbcUtil.isDefaultIdPropValue(ClassUtil.getPropValue(entity, idPropName)); for (String idPropName : idPropNameList) { for (T e : entities) { if (isDefaultIdPropValue != JdbcUtil.isDefaultIdPropValue(ClassUtil.getPropValue(e, idPropName))) { throw new IllegalArgumentException( "Inconsistent id properties initialiaztion. 
Id properties are set in some entities, but not in others"); } } } insertingPropNames = propNamesToInsert; } final String sql = prepareInsertSql(entity, insertingPropNames); final JdbcSettings jdbcSettings = JdbcSettings.create().setBatchSize(batchSize).setIsolationLevel(isolationLevel); final List<?> parametersList = entities instanceof List ? (List<?>) entities : new ArrayList<>(entities); return sqlExecutor.batchInsert(conn, sql, StatementSetter.DEFAULT, jdbcSettings, parametersList); } @SuppressWarnings("deprecation") private String prepareInsertSql(final T entity, final Collection<String> propNamesToInsert) { checkEntity(entity); final boolean isDirtyMarkerEntity = ClassUtil.isDirtyMarker(entity.getClass()); Collection<String> insertingPropNames = propNamesToInsert; if (N.isNullOrEmpty(propNamesToInsert)) { if (isDirtyMarkerEntity) { insertingPropNames = ((DirtyMarker) entity).signedPropNames(); } else { boolean isDefaultIdPropValue = true; for (String idPropName : idPropNameList) { if (!JdbcUtil.isDefaultIdPropValue(ClassUtil.getPropValue(entity, idPropName))) { isDefaultIdPropValue = false; break; } } return isDefaultIdPropValue ? 
sql_insert_without_id : sql_insert_with_id; } } return prepareInsertSql(insertingPropNames); } private String prepareInsertSql(final Map<String, Object> props) { N.checkArgument(N.notNullOrEmpty(props), "props"); return prepareInsertSql(props.keySet()); } private String prepareInsertSql(final Collection<String> insertingPropNames) { N.checkArgument(N.notNullOrEmpty(insertingPropNames), "insertingPropNames"); switch (namingPolicy) { case LOWER_CASE_WITH_UNDERSCORE: return NSC.insert(insertingPropNames).into(targetClass).sql(); case UPPER_CASE_WITH_UNDERSCORE: return NAC.insert(insertingPropNames).into(targetClass).sql(); case LOWER_CAMEL_CASE: return NLC.insert(insertingPropNames).into(targetClass).sql(); default: throw new RuntimeException("Unsupported naming policy: " + namingPolicy); } } /** * Execute {@code add} and return the added entity if the record doesn't, otherwise, {@code update} is executed and updated db record is returned. * * @param entity * @return */ public T upsert(final T entity) { final T dbEntity = idPropNameList.size() == 1 ? gett((ID) ClassUtil.getPropValue(entity, idPropName)) : gett((ID) entity); if (dbEntity == null) { insert(entity); return entity; } else { N.merge(entity, dbEntity, false, idPropNameSet); update(dbEntity); return dbEntity; } } /** * Execute {@code add} and return the added entity if the record doesn't, otherwise, {@code update} is executed and updated db record is returned. * * @param entity * @param whereCause to verify if the record exists or not. 
* @return */ public T upsert(final T entity, final Condition whereCause) { N.checkArgNotNull(whereCause, "whereCause"); final T dbEntity = findFirst(whereCause).orNull(); if (dbEntity == null) { insert(entity); return entity; } else { N.merge(entity, dbEntity, false, idPropNameSet); update(dbEntity); return dbEntity; } } @SuppressWarnings("deprecation") public boolean refresh(final T entity) { final Collection<String> propNamesToRefresh = ClassUtil.isDirtyMarker(entity.getClass()) ? ((DirtyMarker) entity).signedPropNames() : defaultSelectPropNameList; return refresh(entity, propNamesToRefresh); } /** * * @param entity * @param propNamesToRefresh * @return {@code false} if no record found by the ids in the specified {@code entity}. */ @SuppressWarnings("deprecation") public boolean refresh(final T entity, Collection<String> propNamesToRefresh) { if (N.isNullOrEmpty(propNamesToRefresh)) { return idPropNameList.size() == 1 ? exists((ID) ClassUtil.getPropValue(entity, idPropName)) : exists((ID) entity); } final T dbEntity = idPropNameList.size() == 1 ? 
gett((ID) ClassUtil.getPropValue(entity, idPropName), propNamesToRefresh) : gett((ID) entity, propNamesToRefresh); if (dbEntity == null) { return false; } else { N.merge(dbEntity, entity); if (ClassUtil.isDirtyMarker(entity.getClass())) { ((DirtyMarker) entity).markDirty(propNamesToRefresh, false); } return true; } } public int update(final T entity) { return update(entity, (Collection<String>) null); } public int update(final T entity, final Collection<String> propNamesToUpdate) { return update((Connection) null, entity, propNamesToUpdate); } public int update(final Map<String, Object> props, final ID id) { return update((Connection) null, props, id); } public int update(final Map<String, Object> props, final Condition whereCause) { return update((Connection) null, props, whereCause); } public int update(final Connection conn, final T entity) { return update(conn, entity, (Collection<String>) null); } @SuppressWarnings("deprecation") public int update(final Connection conn, final T entity, final Collection<String> propNamesToUpdate) { N.checkArgNotNull(entity); if (propNamesToUpdate == null && ClassUtil.isDirtyMarker(entity.getClass()) && !((DirtyMarker) entity).isDirty()) { return 0; } final String sql = prepareUpdateSql(entity, propNamesToUpdate); final int updateCount = sqlExecutor.update(conn, sql, entity); // postUpdate(entity, propNamesToUpdate); return updateCount; } public int update(final Connection conn, final Map<String, Object> props, final ID id) { N.checkArgNotNull(id); return update(conn, props, id2Cond(id, false)); } public int update(final Connection conn, final Map<String, Object> props, final Condition whereCause) { N.checkArgNotNull(props); N.checkArgNotNull(whereCause); if (N.isNullOrEmpty(props)) { return 0; } final SP sp = prepareUpdate(props, whereCause); return sqlExecutor.update(conn, sp.sql, sp.parameters.toArray()); } /** * Update All the records by batch operation. * * @param entities which must have the same properties set for update. 
* @return */ public int batchUpdate(final Collection<? extends T> entities) { return batchUpdate(entities, (Collection<String>) null); } /** * * @param entities * @param propNamesToUpdate * @return */ public int batchUpdate(final Collection<? extends T> entities, final Collection<String> propNamesToUpdate) { return batchUpdate(entities, propNamesToUpdate, JdbcSettings.DEFAULT_BATCH_SIZE); } public int batchUpdate(final Collection<? extends T> entities, final int batchSize) { return batchUpdate(entities, (Collection<String>) null, batchSize); } /** * Update All the records by batch operation. * * @param entities which must have the same properties set for update. * @param propNamesToUpdate * @param batchSize Default value is 200. * @return */ public int batchUpdate(final Collection<? extends T> entities, final Collection<String> propNamesToUpdate, final int batchSize) { return batchUpdate(entities, propNamesToUpdate, batchSize, IsolationLevel.DEFAULT); } public int batchUpdate(final Collection<? extends T> entities, final int batchSize, final IsolationLevel isolationLevel) { return batchUpdate(entities, (Collection<String>) null, batchSize, isolationLevel); } public int batchUpdate(final Collection<? extends T> entities, final Collection<String> propNamesToUpdate, final int batchSize, final IsolationLevel isolationLevel) { return batchUpdate((Connection) null, entities, propNamesToUpdate, batchSize, isolationLevel); } public int batchUpdate(final Connection conn, final Collection<? extends T> entities) { return batchUpdate(conn, entities, (Collection<String>) null); } public int batchUpdate(final Connection conn, final Collection<? extends T> entities, final Collection<String> propNamesToUpdate) { return batchUpdate(conn, entities, propNamesToUpdate, JdbcSettings.DEFAULT_BATCH_SIZE); } public int batchUpdate(final Connection conn, final Collection<? 
extends T> entities, final int batchSize) { return batchUpdate(conn, entities, (Collection<String>) null, batchSize); } public int batchUpdate(final Connection conn, final Collection<? extends T> entities, final Collection<String> propNamesToUpdate, final int batchSize) { return batchUpdate(conn, entities, propNamesToUpdate, batchSize, IsolationLevel.DEFAULT); } @SuppressWarnings("deprecation") private int batchUpdate(final Connection conn, final Collection<? extends T> entities, final Collection<String> propNamesToUpdate, final int batchSize, final IsolationLevel isolationLevel) { N.checkArgPositive(batchSize, "batchSize"); if (N.isNullOrEmpty(entities)) { return 0; } final T entity = N.firstOrNullIfEmpty(entities); Collection<String> updatingPropNames = null; if (N.isNullOrEmpty(propNamesToUpdate) && N.isDirtyMarker(entity.getClass())) { updatingPropNames = new HashSet<>(); for (T e : entities) { updatingPropNames.addAll(((DirtyMarker) e).dirtyPropNames()); } } else { updatingPropNames = propNamesToUpdate; } final String sql = prepareUpdateSql(entity, updatingPropNames); final JdbcSettings jdbcSettings = JdbcSettings.create().setBatchSize(batchSize).setIsolationLevel(isolationLevel); final List<?> parametersList = entities instanceof List ? 
(List<?>) entities : new ArrayList<>(entities); final int updateCount = sqlExecutor.batchUpdate(conn, sql, StatementSetter.DEFAULT, jdbcSettings, parametersList); // if (N.firstNonNull(entities).orNull() instanceof DirtyMarker) { // for (Object entity : entities) { // postUpdate(entity, propNamesToUpdate); // } // } return updateCount; } @SuppressWarnings("deprecation") private String prepareUpdateSql(final T entity, final Collection<String> propNamesToUpdate) { checkEntity(entity); final boolean isDirtyMarkerEntity = ClassUtil.isDirtyMarker(entity.getClass()); Collection<String> updatingpropNames = propNamesToUpdate; if (N.isNullOrEmpty(propNamesToUpdate)) { if (isDirtyMarkerEntity) { updatingpropNames = ((DirtyMarker) entity).dirtyPropNames(); } else { return sql_update_by_id; } } return prepareUpdateSql(updatingpropNames); } private SP prepareUpdate(final Map<String, Object> props, final Condition whereCause) { N.checkArgument(N.notNullOrEmpty(props), "props"); switch (namingPolicy) { case LOWER_CASE_WITH_UNDERSCORE: return NSC.update(targetClass).set(props).where(whereCause).pair(); case UPPER_CASE_WITH_UNDERSCORE: return NAC.update(targetClass).set(props).where(whereCause).pair(); case LOWER_CAMEL_CASE: return NLC.update(targetClass).set(props).where(whereCause).pair(); default: throw new RuntimeException("Unsupported naming policy: " + namingPolicy); } } private String prepareUpdateSql(final Collection<String> propNamesToUpdate) { N.checkArgument(N.notNullOrEmpty(propNamesToUpdate), "propNamesToUpdate"); Condition cond = null; if (idPropNameList.size() == 1) { cond = CF.eq(idPropName); } else { And and = new And(); for (String idPropName : idPropNameList) { and.add(CF.eq(idPropName)); } cond = and; } switch (namingPolicy) { case LOWER_CASE_WITH_UNDERSCORE: return NSC.update(targetClass).set(propNamesToUpdate).where(cond).sql(); case UPPER_CASE_WITH_UNDERSCORE: return NAC.update(targetClass).set(propNamesToUpdate).where(cond).sql(); case LOWER_CAMEL_CASE: 
return NLC.update(targetClass).set(propNamesToUpdate).where(cond).sql(); default: throw new RuntimeException("Unsupported naming policy: " + namingPolicy); } } // @SuppressWarnings("deprecation") // private void postUpdate(final Object entity, final Collection<String> propNamesToUpdate) { // if (ClassUtil.isDirtyMarker(entity.getClass())) { // if (propNamesToUpdate == null) { // ((DirtyMarker) entity).markDirty(false); // } else { // ((DirtyMarker) entity).markDirty(propNamesToUpdate, false); // } // } // } public int delete(final Condition whereCause) { return delete(null, whereCause); } public int delete(final Connection conn, final Condition whereCause) { N.checkArgNotNull(whereCause); if (idPropNameList.size() == 1 && whereCause instanceof Equal && ((Equal) whereCause).getPropName().equals(idPropName)) { final ID id = ((Equal) whereCause).getPropValue(); return sqlExecutor.update(conn, sql_delete_by_id, id); } final SP sp = prepareDelete(whereCause); return sqlExecutor.update(conn, sp.sql, sp.parameters.toArray()); } /** * * @param entity * @return */ public int delete(final T entity) { return delete(null, entity); } /** * * @param conn * @param entity * @return */ public int delete(final Connection conn, final T entity) { N.checkArgNotNull(entity); checkEntity(entity); return sqlExecutor.update(conn, sql_delete_by_id, entity); } /** * Delete all the records by batch operation. * * @param entities * @return */ public int batchDelete(final Collection<T> entities) { return batchDelete(entities, JdbcSettings.DEFAULT_BATCH_SIZE); } /** * Delete all the records by batch operation. * * @param entities * @param batchSize Default value is 200. * @return */ public int batchDelete(final Collection<T> entities, final int batchSize) { return batchDelete(entities, batchSize, IsolationLevel.DEFAULT); } /** * Delete all the records by batch operation. * * @param entities * @param batchSize Default value is 200. 
* @param isolationLevel * @return */ public int batchDelete(final Collection<T> entities, final int batchSize, final IsolationLevel isolationLevel) { return batchDelete(null, entities, batchSize, isolationLevel); } /** * Delete all the records by batch operation. * * @param conn * @param entities * @return */ public int batchDelete(final Connection conn, final Collection<T> entities) { return batchDelete(conn, entities, JdbcSettings.DEFAULT_BATCH_SIZE); } /** * Delete all the records by batch operation. * * @param conn * @param entities * @param batchSize * @return */ public int batchDelete(final Connection conn, final Collection<T> entities, final int batchSize) { return batchDelete(conn, entities, batchSize, IsolationLevel.DEFAULT); } private int batchDelete(final Connection conn, final Collection<T> entities, final int batchSize, final IsolationLevel isolationLevel) { N.checkArgPositive(batchSize, "batchSize"); if (N.isNullOrEmpty(entities)) { return 0; } final List<T> ids = entities instanceof List ? 
((List<T>) entities) : N.newArrayList(entities); final JdbcSettings jdbcSettings = JdbcSettings.create().setBatchSize(batchSize).setIsolationLevel(isolationLevel); return sqlExecutor.batchUpdate(conn, sql_delete_by_id, jdbcSettings, ids); } private SP prepareDelete(final Condition whereCause) { SP sp = null; switch (namingPolicy) { case LOWER_CASE_WITH_UNDERSCORE: sp = NSC.deleteFrom(targetClass).where(whereCause).pair(); break; case UPPER_CASE_WITH_UNDERSCORE: sp = NAC.deleteFrom(targetClass).where(whereCause).pair(); break; case LOWER_CAMEL_CASE: sp = NLC.deleteFrom(targetClass).where(whereCause).pair(); break; default: throw new RuntimeException("Unsupported naming policy: " + namingPolicy); } return sp; } public int deleteById(final ID id) { return deleteById(null, id); } public int deleteById(final Connection conn, final ID id) { N.checkArgNotNull(id); checkId(id); return sqlExecutor.update(conn, sql_delete_by_id, id); } /** * Delete all the records by batch operation. * * @param ids * @return */ public int batchDeleteByIds(final Collection<ID> ids) { return batchDeleteByIds(ids, JdbcSettings.DEFAULT_BATCH_SIZE); } /** * Delete all the records by batch operation. * * @param ids * @param batchSize Default value is 200. * @return */ public int batchDeleteByIds(final Collection<ID> ids, final int batchSize) { return batchDeleteByIds(ids, batchSize, IsolationLevel.DEFAULT); } /** * Delete all the records by batch operation. * * @param ids * @param batchSize Default value is 200. * @param isolationLevel * @return */ public int batchDeleteByIds(final Collection<ID> ids, final int batchSize, final IsolationLevel isolationLevel) { return batchDeleteByIds(null, ids, batchSize, isolationLevel); } /** * Delete all the records by batch operation. 
* * @param conn * @param ids * @return */ public int batchDeleteByIds(final Connection conn, final Collection<ID> ids) { return batchDeleteByIds(conn, ids, JdbcSettings.DEFAULT_BATCH_SIZE); } /** * Delete all the records by batch operation. * * @param conn * @param ids * @param batchSize * @return */ public int batchDeleteByIds(final Connection conn, final Collection<ID> ids, final int batchSize) { return batchDeleteByIds(conn, ids, batchSize, IsolationLevel.DEFAULT); } private int batchDeleteByIds(final Connection conn, final Collection<ID> ids, final int batchSize, final IsolationLevel isolationLevel) { N.checkArgPositive(batchSize, "batchSize"); if (N.isNullOrEmpty(ids)) { return 0; } final List<ID> listOfIds = ids instanceof List ? ((List<ID>) ids) : N.newArrayList(ids); checkId(listOfIds.get(0)); final JdbcSettings jdbcSettings = JdbcSettings.create().setBatchSize(batchSize).setIsolationLevel(isolationLevel); return sqlExecutor.batchUpdate(conn, sql_delete_by_id, jdbcSettings, listOfIds); } private void checkId(final Object id) { N.checkArgument(idPropNameList.size() > 1 || !(id instanceof Map || isEntity(id)), "Input 'id' can not be Map or entity for single id "); } private boolean isEntity(Object obj) { return obj != null && ClassUtil.isEntity(obj.getClass()); } private Condition id2Cond(final Object id, boolean isIdOrEntity) { if (isIdOrEntity == false) { checkId(id); } if (idPropNameList.size() == 1) { if (id instanceof Map) { return CF.eq(idPropName, ((Map<String, Object>) id).get(idPropName)); } else if (isEntity(id)) { return CF.eq(idPropName, ClassUtil.getPropValue(id, idPropName)); } else { return CF.eq(idPropName, id); } } if (id instanceof Map) { return CF.eqAnd((Map<String, Object>) id); } else if (isEntity(id)) { return CF.eqAnd(id, idPropNameList); } else { throw new IllegalArgumentException("Not supported id type: " + (id == null ? 
"null" : ClassUtil.getClassName(id.getClass()))); } } private void checkEntity(final Object entity) { final Class<?> cls = entity.getClass(); if (ClassUtil.isEntity(cls)) { N.checkArgument(targetClass.isAssignableFrom(cls), "Delete wrong type: " + ClassUtil.getCanonicalClassName(cls) + " in " + toString()); } } public String toStirng() { return "Mapper[" + ClassUtil.getCanonicalClassName(targetClass) + "]"; } } public static final class AsyncMapper<T, ID> { private final Mapper<T, ID> mapper; private final AsyncExecutor asyncExecutor; AsyncMapper(Mapper<T, ID> mapper, AsyncExecutor asyncExecutor) { this.mapper = mapper; this.asyncExecutor = asyncExecutor; } public ContinuableFuture<Boolean> exists(final ID id) { return asyncExecutor.execute(new Callable<Boolean>() { @Override public Boolean call() throws Exception { return mapper.exists(id); } }); } public ContinuableFuture<Boolean> exists(final Condition whereCause) { return asyncExecutor.execute(new Callable<Boolean>() { @Override public Boolean call() throws Exception { return mapper.exists(whereCause); } }); } public ContinuableFuture<Boolean> exists(final Connection conn, final ID id) { return asyncExecutor.execute(new Callable<Boolean>() { @Override public Boolean call() throws Exception { return mapper.exists(conn, id); } }); } public ContinuableFuture<Boolean> exists(final Connection conn, final Condition whereCause) { return asyncExecutor.execute(new Callable<Boolean>() { @Override public Boolean call() throws Exception { return mapper.exists(conn, whereCause); } }); } public ContinuableFuture<Integer> count(final Condition whereCause) { return asyncExecutor.execute(new Callable<Integer>() { @Override public Integer call() throws Exception { return mapper.count(whereCause); } }); } public ContinuableFuture<Integer> count(final Connection conn, final Condition whereCause) { return asyncExecutor.execute(new Callable<Integer>() { @Override public Integer call() throws Exception { return mapper.count(conn, 
whereCause); } }); } public ContinuableFuture<Optional<T>> get(final ID id) { return asyncExecutor.execute(new Callable<Optional<T>>() { @Override public Optional<T> call() throws Exception { return mapper.get(id); } }); } // /** // * // * @param id // * @param selectPropNames // * @return // * @deprecated replaced by {@code get(id, Arrays.asList(selectPropNames)} // */ // @Deprecated // @SafeVarargs // public final ContinuableFuture<Optional<T>> get(final ID id, final String... selectPropNames) { // return asyncExecutor.execute(new Callable<Optional<T>>() { // @Override // public Optional<T> call() throws Exception { // return mapper.get(id, selectPropNames); // } // }); // } public ContinuableFuture<Optional<T>> get(final ID id, final Collection<String> selectPropNames) { return asyncExecutor.execute(new Callable<Optional<T>>() { @Override public Optional<T> call() throws Exception { return mapper.get(id, selectPropNames); } }); } public ContinuableFuture<Optional<T>> get(final Connection conn, final ID id, final Collection<String> selectPropNames) { return asyncExecutor.execute(new Callable<Optional<T>>() { @Override public Optional<T> call() throws Exception { return mapper.get(conn, id, selectPropNames); } }); } public ContinuableFuture<T> gett(final ID id) { return asyncExecutor.execute(new Callable<T>() { @Override public T call() throws Exception { return mapper.gett(id); } }); } // /** // * // * @param id // * @param selectPropNames // * @return // * @deprecated replaced by {@code gett(id, Arrays.asList(selectPropNames)} // */ // @Deprecated // @SafeVarargs // public final ContinuableFuture<T> gett(final ID id, final String... 
selectPropNames) { // return asyncExecutor.execute(new Callable<T>() { // @Override // public T call() throws Exception { // return mapper.gett(id, selectPropNames); // } // }); // } public ContinuableFuture<T> gett(final ID id, final Collection<String> selectPropNames) { return asyncExecutor.execute(new Callable<T>() { @Override public T call() throws Exception { return mapper.gett(id, selectPropNames); } }); } public ContinuableFuture<T> gett(final Connection conn, final ID id, final Collection<String> selectPropNames) { return asyncExecutor.execute(new Callable<T>() { @Override public T call() throws Exception { return mapper.gett(conn, id, selectPropNames); } }); } public ContinuableFuture<List<T>> batchGet(final List<ID> ids) { return asyncExecutor.execute(new Callable<List<T>>() { @Override public List<T> call() throws Exception { return mapper.batchGet(ids); } }); } // /** // * // * @param ids // * @param selectPropNames // * @return // * @deprecated replaced by {@code batchGet(ids, Arrays.asList(selectPropNames)} // */ // @Deprecated // public ContinuableFuture<List<T>> batchGet(final List<?> ids, final String... 
selectPropNames) { // return asyncExecutor.execute(new Callable<List<T>>() { // @Override // public List<T> call() throws Exception { // return mapper.batchGet(ids, selectPropNames); // } // }); // } public ContinuableFuture<List<T>> batchGet(final List<ID> ids, final Collection<String> selectPropNames) { return asyncExecutor.execute(new Callable<List<T>>() { @Override public List<T> call() throws Exception { return mapper.batchGet(ids, selectPropNames); } }); } public ContinuableFuture<List<T>> batchGet(final List<ID> ids, final Collection<String> selectPropNames, final int batchSize) { return asyncExecutor.execute(new Callable<List<T>>() { @Override public List<T> call() throws Exception { return mapper.batchGet(ids, selectPropNames, batchSize); } }); } public ContinuableFuture<List<T>> batchGet(final Connection conn, final List<ID> ids, final Collection<String> selectPropNames, final int batchSize) { return asyncExecutor.execute(new Callable<List<T>>() { @Override public List<T> call() throws Exception { return mapper.batchGet(conn, ids, selectPropNames, batchSize); } }); } public ContinuableFuture<Optional<T>> findFirst(final Condition whereCause) { return asyncExecutor.execute(new Callable<Optional<T>>() { @Override public Optional<T> call() throws Exception { return mapper.findFirst(whereCause); } }); } public ContinuableFuture<Optional<T>> findFirst(final Collection<String> selectPropNames, final Condition whereCause) { return asyncExecutor.execute(new Callable<Optional<T>>() { @Override public Optional<T> call() throws Exception { return mapper.findFirst(selectPropNames, whereCause); } }); } public ContinuableFuture<Optional<T>> findFirst(final Collection<String> selectPropNames, final Condition whereCause, final JdbcSettings jdbcSettings) { return asyncExecutor.execute(new Callable<Optional<T>>() { @Override public Optional<T> call() throws Exception { return mapper.findFirst(selectPropNames, whereCause, jdbcSettings); } }); } public 
ContinuableFuture<Optional<T>> findFirst(final Connection conn, final Condition whereCause) { return asyncExecutor.execute(new Callable<Optional<T>>() { @Override public Optional<T> call() throws Exception { return mapper.findFirst(conn, whereCause); } }); } public ContinuableFuture<Optional<T>> findFirst(final Connection conn, final Collection<String> selectPropNames, final Condition whereCause) { return asyncExecutor.execute(new Callable<Optional<T>>() { @Override public Optional<T> call() throws Exception { return mapper.findFirst(conn, selectPropNames, whereCause); } }); } public ContinuableFuture<Optional<T>> findFirst(final Connection conn, final Collection<String> selectPropNames, final Condition whereCause, final JdbcSettings jdbcSettings) { return asyncExecutor.execute(new Callable<Optional<T>>() { @Override public Optional<T> call() throws Exception { return mapper.findFirst(conn, selectPropNames, whereCause, jdbcSettings); } }); } public <R> ContinuableFuture<Optional<R>> findFirst(final String selectPropName, final JdbcUtil.RowMapper<R> rowMapper, final Condition whereCause) { return asyncExecutor.execute(new Callable<Optional<R>>() { @Override public Optional<R> call() throws Exception { return mapper.findFirst(selectPropName, rowMapper, whereCause); } }); } public <R> ContinuableFuture<Optional<R>> findFirst(final String selectPropName, final JdbcUtil.RowMapper<R> rowMapper, final Condition whereCause, final JdbcSettings jdbcSettings) { return asyncExecutor.execute(new Callable<Optional<R>>() { @Override public Optional<R> call() throws Exception { return mapper.findFirst(selectPropName, rowMapper, whereCause, jdbcSettings); } }); } public <R> ContinuableFuture<Optional<R>> findFirst(final Connection conn, final String selectPropName, final JdbcUtil.RowMapper<R> rowMapper, final Condition whereCause) { return asyncExecutor.execute(new Callable<Optional<R>>() { @Override public Optional<R> call() throws Exception { return mapper.findFirst(conn, 
selectPropName, rowMapper, whereCause); } }); } public <R> ContinuableFuture<Optional<R>> findFirst(final Connection conn, final String selectPropName, final JdbcUtil.RowMapper<R> rowMapper, final Condition whereCause, final JdbcSettings jdbcSettings) { return asyncExecutor.execute(new Callable<Optional<R>>() { @Override public Optional<R> call() throws Exception { return mapper.findFirst(conn, selectPropName, rowMapper, whereCause, jdbcSettings); } }); } public <R> ContinuableFuture<Optional<R>> findFirst(final Collection<String> selectPropNames, final JdbcUtil.RowMapper<R> rowMapper, final Condition whereCause) { return asyncExecutor.execute(new Callable<Optional<R>>() { @Override public Optional<R> call() throws Exception { return mapper.findFirst(selectPropNames, rowMapper, whereCause); } }); } public <R> ContinuableFuture<Optional<R>> findFirst(final Collection<String> selectPropNames, final JdbcUtil.RowMapper<R> rowMapper, final Condition whereCause, final JdbcSettings jdbcSettings) { return asyncExecutor.execute(new Callable<Optional<R>>() { @Override public Optional<R> call() throws Exception { return mapper.findFirst(selectPropNames, rowMapper, whereCause, jdbcSettings); } }); } public <R> ContinuableFuture<Optional<R>> findFirst(final Connection conn, final Collection<String> selectPropNames, final JdbcUtil.RowMapper<R> rowMapper, final Condition whereCause) { return asyncExecutor.execute(new Callable<Optional<R>>() { @Override public Optional<R> call() throws Exception { return mapper.findFirst(conn, selectPropNames, rowMapper, whereCause); } }); } public <R> ContinuableFuture<Optional<R>> findFirst(final Connection conn, final Collection<String> selectPropNames, final JdbcUtil.RowMapper<R> rowMapper, final Condition whereCause, final JdbcSettings jdbcSettings) { return asyncExecutor.execute(new Callable<Optional<R>>() { @Override public Optional<R> call() throws Exception { return mapper.findFirst(conn, selectPropNames, rowMapper, whereCause, 
jdbcSettings); } }); } public <R> ContinuableFuture<Optional<R>> findFirst(final String selectPropName, final JdbcUtil.BiRowMapper<R> rowMapper, final Condition whereCause) { return asyncExecutor.execute(new Callable<Optional<R>>() { @Override public Optional<R> call() throws Exception { return mapper.findFirst(selectPropName, rowMapper, whereCause); } }); } public <R> ContinuableFuture<Optional<R>> findFirst(final String selectPropName, final JdbcUtil.BiRowMapper<R> rowMapper, final Condition whereCause, final JdbcSettings jdbcSettings) { return asyncExecutor.execute(new Callable<Optional<R>>() { @Override public Optional<R> call() throws Exception { return mapper.findFirst(selectPropName, rowMapper, whereCause, jdbcSettings); } }); } public <R> ContinuableFuture<Optional<R>> findFirst(final Connection conn, final String selectPropName, final JdbcUtil.BiRowMapper<R> rowMapper, final Condition whereCause) { return asyncExecutor.execute(new Callable<Optional<R>>() { @Override public Optional<R> call() throws Exception { return mapper.findFirst(conn, selectPropName, rowMapper, whereCause); } }); } public <R> ContinuableFuture<Optional<R>> findFirst(final Connection conn, final String selectPropName, final JdbcUtil.BiRowMapper<R> rowMapper, final Condition whereCause, final JdbcSettings jdbcSettings) { return asyncExecutor.execute(new Callable<Optional<R>>() { @Override public Optional<R> call() throws Exception { return mapper.findFirst(conn, selectPropName, rowMapper, whereCause, jdbcSettings); } }); } public <R> ContinuableFuture<Optional<R>> findFirst(final Collection<String> selectPropNames, final JdbcUtil.BiRowMapper<R> rowMapper, final Condition whereCause) { return asyncExecutor.execute(new Callable<Optional<R>>() { @Override public Optional<R> call() throws Exception { return mapper.findFirst(selectPropNames, rowMapper, whereCause); } }); } public <R> ContinuableFuture<Optional<R>> findFirst(final Collection<String> selectPropNames, final 
JdbcUtil.BiRowMapper<R> rowMapper, final Condition whereCause, final JdbcSettings jdbcSettings) { return asyncExecutor.execute(new Callable<Optional<R>>() { @Override public Optional<R> call() throws Exception { return mapper.findFirst(selectPropNames, rowMapper, whereCause, jdbcSettings); } }); } public <R> ContinuableFuture<Optional<R>> findFirst(final Connection conn, final Collection<String> selectPropNames, final JdbcUtil.BiRowMapper<R> rowMapper, final Condition whereCause) { return asyncExecutor.execute(new Callable<Optional<R>>() { @Override public Optional<R> call() throws Exception { return mapper.findFirst(conn, selectPropNames, rowMapper, whereCause); } }); } public <R> ContinuableFuture<Optional<R>> findFirst(final Connection conn, final Collection<String> selectPropNames, final JdbcUtil.BiRowMapper<R> rowMapper, final Condition whereCause, final JdbcSettings jdbcSettings) { return asyncExecutor.execute(new Callable<Optional<R>>() { @Override public Optional<R> call() throws Exception { return mapper.findFirst(conn, selectPropNames, rowMapper, whereCause, jdbcSettings); } }); } public ContinuableFuture<List<T>> list(final Condition whereCause) { return asyncExecutor.execute(new Callable<List<T>>() { @Override public List<T> call() throws Exception { return mapper.list(whereCause); } }); } public ContinuableFuture<List<T>> list(final Collection<String> selectPropNames, final Condition whereCause) { return asyncExecutor.execute(new Callable<List<T>>() { @Override public List<T> call() throws Exception { return mapper.list(selectPropNames, whereCause); } }); } public ContinuableFuture<List<T>> list(final Collection<String> selectPropNames, final Condition whereCause, final JdbcSettings jdbcSettings) { return asyncExecutor.execute(new Callable<List<T>>() { @Override public List<T> call() throws Exception { return mapper.list(selectPropNames, whereCause, jdbcSettings); } }); } public ContinuableFuture<List<T>> list(final Connection conn, final Condition 
whereCause) { return asyncExecutor.execute(new Callable<List<T>>() { @Override public List<T> call() throws Exception { return mapper.list(conn, whereCause); } }); } public ContinuableFuture<List<T>> list(final Connection conn, final Collection<String> selectPropNames, final Condition whereCause) { return asyncExecutor.execute(new Callable<List<T>>() { @Override public List<T> call() throws Exception { return mapper.list(conn, selectPropNames, whereCause); } }); } public ContinuableFuture<List<T>> list(final Connection conn, final Collection<String> selectPropNames, final Condition whereCause, final JdbcSettings jdbcSettings) { return asyncExecutor.execute(new Callable<List<T>>() { @Override public List<T> call() throws Exception { return mapper.list(conn, selectPropNames, whereCause, jdbcSettings); } }); } public <R> ContinuableFuture<List<R>> list(final String selectPropName, final JdbcUtil.RowMapper<R> rowMapper, final Condition whereCause) { return asyncExecutor.execute(new Callable<List<R>>() { @Override public List<R> call() throws Exception { return mapper.list(selectPropName, rowMapper, whereCause); } }); } public <R> ContinuableFuture<List<R>> list(final String selectPropName, final JdbcUtil.RowMapper<R> rowMapper, final Condition whereCause, final JdbcSettings jdbcSettings) { return asyncExecutor.execute(new Callable<List<R>>() { @Override public List<R> call() throws Exception { return mapper.list(selectPropName, rowMapper, whereCause, jdbcSettings); } }); } public <R> ContinuableFuture<List<R>> list(final Connection conn, final String selectPropName, final JdbcUtil.RowMapper<R> rowMapper, final Condition whereCause) { return asyncExecutor.execute(new Callable<List<R>>() { @Override public List<R> call() throws Exception { return mapper.list(conn, selectPropName, rowMapper, whereCause); } }); } public <R> ContinuableFuture<List<R>> list(final Connection conn, final String selectPropName, final JdbcUtil.RowMapper<R> rowMapper, final Condition whereCause, 
final JdbcSettings jdbcSettings) { return asyncExecutor.execute(new Callable<List<R>>() { @Override public List<R> call() throws Exception { return mapper.list(conn, selectPropName, rowMapper, whereCause, jdbcSettings); } }); } public <R> ContinuableFuture<List<R>> list(final Collection<String> selectPropNames, final JdbcUtil.RowMapper<R> rowMapper, final Condition whereCause) { return asyncExecutor.execute(new Callable<List<R>>() { @Override public List<R> call() throws Exception { return mapper.list(selectPropNames, rowMapper, whereCause); } }); } public <R> ContinuableFuture<List<R>> list(final Collection<String> selectPropNames, final JdbcUtil.RowMapper<R> rowMapper, final Condition whereCause, final JdbcSettings jdbcSettings) { return asyncExecutor.execute(new Callable<List<R>>() { @Override public List<R> call() throws Exception { return mapper.list(selectPropNames, rowMapper, whereCause, jdbcSettings); } }); } public <R> ContinuableFuture<List<R>> list(final Connection conn, final Collection<String> selectPropNames, final JdbcUtil.RowMapper<R> rowMapper, final Condition whereCause) { return asyncExecutor.execute(new Callable<List<R>>() { @Override public List<R> call() throws Exception { return mapper.list(conn, selectPropNames, rowMapper, whereCause); } }); } public <R> ContinuableFuture<List<R>> list(final Connection conn, final Collection<String> selectPropNames, final JdbcUtil.RowMapper<R> rowMapper, final Condition whereCause, final JdbcSettings jdbcSettings) { return asyncExecutor.execute(new Callable<List<R>>() { @Override public List<R> call() throws Exception { return mapper.list(conn, selectPropNames, rowMapper, whereCause, jdbcSettings); } }); } public <R> ContinuableFuture<List<R>> list(final String selectPropName, final JdbcUtil.BiRowMapper<R> rowMapper, final Condition whereCause) { return asyncExecutor.execute(new Callable<List<R>>() { @Override public List<R> call() throws Exception { return mapper.list(selectPropName, rowMapper, whereCause); 
} }); } public <R> ContinuableFuture<List<R>> list(final String selectPropName, final JdbcUtil.BiRowMapper<R> rowMapper, final Condition whereCause, final JdbcSettings jdbcSettings) { return asyncExecutor.execute(new Callable<List<R>>() { @Override public List<R> call() throws Exception { return mapper.list(selectPropName, rowMapper, whereCause, jdbcSettings); } }); } public <R> ContinuableFuture<List<R>> list(final Connection conn, final String selectPropName, final JdbcUtil.BiRowMapper<R> rowMapper, final Condition whereCause) { return asyncExecutor.execute(new Callable<List<R>>() { @Override public List<R> call() throws Exception { return mapper.list(conn, selectPropName, rowMapper, whereCause); } }); } public <R> ContinuableFuture<List<R>> list(final Connection conn, final String selectPropName, final JdbcUtil.BiRowMapper<R> rowMapper, final Condition whereCause, final JdbcSettings jdbcSettings) { return asyncExecutor.execute(new Callable<List<R>>() { @Override public List<R> call() throws Exception { return mapper.list(conn, selectPropName, rowMapper, whereCause, jdbcSettings); } }); } public <R> ContinuableFuture<List<R>> list(final Collection<String> selectPropNames, final JdbcUtil.BiRowMapper<R> rowMapper, final Condition whereCause) { return asyncExecutor.execute(new Callable<List<R>>() { @Override public List<R> call() throws Exception { return mapper.list(selectPropNames, rowMapper, whereCause); } }); } public <R> ContinuableFuture<List<R>> list(final Collection<String> selectPropNames, final JdbcUtil.BiRowMapper<R> rowMapper, final Condition whereCause, final JdbcSettings jdbcSettings) { return asyncExecutor.execute(new Callable<List<R>>() { @Override public List<R> call() throws Exception { return mapper.list(selectPropNames, rowMapper, whereCause, jdbcSettings); } }); } public <R> ContinuableFuture<List<R>> list(final Connection conn, final Collection<String> selectPropNames, final JdbcUtil.BiRowMapper<R> rowMapper, final Condition whereCause) { 
return asyncExecutor.execute(new Callable<List<R>>() { @Override public List<R> call() throws Exception { return mapper.list(conn, selectPropNames, rowMapper, whereCause); } }); } public <R> ContinuableFuture<List<R>> list(final Connection conn, final Collection<String> selectPropNames, final JdbcUtil.BiRowMapper<R> rowMapper, final Condition whereCause, final JdbcSettings jdbcSettings) { return asyncExecutor.execute(new Callable<List<R>>() { @Override public List<R> call() throws Exception { return mapper.list(conn, selectPropNames, rowMapper, whereCause, jdbcSettings); } }); } public ContinuableFuture<List<T>> listAll(final Condition whereCause, final JdbcSettings jdbcSettings) { return asyncExecutor.execute(new Callable<List<T>>() { @Override public List<T> call() throws Exception { return mapper.listAll(whereCause, jdbcSettings); } }); } public ContinuableFuture<List<T>> listAll(final Collection<String> selectPropNames, final Condition whereCause, final JdbcSettings jdbcSettings) { return asyncExecutor.execute(new Callable<List<T>>() { @Override public List<T> call() throws Exception { return mapper.listAll(selectPropNames, whereCause, jdbcSettings); } }); } public <R> ContinuableFuture<List<R>> listAll(final String selectPropName, final JdbcUtil.RowMapper<R> rowMapper, final Condition whereCause, final JdbcSettings jdbcSettings) { return asyncExecutor.execute(new Callable<List<R>>() { @Override public List<R> call() throws Exception { return mapper.listAll(selectPropName, rowMapper, whereCause, jdbcSettings); } }); } public <R> ContinuableFuture<List<R>> listAll(final String selectPropName, final JdbcUtil.BiRowMapper<R> rowMapper, final Condition whereCause, final JdbcSettings jdbcSettings) { return asyncExecutor.execute(new Callable<List<R>>() { @Override public List<R> call() throws Exception { return mapper.listAll(selectPropName, rowMapper, whereCause, jdbcSettings); } }); } public <R> ContinuableFuture<List<R>> listAll(final Collection<String> 
selectPropNames, final JdbcUtil.RowMapper<R> rowMapper, final Condition whereCause, final JdbcSettings jdbcSettings) { return asyncExecutor.execute(new Callable<List<R>>() { @Override public List<R> call() throws Exception { return mapper.listAll(selectPropNames, rowMapper, whereCause, jdbcSettings); } }); } public <R> ContinuableFuture<List<R>> listAll(final Collection<String> selectPropNames, final JdbcUtil.BiRowMapper<R> rowMapper, final Condition whereCause, final JdbcSettings jdbcSettings) { return asyncExecutor.execute(new Callable<List<R>>() { @Override public List<R> call() throws Exception { return mapper.listAll(selectPropNames, rowMapper, whereCause, jdbcSettings); } }); } public ContinuableFuture<Stream<T>> stream(final Condition whereCause) { return asyncExecutor.execute(new Callable<Stream<T>>() { @Override public Stream<T> call() throws Exception { return mapper.stream(whereCause); } }); } public ContinuableFuture<Stream<T>> stream(final Collection<String> selectPropNames, final Condition whereCause) { return asyncExecutor.execute(new Callable<Stream<T>>() { @Override public Stream<T> call() throws Exception { return mapper.stream(selectPropNames, whereCause); } }); } public ContinuableFuture<Stream<T>> stream(final Collection<String> selectPropNames, final Condition whereCause, final JdbcSettings jdbcSettings) { return asyncExecutor.execute(new Callable<Stream<T>>() { @Override public Stream<T> call() throws Exception { return mapper.stream(selectPropNames, whereCause, jdbcSettings); } }); } public <R> ContinuableFuture<Stream<R>> stream(final String selectPropName, final JdbcUtil.RowMapper<R> rowMapper, final Condition whereCause) { return asyncExecutor.execute(new Callable<Stream<R>>() { @Override public Stream<R> call() throws Exception { return mapper.stream(selectPropName, rowMapper, whereCause); } }); } public <R> ContinuableFuture<Stream<R>> stream(final String selectPropName, final JdbcUtil.RowMapper<R> rowMapper, final Condition whereCause, 
final JdbcSettings jdbcSettings) { return asyncExecutor.execute(new Callable<Stream<R>>() { @Override public Stream<R> call() throws Exception { return mapper.stream(selectPropName, rowMapper, whereCause, jdbcSettings); } }); } public <R> ContinuableFuture<Stream<R>> stream(final Collection<String> selectPropNames, final JdbcUtil.RowMapper<R> rowMapper, final Condition whereCause) { return asyncExecutor.execute(new Callable<Stream<R>>() { @Override public Stream<R> call() throws Exception { return mapper.stream(selectPropNames, rowMapper, whereCause); } }); } public <R> ContinuableFuture<Stream<R>> stream(final Collection<String> selectPropNames, final JdbcUtil.RowMapper<R> rowMapper, final Condition whereCause, final JdbcSettings jdbcSettings) { return asyncExecutor.execute(new Callable<Stream<R>>() { @Override public Stream<R> call() throws Exception { return mapper.stream(selectPropNames, rowMapper, whereCause, jdbcSettings); } }); } public <R> ContinuableFuture<Stream<R>> stream(final String selectPropName, final JdbcUtil.BiRowMapper<R> rowMapper, final Condition whereCause) { return asyncExecutor.execute(new Callable<Stream<R>>() { @Override public Stream<R> call() throws Exception { return mapper.stream(selectPropName, rowMapper, whereCause); } }); } public <R> ContinuableFuture<Stream<R>> stream(final String selectPropName, final JdbcUtil.BiRowMapper<R> rowMapper, final Condition whereCause, final JdbcSettings jdbcSettings) { return asyncExecutor.execute(new Callable<Stream<R>>() { @Override public Stream<R> call() throws Exception { return mapper.stream(selectPropName, rowMapper, whereCause, jdbcSettings); } }); } public <R> ContinuableFuture<Stream<R>> stream(final Collection<String> selectPropNames, final JdbcUtil.BiRowMapper<R> rowMapper, final Condition whereCause) { return asyncExecutor.execute(new Callable<Stream<R>>() { @Override public Stream<R> call() throws Exception { return mapper.stream(selectPropNames, rowMapper, whereCause); } }); } public 
<R> ContinuableFuture<Stream<R>> stream(final Collection<String> selectPropNames, final JdbcUtil.BiRowMapper<R> rowMapper, final Condition whereCause, final JdbcSettings jdbcSettings) { return asyncExecutor.execute(new Callable<Stream<R>>() { @Override public Stream<R> call() throws Exception { return mapper.stream(selectPropNames, rowMapper, whereCause, jdbcSettings); } }); } public ContinuableFuture<Stream<T>> streamAll(final Condition whereCause, final JdbcSettings jdbcSettings) { return asyncExecutor.execute(new Callable<Stream<T>>() { @Override public Stream<T> call() throws Exception { return mapper.streamAll(whereCause, jdbcSettings); } }); } public ContinuableFuture<Stream<T>> streamAll(final Collection<String> selectPropNames, final Condition whereCause, final JdbcSettings jdbcSettings) { return asyncExecutor.execute(new Callable<Stream<T>>() { @Override public Stream<T> call() throws Exception { return mapper.streamAll(selectPropNames, whereCause, jdbcSettings); } }); } public <R> ContinuableFuture<Stream<R>> streamAll(final String selectPropName, final JdbcUtil.RowMapper<R> rowMapper, final Condition whereCause, final JdbcSettings jdbcSettings) { return asyncExecutor.execute(new Callable<Stream<R>>() { @Override public Stream<R> call() throws Exception { return mapper.streamAll(selectPropName, rowMapper, whereCause, jdbcSettings); } }); } public <R> ContinuableFuture<Stream<R>> streamAll(final String selectPropName, final JdbcUtil.BiRowMapper<R> rowMapper, final Condition whereCause, final JdbcSettings jdbcSettings) { return asyncExecutor.execute(new Callable<Stream<R>>() { @Override public Stream<R> call() throws Exception { return mapper.streamAll(selectPropName, rowMapper, whereCause, jdbcSettings); } }); } public <R> ContinuableFuture<Stream<R>> streamAll(final Collection<String> selectPropNames, final JdbcUtil.RowMapper<R> rowMapper, final Condition whereCause, final JdbcSettings jdbcSettings) { return asyncExecutor.execute(new Callable<Stream<R>>() 
{ @Override public Stream<R> call() throws Exception { return mapper.streamAll(selectPropNames, rowMapper, whereCause, jdbcSettings); } }); } public <R> ContinuableFuture<Stream<R>> streamAll(final Collection<String> selectPropNames, final JdbcUtil.BiRowMapper<R> rowMapper, final Condition whereCause, final JdbcSettings jdbcSettings) { return asyncExecutor.execute(new Callable<Stream<R>>() { @Override public Stream<R> call() throws Exception { return mapper.streamAll(selectPropNames, rowMapper, whereCause, jdbcSettings); } }); } public ContinuableFuture<DataSet> query(final Condition whereCause) { return asyncExecutor.execute(new Callable<DataSet>() { @Override public DataSet call() throws Exception { return mapper.query(whereCause); } }); } public ContinuableFuture<DataSet> query(final Collection<String> selectPropNames, final Condition whereCause) { return asyncExecutor.execute(new Callable<DataSet>() { @Override public DataSet call() throws Exception { return mapper.query(selectPropNames, whereCause); } }); } public ContinuableFuture<DataSet> query(final Collection<String> selectPropNames, final Condition whereCause, final JdbcSettings jdbcSettings) { return asyncExecutor.execute(new Callable<DataSet>() { @Override public DataSet call() throws Exception { return mapper.query(selectPropNames, whereCause, jdbcSettings); } }); } public ContinuableFuture<DataSet> query(final Connection conn, final Condition whereCause) { return asyncExecutor.execute(new Callable<DataSet>() { @Override public DataSet call() throws Exception { return mapper.query(conn, whereCause); } }); } public ContinuableFuture<DataSet> query(final Connection conn, final Collection<String> selectPropNames, final Condition whereCause) { return asyncExecutor.execute(new Callable<DataSet>() { @Override public DataSet call() throws Exception { return mapper.query(conn, selectPropNames, whereCause); } }); } public ContinuableFuture<DataSet> query(final Connection conn, final Collection<String> 
selectPropNames, final Condition whereCause, final JdbcSettings jdbcSettings) { return asyncExecutor.execute(new Callable<DataSet>() { @Override public DataSet call() throws Exception { return mapper.query(conn, selectPropNames, whereCause, jdbcSettings); } }); } public ContinuableFuture<DataSet> queryAll(final Condition whereCause, final JdbcSettings jdbcSettings) { return asyncExecutor.execute(new Callable<DataSet>() { @Override public DataSet call() throws Exception { return mapper.queryAll(whereCause, jdbcSettings); } }); } public ContinuableFuture<DataSet> queryAll(final Collection<String> selectPropNames, final Condition whereCause, final JdbcSettings jdbcSettings) { return asyncExecutor.execute(new Callable<DataSet>() { @Override public DataSet call() throws Exception { return mapper.queryAll(selectPropNames, whereCause, jdbcSettings); } }); } public ContinuableFuture<OptionalBoolean> queryForBoolean(final String selectPropName, final Condition whereCause) { return asyncExecutor.execute(new Callable<OptionalBoolean>() { @Override public OptionalBoolean call() throws Exception { return mapper.queryForBoolean(selectPropName, whereCause); } }); } public ContinuableFuture<OptionalByte> queryForByte(final String selectPropName, final Condition whereCause) { return asyncExecutor.execute(new Callable<OptionalByte>() { @Override public OptionalByte call() throws Exception { return mapper.queryForByte(selectPropName, whereCause); } }); } public ContinuableFuture<OptionalShort> queryForShort(final String selectPropName, final Condition whereCause) { return asyncExecutor.execute(new Callable<OptionalShort>() { @Override public OptionalShort call() throws Exception { return mapper.queryForShort(selectPropName, whereCause); } }); } public ContinuableFuture<OptionalInt> queryForInt(final String selectPropName, final Condition whereCause) { return asyncExecutor.execute(new Callable<OptionalInt>() { @Override public OptionalInt call() throws Exception { return 
mapper.queryForInt(selectPropName, whereCause); } }); } public ContinuableFuture<OptionalLong> queryForLong(final String selectPropName, final Condition whereCause) { return asyncExecutor.execute(new Callable<OptionalLong>() { @Override public OptionalLong call() throws Exception { return mapper.queryForLong(selectPropName, whereCause); } }); } public ContinuableFuture<OptionalFloat> queryForFloat(final String selectPropName, final Condition whereCause) { return asyncExecutor.execute(new Callable<OptionalFloat>() { @Override public OptionalFloat call() throws Exception { return mapper.queryForFloat(selectPropName, whereCause); } }); } public ContinuableFuture<OptionalDouble> queryForDouble(final String selectPropName, final Condition whereCause) { return asyncExecutor.execute(new Callable<OptionalDouble>() { @Override public OptionalDouble call() throws Exception { return mapper.queryForDouble(selectPropName, whereCause); } }); } public ContinuableFuture<Nullable<BigDecimal>> queryForBigDecimal(final String selectPropName, final Condition whereCause) { return asyncExecutor.execute(new Callable<Nullable<BigDecimal>>() { @Override public Nullable<BigDecimal> call() throws Exception { return mapper.queryForBigDecimal(selectPropName, whereCause); } }); } public ContinuableFuture<Nullable<String>> queryForString(final String selectPropName, final Condition whereCause) { return asyncExecutor.execute(new Callable<Nullable<String>>() { @Override public Nullable<String> call() throws Exception { return mapper.queryForString(selectPropName, whereCause); } }); } public ContinuableFuture<Nullable<java.sql.Date>> queryForDate(final String selectPropName, final Condition whereCause) { return asyncExecutor.execute(new Callable<Nullable<java.sql.Date>>() { @Override public Nullable<java.sql.Date> call() throws Exception { return mapper.queryForDate(selectPropName, whereCause); } }); } public ContinuableFuture<Nullable<java.sql.Time>> queryForTime(final String selectPropName, final 
Condition whereCause) { return asyncExecutor.execute(new Callable<Nullable<java.sql.Time>>() { @Override public Nullable<java.sql.Time> call() throws Exception { return mapper.queryForTime(selectPropName, whereCause); } }); } public ContinuableFuture<Nullable<java.sql.Timestamp>> queryForTimestamp(final String selectPropName, final Condition whereCause) { return asyncExecutor.execute(new Callable<Nullable<java.sql.Timestamp>>() { @Override public Nullable<java.sql.Timestamp> call() throws Exception { return mapper.queryForTimestamp(selectPropName, whereCause); } }); } public <V> ContinuableFuture<Nullable<V>> queryForSingleResult(final Class<V> targetValueClass, final String selectPropName, final ID id) { return asyncExecutor.execute(new Callable<Nullable<V>>() { @Override public Nullable<V> call() throws Exception { return mapper.queryForSingleResult(targetValueClass, selectPropName, id); } }); } public <V> ContinuableFuture<Nullable<V>> queryForSingleResult(final Class<V> targetValueClass, final String selectPropName, final Condition whereCause) { return asyncExecutor.execute(new Callable<Nullable<V>>() { @Override public Nullable<V> call() throws Exception { return mapper.queryForSingleResult(targetValueClass, selectPropName, whereCause); } }); } public <V> ContinuableFuture<Nullable<V>> queryForSingleResult(final Class<V> targetValueClass, final String selectPropName, final Condition whereCause, final JdbcSettings jdbcSettings) { return asyncExecutor.execute(new Callable<Nullable<V>>() { @Override public Nullable<V> call() throws Exception { return mapper.queryForSingleResult(targetValueClass, selectPropName, whereCause, jdbcSettings); } }); } public <V> ContinuableFuture<Nullable<V>> queryForSingleResult(final Class<V> targetValueClass, final Connection conn, final String selectPropName, final ID id) { return asyncExecutor.execute(new Callable<Nullable<V>>() { @Override public Nullable<V> call() throws Exception { return 
mapper.queryForSingleResult(targetValueClass, conn, selectPropName, id); } }); } public <V> ContinuableFuture<Nullable<V>> queryForSingleResult(final Class<V> targetValueClass, final Connection conn, final String selectPropName, final Condition whereCause) { return asyncExecutor.execute(new Callable<Nullable<V>>() { @Override public Nullable<V> call() throws Exception { return mapper.queryForSingleResult(targetValueClass, conn, selectPropName, whereCause); } }); } public <V> ContinuableFuture<Nullable<V>> queryForSingleResult(final Class<V> targetValueClass, final Connection conn, final String selectPropName, final Condition whereCause, final JdbcSettings jdbcSettings) { return asyncExecutor.execute(new Callable<Nullable<V>>() { @Override public Nullable<V> call() throws Exception { return mapper.queryForSingleResult(targetValueClass, conn, selectPropName, whereCause, jdbcSettings); } }); } public <V> ContinuableFuture<Nullable<V>> queryForUniqueResult(final Class<V> targetValueClass, final String selectPropName, final ID id) { return asyncExecutor.execute(new Callable<Nullable<V>>() { @Override public Nullable<V> call() throws Exception { return mapper.queryForUniqueResult(targetValueClass, selectPropName, id); } }); } public <V> ContinuableFuture<Nullable<V>> queryForUniqueResult(final Class<V> targetValueClass, final String selectPropName, final Condition whereCause) { return asyncExecutor.execute(new Callable<Nullable<V>>() { @Override public Nullable<V> call() throws Exception { return mapper.queryForUniqueResult(targetValueClass, selectPropName, whereCause); } }); } public <V> ContinuableFuture<Nullable<V>> queryForUniqueResult(final Class<V> targetValueClass, final String selectPropName, final Condition whereCause, final JdbcSettings jdbcSettings) { return asyncExecutor.execute(new Callable<Nullable<V>>() { @Override public Nullable<V> call() throws Exception { return mapper.queryForUniqueResult(targetValueClass, selectPropName, whereCause, jdbcSettings); } 
}); } public <V> ContinuableFuture<Nullable<V>> queryForUniqueResult(final Class<V> targetValueClass, final Connection conn, final String selectPropName, final ID id) { return asyncExecutor.execute(new Callable<Nullable<V>>() { @Override public Nullable<V> call() throws Exception { return mapper.queryForUniqueResult(targetValueClass, conn, selectPropName, id); } }); } public <V> ContinuableFuture<Nullable<V>> queryForUniqueResult(final Class<V> targetValueClass, final Connection conn, final String selectPropName, final Condition whereCause) { return asyncExecutor.execute(new Callable<Nullable<V>>() { @Override public Nullable<V> call() throws Exception { return mapper.queryForUniqueResult(targetValueClass, conn, selectPropName, whereCause); } }); } public <V> ContinuableFuture<Nullable<V>> queryForUniqueResult(final Class<V> targetValueClass, final Connection conn, final String selectPropName, final Condition whereCause, final JdbcSettings jdbcSettings) { return asyncExecutor.execute(new Callable<Nullable<V>>() { @Override public Nullable<V> call() throws Exception { return mapper.queryForUniqueResult(targetValueClass, conn, selectPropName, whereCause, jdbcSettings); } }); } public ContinuableFuture<ID> insert(final T entity) { return asyncExecutor.execute(new Callable<ID>() { @Override public ID call() throws Exception { return mapper.insert(entity); } }); } public ContinuableFuture<ID> insert(final T entity, final Collection<String> propNamesToInsert) { return asyncExecutor.execute(new Callable<ID>() { @Override public ID call() throws Exception { return mapper.insert(entity, propNamesToInsert); } }); } public ContinuableFuture<ID> insert(final Map<String, Object> props) { return asyncExecutor.execute(new Callable<ID>() { @Override public ID call() throws Exception { return mapper.insert(props); } }); } public ContinuableFuture<ID> insert(final Connection conn, final T entity) { return asyncExecutor.execute(new Callable<ID>() { @Override public ID call() throws 
Exception { return mapper.insert(conn, entity); } }); } public ContinuableFuture<ID> insert(final Connection conn, final T entity, final Collection<String> propNamesToInsert) { return asyncExecutor.execute(new Callable<ID>() { @Override public ID call() throws Exception { return mapper.insert(conn, entity, propNamesToInsert); } }); } public ContinuableFuture<ID> insert(final Connection conn, final Map<String, Object> props) { return asyncExecutor.execute(new Callable<ID>() { @Override public ID call() throws Exception { return mapper.insert(conn, props); } }); } // @Deprecated // public <ID> ContinuableFuture<List<ID>> insertAll(final Collection<? extends T> entities) { // return asyncExecutor.execute(new Callable<List<ID>>() { // @Override // public List<ID> call() throws Exception { // return mapper.insertAll(entities); // } // }); // } // // @Deprecated // public <ID> ContinuableFuture<List<ID>> insertAll(final Collection<? extends T> entities, final IsolationLevel isolationLevel) { // return asyncExecutor.execute(new Callable<List<ID>>() { // @Override // public List<ID> call() throws Exception { // return mapper.insertAll(entities, isolationLevel); // } // }); // } // // @Deprecated // public <ID> ContinuableFuture<List<ID>> insertAll(final Connection conn, final Collection<? extends T> entities) { // return asyncExecutor.execute(new Callable<List<ID>>() { // @Override // public List<ID> call() throws Exception { // return mapper.insertAll(conn, entities); // } // }); // } public ContinuableFuture<List<ID>> batchInsert(final Collection<? extends T> entities) { return asyncExecutor.execute(new Callable<List<ID>>() { @Override public List<ID> call() throws Exception { return mapper.batchInsert(entities); } }); } public ContinuableFuture<List<ID>> batchInsert(final Collection<? 
extends T> entities, final int batchSize) { return asyncExecutor.execute(new Callable<List<ID>>() { @Override public List<ID> call() throws Exception { return mapper.batchInsert(entities, batchSize); } }); } public ContinuableFuture<List<ID>> batchInsert(final Collection<? extends T> entities, final int batchSize, final IsolationLevel isolationLevel) { return asyncExecutor.execute(new Callable<List<ID>>() { @Override public List<ID> call() throws Exception { return mapper.batchInsert(entities, batchSize, isolationLevel); } }); } public ContinuableFuture<List<ID>> batchInsert(final Collection<? extends T> entities, final Collection<String> propNamesToInsert, final int batchSize, final IsolationLevel isolationLevel) { return asyncExecutor.execute(new Callable<List<ID>>() { @Override public List<ID> call() throws Exception { return mapper.batchInsert(entities, propNamesToInsert, batchSize, isolationLevel); } }); } public ContinuableFuture<List<ID>> batchInsert(final Connection conn, final Collection<? extends T> entities) { return asyncExecutor.execute(new Callable<List<ID>>() { @Override public List<ID> call() throws Exception { return mapper.batchInsert(conn, entities); } }); } public ContinuableFuture<List<ID>> batchInsert(final Connection conn, final Collection<? extends T> entities, final int batchSize) { return asyncExecutor.execute(new Callable<List<ID>>() { @Override public List<ID> call() throws Exception { return mapper.batchInsert(conn, entities, batchSize); } }); } public ContinuableFuture<List<ID>> batchInsert(final Connection conn, final Collection<? 
extends T> entities, final Collection<String> propNamesToInsert, final int batchSize) {
    // Tail of a batchInsert(Connection, ...) overload whose signature begins before this
    // chunk. Like every wrapper below it submits the matching synchronous mapper call to
    // asyncExecutor and returns the resulting ContinuableFuture.
    return asyncExecutor.execute(new Callable<List<ID>>() { @Override public List<ID> call() throws Exception { return mapper.batchInsert(conn, entities, propNamesToInsert, batchSize); } });
}

// ------------------------------------------------------------------------------------
// Asynchronous delegation wrappers. Each overload mirrors the synchronous Mapper API
// one-to-one: it wraps the call in a Callable, submits it to asyncExecutor, and returns
// a ContinuableFuture holding the synchronous result.
// ------------------------------------------------------------------------------------

public ContinuableFuture<T> upsert(final T entity) {
    return asyncExecutor.execute(new Callable<T>() { @Override public T call() throws Exception { return mapper.upsert(entity); } });
}

public ContinuableFuture<T> upsert(final T entity, final Condition whereCause) {
    return asyncExecutor.execute(new Callable<T>() { @Override public T call() throws Exception { return mapper.upsert(entity, whereCause); } });
}

public ContinuableFuture<Boolean> refresh(final T entity) {
    return asyncExecutor.execute(new Callable<Boolean>() { @Override public Boolean call() throws Exception { return mapper.refresh(entity); } });
}

public ContinuableFuture<Boolean> refresh(final T entity, final Collection<String> propNamesToUpdate) {
    return asyncExecutor.execute(new Callable<Boolean>() { @Override public Boolean call() throws Exception { return mapper.refresh(entity, propNamesToUpdate); } });
}

// update(...) overloads: entity / prop-name subset / props map, each optionally with an
// explicit Connection. All return the affected-row count.

public ContinuableFuture<Integer> update(final T entity) {
    return asyncExecutor.execute(new Callable<Integer>() { @Override public Integer call() throws Exception { return mapper.update(entity); } });
}

public ContinuableFuture<Integer> update(final T entity, final Collection<String> propNamesToUpdate) {
    return asyncExecutor.execute(new Callable<Integer>() { @Override public Integer call() throws Exception { return mapper.update(entity, propNamesToUpdate); } });
}

public ContinuableFuture<Integer> update(final Map<String, Object> props, final ID id) {
    return asyncExecutor.execute(new Callable<Integer>() { @Override public Integer call() throws Exception { return mapper.update(props, id); } });
}

public ContinuableFuture<Integer> update(final Map<String, Object> props, final Condition whereCause) {
    return asyncExecutor.execute(new Callable<Integer>() { @Override public Integer call() throws Exception { return mapper.update(props, whereCause); } });
}

public ContinuableFuture<Integer> update(final Connection conn, final T entity) {
    return asyncExecutor.execute(new Callable<Integer>() { @Override public Integer call() throws Exception { return mapper.update(conn, entity); } });
}

public ContinuableFuture<Integer> update(final Connection conn, final T entity, final Collection<String> propNamesToUpdate) {
    return asyncExecutor.execute(new Callable<Integer>() { @Override public Integer call() throws Exception { return mapper.update(conn, entity, propNamesToUpdate); } });
}

public ContinuableFuture<Integer> update(final Connection conn, final Map<String, Object> props, final ID id) {
    return asyncExecutor.execute(new Callable<Integer>() { @Override public Integer call() throws Exception { return mapper.update(conn, props, id); } });
}

public ContinuableFuture<Integer> update(final Connection conn, final Map<String, Object> props, final Condition whereCause) {
    return asyncExecutor.execute(new Callable<Integer>() { @Override public Integer call() throws Exception { return mapper.update(conn, props, whereCause); } });
}

// NOTE(review): the commented-out @Deprecated updateAll overloads below appear to be
// superseded by the batchUpdate family; consider deleting this dead code outright.
// @Deprecated
// public ContinuableFuture<Integer> updateAll(final Collection<? extends T> entities) {
//     return asyncExecutor.execute(new Callable<Integer>() { @Override public Integer call() throws Exception { return mapper.updateAll(entities); } });
// }
// @Deprecated
// public ContinuableFuture<Integer> updateAll(final Collection<? extends T> entities, final Collection<String> propNamesToUpdate) {
//     return asyncExecutor.execute(new Callable<Integer>() { @Override public Integer call() throws Exception { return mapper.updateAll(entities, propNamesToUpdate); } });
// }
// @Deprecated
// public ContinuableFuture<Integer> updateAll(final Collection<? extends T> entities, final IsolationLevel isolationLevel) {
//     return asyncExecutor.execute(new Callable<Integer>() { @Override public Integer call() throws Exception { return mapper.updateAll(entities, isolationLevel); } });
// }
// @Deprecated
// public ContinuableFuture<Integer> updateAll(final Collection<? extends T> entities, final Collection<String> propNamesToUpdate, final IsolationLevel isolationLevel) {
//     return asyncExecutor.execute(new Callable<Integer>() { @Override public Integer call() throws Exception { return mapper.updateAll(entities, propNamesToUpdate, isolationLevel); } });
// }
// @Deprecated
// public ContinuableFuture<Integer> updateAll(final Connection conn, final Collection<? extends T> entities) {
//     return asyncExecutor.execute(new Callable<Integer>() { @Override public Integer call() throws Exception { return mapper.updateAll(conn, entities); } });
// }
// @Deprecated
// public ContinuableFuture<Integer> updateAll(final Connection conn, final Collection<? extends T> entities, final Collection<String> propNamesToUpdate) {
//     return asyncExecutor.execute(new Callable<Integer>() { @Override public Integer call() throws Exception { return mapper.updateAll(conn, entities, propNamesToUpdate); } });
// }

// batchUpdate(...) overloads: optional prop-name subset, batch size, isolation level,
// and explicit Connection.

public ContinuableFuture<Integer> batchUpdate(final Collection<? extends T> entities) {
    return asyncExecutor.execute(new Callable<Integer>() { @Override public Integer call() throws Exception { return mapper.batchUpdate(entities); } });
}

public ContinuableFuture<Integer> batchUpdate(final Collection<? extends T> entities, final Collection<String> propNamesToUpdate) {
    return asyncExecutor.execute(new Callable<Integer>() { @Override public Integer call() throws Exception { return mapper.batchUpdate(entities, propNamesToUpdate); } });
}

public ContinuableFuture<Integer> batchUpdate(final Collection<? extends T> entities, final int batchSize) {
    return asyncExecutor.execute(new Callable<Integer>() { @Override public Integer call() throws Exception { return mapper.batchUpdate(entities, batchSize); } });
}

public ContinuableFuture<Integer> batchUpdate(final Collection<? extends T> entities, final Collection<String> propNamesToUpdate, final int batchSize) {
    return asyncExecutor.execute(new Callable<Integer>() { @Override public Integer call() throws Exception { return mapper.batchUpdate(entities, propNamesToUpdate, batchSize); } });
}

public ContinuableFuture<Integer> batchUpdate(final Collection<? extends T> entities, final int batchSize, final IsolationLevel isolationLevel) {
    return asyncExecutor.execute(new Callable<Integer>() { @Override public Integer call() throws Exception { return mapper.batchUpdate(entities, batchSize, isolationLevel); } });
}

public ContinuableFuture<Integer> batchUpdate(final Collection<? extends T> entities, final Collection<String> propNamesToUpdate, final int batchSize, final IsolationLevel isolationLevel) {
    return asyncExecutor.execute(new Callable<Integer>() { @Override public Integer call() throws Exception { return mapper.batchUpdate(entities, propNamesToUpdate, batchSize, isolationLevel); } });
}

public ContinuableFuture<Integer> batchUpdate(final Connection conn, final Collection<? extends T> entities) {
    return asyncExecutor.execute(new Callable<Integer>() { @Override public Integer call() throws Exception { return mapper.batchUpdate(conn, entities); } });
}

public ContinuableFuture<Integer> batchUpdate(final Connection conn, final Collection<? extends T> entities, final Collection<String> propNamesToUpdate) {
    return asyncExecutor.execute(new Callable<Integer>() { @Override public Integer call() throws Exception { return mapper.batchUpdate(conn, entities, propNamesToUpdate); } });
}

public ContinuableFuture<Integer> batchUpdate(final Connection conn, final Collection<? extends T> entities, final int batchSize) {
    return asyncExecutor.execute(new Callable<Integer>() { @Override public Integer call() throws Exception { return mapper.batchUpdate(conn, entities, batchSize); } });
}

public ContinuableFuture<Integer> batchUpdate(final Connection conn, final Collection<? extends T> entities, final Collection<String> propNamesToUpdate, final int batchSize) {
    return asyncExecutor.execute(new Callable<Integer>() { @Override public Integer call() throws Exception { return mapper.batchUpdate(conn, entities, propNamesToUpdate, batchSize); } });
}

// delete / batchDelete / deleteById / batchDeleteByIds overloads.

public ContinuableFuture<Integer> delete(final Condition whereCause) {
    return asyncExecutor.execute(new Callable<Integer>() { @Override public Integer call() throws Exception { return mapper.delete(whereCause); } });
}

public ContinuableFuture<Integer> delete(final Connection conn, final Condition whereCause) {
    return asyncExecutor.execute(new Callable<Integer>() { @Override public Integer call() throws Exception { return mapper.delete(conn, whereCause); } });
}

public ContinuableFuture<Integer> delete(final T entity) {
    return asyncExecutor.execute(new Callable<Integer>() { @Override public Integer call() throws Exception { return mapper.delete(entity); } });
}

public ContinuableFuture<Integer> delete(final Connection conn, final T entity) {
    return asyncExecutor.execute(new Callable<Integer>() { @Override public Integer call() throws Exception { return mapper.delete(conn, entity); } });
}

public ContinuableFuture<Integer> batchDelete(final Collection<T> entities) {
    return asyncExecutor.execute(new Callable<Integer>() { @Override public Integer call() throws Exception { return mapper.batchDelete(entities); } });
}

public ContinuableFuture<Integer> batchDelete(final Collection<T> entities, final int batchSize) {
    return asyncExecutor.execute(new Callable<Integer>() { @Override public Integer call() throws Exception { return mapper.batchDelete(entities, batchSize); } });
}

public ContinuableFuture<Integer> batchDelete(final Collection<T> entities, final int batchSize, final IsolationLevel isolationLevel) {
    return asyncExecutor.execute(new Callable<Integer>() { @Override public Integer call() throws Exception { return mapper.batchDelete(entities, batchSize, isolationLevel); } });
}

public ContinuableFuture<Integer> batchDelete(final Connection conn, final Collection<T> entities) {
    return asyncExecutor.execute(new Callable<Integer>() { @Override public Integer call() throws Exception { return mapper.batchDelete(conn, entities); } });
}

public ContinuableFuture<Integer> batchDelete(final Connection conn, final Collection<T> entities, final int batchSize) {
    return asyncExecutor.execute(new Callable<Integer>() { @Override public Integer call() throws Exception { return mapper.batchDelete(conn, entities, batchSize); } });
}

public ContinuableFuture<Integer> deleteById(final ID id) {
    return asyncExecutor.execute(new Callable<Integer>() { @Override public Integer call() throws Exception { return mapper.deleteById(id); } });
}

public ContinuableFuture<Integer> deleteById(final Connection conn, final ID id) {
    return asyncExecutor.execute(new Callable<Integer>() { @Override public Integer call() throws Exception { return mapper.deleteById(conn, id); } });
}

public ContinuableFuture<Integer> batchDeleteByIds(final Collection<ID> ids) {
    return asyncExecutor.execute(new Callable<Integer>() { @Override public Integer call() throws Exception { return mapper.batchDeleteByIds(ids); } });
}

public ContinuableFuture<Integer> batchDeleteByIds(final Collection<ID> ids, final int batchSize) {
    return asyncExecutor.execute(new Callable<Integer>() { @Override public Integer call() throws Exception { return mapper.batchDeleteByIds(ids, batchSize); } });
}

public ContinuableFuture<Integer> batchDeleteByIds(final Collection<ID> ids, final int batchSize, final IsolationLevel isolationLevel) {
    return asyncExecutor.execute(new Callable<Integer>() { @Override public Integer call() throws Exception { return mapper.batchDeleteByIds(ids, batchSize, isolationLevel); } });
}

public ContinuableFuture<Integer> batchDeleteByIds(final Connection conn, final Collection<ID> ids) {
    return asyncExecutor.execute(new Callable<Integer>() { @Override public Integer call() throws Exception { return mapper.batchDeleteByIds(conn, ids); } });
}

public ContinuableFuture<Integer> batchDeleteByIds(final Connection conn, final Collection<ID> ids, final int batchSize) {
    return asyncExecutor.execute(new Callable<Integer>() { @Override public Integer call() throws Exception { return mapper.batchDeleteByIds(conn, ids, batchSize); } });
}
} // end of the async Mapper wrapper class

/**
 * Refer to http://landawn.com/introduction-to-jdbc.html about how to set parameters in <code>java.sql.PreparedStatement</code>
 *
 * @author Haiyang Li
 *
 */
public static interface StatementSetter {

    // Default setter: if explicit parameter Types are supplied (and cover every
    // parameter), each Type binds its value; otherwise the runtime class of each
    // value chooses the binding, with nulls bound via setObject.
    public static final StatementSetter DEFAULT = new AbstractStatementSetter() {
        @SuppressWarnings("rawtypes")
        @Override
        protected void setParameters(final PreparedStatement stmt, final int parameterCount, final Object[] parameters, final Type[] parameterTypes) throws SQLException {
            if (N.notNullOrEmpty(parameterTypes) && parameterTypes.length >= parameterCount) {
                for (int i = 0; i < parameterCount; i++) {
                    parameterTypes[i].set(stmt, i + 1, parameters[i]);
                }
            } else if (N.notNullOrEmpty(parameters) && parameters.length >= parameterCount) {
                for (int i = 0; i < parameterCount; i++) {
                    if (parameters[i] == null) {
                        stmt.setObject(i + 1, parameters[i]);
                    } else {
                        N.typeOf(parameters[i].getClass()).set(stmt, i + 1, parameters[i]);
                    }
                }
            }
        }
    };

    // Binds the given parameters into stmt according to namedSQL's parameter list.
    public void setParameters(final NamedSQL namedSQL, final PreparedStatement stmt, final Object...
parameters) throws SQLException;
}

/**
 * Refer to http://landawn.com/introduction-to-jdbc.html about how to read columns/rows from <code>java.sql.ResultSet</code>
 *
 * @author Haiyang Li
 *
 */
public static interface ResultExtractor<T> {

    // Pass-through extractor: materializes the ResultSet into a DataSet unchanged.
    public static final ResultExtractor<DataSet> DATA_SET = new AbstractResultExtractor<DataSet>() {
        @Override
        protected DataSet convert(DataSet dataSet) {
            return dataSet;
        }
    };

    // Reads rows from rs (honoring jdbcSettings offset/count) into a T.
    public T extractData(final ResultSet rs, final JdbcSettings jdbcSettings) throws SQLException;

    /** Collects rows into a HashMap; duplicate keys throw (throwing merger). */
    public static <K, V> ResultExtractor<Map<K, V>> toMap(final RowMapper<K> keyExtractor, final RowMapper<V> valueExtractor) {
        return toMap(keyExtractor, valueExtractor, Suppliers.<K, V> ofMap());
    }

    /** Same as above with a caller-supplied map factory. */
    public static <K, V, M extends Map<K, V>> ResultExtractor<M> toMap(final RowMapper<K> keyExtractor, final RowMapper<V> valueExtractor, final Supplier<? extends M> supplier) {
        return toMap(keyExtractor, valueExtractor, FN.throwingMerger(), supplier);
    }

    /**
     * Collects rows into a map, resolving duplicate keys with {@code mergeFunction}.
     *
     * @see {@link Fn.EE#throwingMerger()}
     * @see {@link Fn.EE#replacingMerger()}
     * @see {@link Fn.EE#ignoringMerger()}
     */
    public static <K, V> ResultExtractor<Map<K, V>> toMap(final RowMapper<K> keyExtractor, final RowMapper<V> valueExtractor, final Try.BinaryOperator<V, SQLException> mergeFunction) {
        return toMap(keyExtractor, valueExtractor, mergeFunction, Suppliers.<K, V> ofMap());
    }

    /**
     * Collects rows into a supplier-created map, resolving duplicate keys with {@code mergeFunction}.
     *
     * @see {@link Fn.EE#throwingMerger()}
     * @see {@link Fn.EE#replacingMerger()}
     * @see {@link Fn.EE#ignoringMerger()}
     */
    public static <K, V, M extends Map<K, V>> ResultExtractor<M> toMap(final RowMapper<K> keyExtractor, final RowMapper<V> valueExtractor, final Try.BinaryOperator<V, SQLException> mergeFunction, final Supplier<? extends M> supplier) {
        N.checkArgNotNull(keyExtractor, "keyExtractor");
        N.checkArgNotNull(valueExtractor, "valueExtractor");
        N.checkArgNotNull(mergeFunction, "mergeFunction");
        N.checkArgNotNull(supplier, "supplier");

        return new ResultExtractor<M>() {
            @Override
            public M extractData(final ResultSet rs, final JdbcSettings jdbcSettings) throws SQLException {
                final int offset = jdbcSettings.getOffset();
                int count = jdbcSettings.getCount();
                JdbcUtil.skip(rs, offset);
                final M result = supplier.get();
                while (count-- > 0 && rs.next()) {
                    Maps.merge(result, keyExtractor.apply(rs), valueExtractor.apply(rs), mergeFunction);
                }
                return result;
            }
        };
    }

    /** Groups row values per key through the given downstream Collector. */
    public static <K, V, A, D> ResultExtractor<Map<K, D>> toMap(final RowMapper<K> keyExtractor, final RowMapper<V> valueExtractor, final Collector<? super V, A, D> downstream) {
        return toMap(keyExtractor, valueExtractor, downstream, Suppliers.<K, D> ofMap());
    }

    /** Same as above with a caller-supplied map factory. */
    public static <K, V, A, D, M extends Map<K, D>> ResultExtractor<M> toMap(final RowMapper<K> keyExtractor, final RowMapper<V> valueExtractor, final Collector<? super V, A, D> downstream, final Supplier<? extends M> supplier) {
        N.checkArgNotNull(keyExtractor, "keyExtractor");
        N.checkArgNotNull(valueExtractor, "valueExtractor");
        N.checkArgNotNull(downstream, "downstream");
        N.checkArgNotNull(supplier, "supplier");

        return new ResultExtractor<M>() {
            @Override
            public M extractData(final ResultSet rs, final JdbcSettings jdbcSettings) throws SQLException {
                final int offset = jdbcSettings.getOffset();
                int count = jdbcSettings.getCount();
                JdbcUtil.skip(rs, offset);
                final Supplier<A> downstreamSupplier = downstream.supplier();
                final BiConsumer<A, ? super V> downstreamAccumulator = downstream.accumulator();
                final Function<A, D> downstreamFinisher = downstream.finisher();
                // The map is first populated with accumulator containers (A), then each
                // entry is finished in place to D — hence the unchecked casts below.
                final M result = supplier.get();
                final Map<K, A> tmp = (Map<K, A>) result;
                K key = null;
                A container = null;
                while (count-- > 0 && rs.next()) {
                    key = keyExtractor.apply(rs);
                    container = tmp.get(key);
                    if (container == null) {
                        container = downstreamSupplier.get();
                        tmp.put(key, container);
                    }
                    downstreamAccumulator.accept(container, valueExtractor.apply(rs));
                }
                for (Map.Entry<K, D> entry : result.entrySet()) {
                    entry.setValue(downstreamFinisher.apply((A) entry.getValue()));
                }
                return result;
            }
        };
    }

    // BiRowMapper variants: identical semantics, but the mappers also receive the
    // ResultSet's column labels.

    /** Collects rows into a HashMap; duplicate keys throw (throwing merger). */
    public static <K, V> ResultExtractor<Map<K, V>> toMap(final BiRowMapper<K> keyExtractor, final BiRowMapper<V> valueExtractor) {
        return toMap(keyExtractor, valueExtractor, Suppliers.<K, V> ofMap());
    }

    /** Same as above with a caller-supplied map factory. */
    public static <K, V, M extends Map<K, V>> ResultExtractor<M> toMap(final BiRowMapper<K> keyExtractor, final BiRowMapper<V> valueExtractor, final Supplier<? extends M> supplier) {
        return toMap(keyExtractor, valueExtractor, FN.throwingMerger(), supplier);
    }

    /**
     * Collects rows into a map, resolving duplicate keys with {@code mergeFunction}.
     *
     * @see {@link Fn.EE#throwingMerger()}
     * @see {@link Fn.EE#replacingMerger()}
     * @see {@link Fn.EE#ignoringMerger()}
     */
    public static <K, V> ResultExtractor<Map<K, V>> toMap(final BiRowMapper<K> keyExtractor, final BiRowMapper<V> valueExtractor, final Try.BinaryOperator<V, SQLException> mergeFunction) {
        return toMap(keyExtractor, valueExtractor, mergeFunction, Suppliers.<K, V> ofMap());
    }

    /**
     * Collects rows into a supplier-created map, resolving duplicate keys with {@code mergeFunction}.
     *
     * @see {@link Fn.EE#throwingMerger()}
     * @see {@link Fn.EE#replacingMerger()}
     * @see {@link Fn.EE#ignoringMerger()}
     */
    public static <K, V, M extends Map<K, V>> ResultExtractor<M> toMap(final BiRowMapper<K> keyExtractor, final BiRowMapper<V> valueExtractor, final Try.BinaryOperator<V, SQLException> mergeFunction, final Supplier<? extends M> supplier) {
        N.checkArgNotNull(keyExtractor, "keyExtractor");
        N.checkArgNotNull(valueExtractor, "valueExtractor");
        N.checkArgNotNull(mergeFunction, "mergeFunction");
        N.checkArgNotNull(supplier, "supplier");

        return new ResultExtractor<M>() {
            @Override
            public M extractData(final ResultSet rs, final JdbcSettings jdbcSettings) throws SQLException {
                final int offset = jdbcSettings.getOffset();
                int count = jdbcSettings.getCount();
                JdbcUtil.skip(rs, offset);
                final List<String> columnLabels = JdbcUtil.getColumnLabelList(rs);
                final M result = supplier.get();
                while (count-- > 0 && rs.next()) {
                    Maps.merge(result, keyExtractor.apply(rs, columnLabels), valueExtractor.apply(rs, columnLabels), mergeFunction);
                }
                return result;
            }
        };
    }

    /** Groups row values per key through the given downstream Collector. */
    public static <K, V, A, D> ResultExtractor<Map<K, D>> toMap(final BiRowMapper<K> keyExtractor, final BiRowMapper<V> valueExtractor, final Collector<? super V, A, D> downstream) {
        return toMap(keyExtractor, valueExtractor, downstream, Suppliers.<K, D> ofMap());
    }

    /** Same as above with a caller-supplied map factory. */
    public static <K, V, A, D, M extends Map<K, D>> ResultExtractor<M> toMap(final BiRowMapper<K> keyExtractor, final BiRowMapper<V> valueExtractor, final Collector<? super V, A, D> downstream, final Supplier<? extends M> supplier) {
        N.checkArgNotNull(keyExtractor, "keyExtractor");
        N.checkArgNotNull(valueExtractor, "valueExtractor");
        N.checkArgNotNull(downstream, "downstream");
        N.checkArgNotNull(supplier, "supplier");

        return new ResultExtractor<M>() {
            @Override
            public M extractData(final ResultSet rs, final JdbcSettings jdbcSettings) throws SQLException {
                final int offset = jdbcSettings.getOffset();
                int count = jdbcSettings.getCount();
                JdbcUtil.skip(rs, offset);
                final Supplier<A> downstreamSupplier = downstream.supplier();
                final BiConsumer<A, ? super V> downstreamAccumulator = downstream.accumulator();
                final Function<A, D> downstreamFinisher = downstream.finisher();
                final List<String> columnLabels = JdbcUtil.getColumnLabelList(rs);
                // Accumulate containers per key, then finish each entry in place.
                final M result = supplier.get();
                final Map<K, A> tmp = (Map<K, A>) result;
                K key = null;
                A container = null;
                while (count-- > 0 && rs.next()) {
                    key = keyExtractor.apply(rs, columnLabels);
                    container = tmp.get(key);
                    if (container == null) {
                        container = downstreamSupplier.get();
                        tmp.put(key, container);
                    }
                    downstreamAccumulator.accept(container, valueExtractor.apply(rs, columnLabels));
                }
                for (Map.Entry<K, D> entry : result.entrySet()) {
                    entry.setValue(downstreamFinisher.apply((A) entry.getValue()));
                }
                return result;
            }
        };
    }

    /** Groups row values into a List per key (HashMap-backed). */
    public static <K, V> ResultExtractor<Map<K, List<V>>> groupTo(final RowMapper<K> keyExtractor, final RowMapper<V> valueExtractor) {
        return groupTo(keyExtractor, valueExtractor, Suppliers.<K, List<V>> ofMap());
    }

    /** Groups row values into a List per key, using the supplied map factory. */
    public static <K, V, M extends Map<K, List<V>>> ResultExtractor<M> groupTo(final RowMapper<K> keyExtractor, final RowMapper<V> valueExtractor, final Supplier<? extends M> supplier) {
        N.checkArgNotNull(keyExtractor, "keyExtractor");
        N.checkArgNotNull(valueExtractor, "valueExtractor");
        N.checkArgNotNull(supplier, "supplier");

        return new ResultExtractor<M>() {
            @Override
            public M extractData(final ResultSet rs, final JdbcSettings jdbcSettings) throws SQLException {
                final int offset = jdbcSettings.getOffset();
                int count = jdbcSettings.getCount();
                JdbcUtil.skip(rs, offset);
                final M result = supplier.get();
                K key = null;
                List<V> value = null;
                while (count-- > 0 && rs.next()) {
                    key = keyExtractor.apply(rs);
                    value = result.get(key);
                    if (value == null) {
                        value = new ArrayList<>();
                        result.put(key, value);
                    }
                    value.add(valueExtractor.apply(rs));
                }
                return result;
            }
        };
    }

    /** BiRowMapper variant of groupTo (HashMap-backed). */
    public static <K, V> ResultExtractor<Map<K, List<V>>> groupTo(final BiRowMapper<K> keyExtractor, final BiRowMapper<V> valueExtractor) {
        return groupTo(keyExtractor, valueExtractor, Suppliers.<K, List<V>> ofMap());
    }

    /** BiRowMapper variant of groupTo with a caller-supplied map factory. */
    public static <K, V, M extends Map<K, List<V>>> ResultExtractor<M> groupTo(final BiRowMapper<K> keyExtractor, final BiRowMapper<V> valueExtractor, final Supplier<?
extends M> supplier) {
        N.checkArgNotNull(keyExtractor, "keyExtractor");
        N.checkArgNotNull(valueExtractor, "valueExtractor");
        N.checkArgNotNull(supplier, "supplier");

        return new ResultExtractor<M>() {
            @Override
            public M extractData(final ResultSet rs, final JdbcSettings jdbcSettings) throws SQLException {
                final int offset = jdbcSettings.getOffset();
                int count = jdbcSettings.getCount();
                JdbcUtil.skip(rs, offset);
                final List<String> columnLabels = JdbcUtil.getColumnLabelList(rs);
                final M result = supplier.get();
                K key = null;
                List<V> value = null;
                while (count-- > 0 && rs.next()) {
                    key = keyExtractor.apply(rs, columnLabels);
                    value = result.get(key);
                    if (value == null) {
                        value = new ArrayList<>();
                        result.put(key, value);
                    }
                    value.add(valueExtractor.apply(rs, columnLabels));
                }
                return result;
            }
        };
    }
}

/**
 * Refer to http://landawn.com/introduction-to-jdbc.html about how to read columns/rows from <code>java.sql.ResultSet</code>
 *
 * @author Haiyang Li
 *
 */
static interface ResultSetExtractor<T> {
    public T extractData(final Class<?> targetClass, final NamedSQL namedSQL, final ResultSet rs, final JdbcSettings jdbcSettings) throws SQLException;
}

/**
 * Base StatementSetter: resolves parameter values from a named-parameter entity/Map,
 * a positional array/list, or varargs, then delegates binding to a subclass hook.
 */
public static abstract class AbstractStatementSetter implements StatementSetter {
    @SuppressWarnings("rawtypes")
    @Override
    public void setParameters(final NamedSQL namedSQL, final PreparedStatement stmt, final Object... parameters) throws SQLException {
        final int parameterCount = namedSQL.getParameterCount();

        if (parameterCount == 0) {
            return;
        } else if (N.isNullOrEmpty(parameters)) {
            throw new IllegalArgumentException(
                    "The count of parameter in sql is: " + namedSQL.getParameterCount() + ". But the specified parameters is null or empty");
        }

        Object[] parameterValues = null;
        Type[] parameterTypes = null;

        if (isEntityOrMapParameter(namedSQL, parameters)) {
            // Single entity/Map argument: resolve each named parameter by property name.
            final List<String> namedParameters = namedSQL.getNamedParameters();
            Object parameter_0 = parameters[0];
            parameterValues = new Object[parameterCount];

            if (parameter_0 instanceof Map) {
                @SuppressWarnings("unchecked")
                Map<String, Object> m = (Map<String, Object>) parameter_0;
                for (int i = 0; i < parameterCount; i++) {
                    parameterValues[i] = m.get(namedParameters.get(i));
                    // Distinguish an explicit null value from a missing key.
                    if ((parameterValues[i] == null) && !m.containsKey(namedParameters.get(i))) {
                        throw new IllegalArgumentException("Parameter for property '" + namedParameters.get(i) + "' is missed");
                    }
                }
            } else {
                // Entity argument: pull values (and db Types) via its property metadata.
                final Object entity = parameter_0;
                final Class<?> cls = entity.getClass();
                final EntityInfo entityInfo = ParserUtil.getEntityInfo(cls);
                parameterTypes = new Type[parameterCount];
                PropInfo propInfo = null;
                for (int i = 0; i < parameterCount; i++) {
                    propInfo = entityInfo.getPropInfo(namedParameters.get(i));
                    if (propInfo == null) {
                        throw new IllegalArgumentException("Parameter for property '" + namedParameters.get(i) + "' is missed");
                    }
                    parameterValues[i] = propInfo.getPropValue(entity);
                    parameterTypes[i] = propInfo.dbType;
                }
            }
        } else {
            parameterValues = getParameterValues(namedSQL, parameters);
        }

        setParameters(stmt, parameterCount, parameterValues, parameterTypes);
    }

    // Subclass hook: performs the actual PreparedStatement binding.
    @SuppressWarnings("rawtypes")
    protected abstract void setParameters(PreparedStatement stmt, int parameterCount, Object[] parameters, Type[] parameterTypes) throws SQLException;

    // Unwraps a single Object[]/List argument that already carries all positional
    // values; otherwise returns the varargs array as-is.
    protected Object[] getParameterValues(final NamedSQL namedSQL, final Object... parameters) {
        if ((parameters.length == 1) && (parameters[0] != null)) {
            if (parameters[0] instanceof Object[] && ((((Object[]) parameters[0]).length) >= namedSQL.getParameterCount())) {
                return (Object[]) parameters[0];
            } else if (parameters[0] instanceof List && (((List<?>) parameters[0]).size() >= namedSQL.getParameterCount())) {
                final Collection<?> c = (Collection<?>) parameters[0];
                return c.toArray(new Object[c.size()]);
            }
        }
        return parameters;
    }
}

/**
 * Base ResultExtractor: extracts the window (offset/count) into a DataSet and lets
 * subclasses convert it to the target type.
 */
public static abstract class AbstractResultExtractor<T> implements ResultExtractor<T> {
    @Override
    public T extractData(final ResultSet rs, final JdbcSettings jdbcSettings) throws SQLException {
        return convert(JdbcUtil.extractData(rs, jdbcSettings.getOffset(), jdbcSettings.getCount(), false));
    }

    protected abstract T convert(final DataSet dataSet);
}

// Mutable-until-frozen bag of JDBC execution options; -1 means "use driver default".
public static class JdbcSettings {
    public static final String DEFAULT_GENERATED_ID_PROP_NAME = "id";
    public static final int DEFAULT_BATCH_SIZE = 200;
    public static final int DEFAULT_NO_GENERATED_KEYS = Statement.NO_GENERATED_KEYS;
    public static final int DEFAULT_FETCH_DIRECTION = ResultSet.FETCH_FORWARD;
    public static final int DEFAULT_RESULT_SET_TYPE = ResultSet.TYPE_FORWARD_ONLY;
    public static final int DEFAULT_RESULT_SET_CONCURRENCY = ResultSet.CONCUR_READ_ONLY;
    public static final int DEFAULT_RESULT_SET_HOLDABILITY = ResultSet.HOLD_CURSORS_OVER_COMMIT;

    private boolean logSQL = false;
    private boolean logSQLWithParameters = false;
    private int batchSize = -1;
    private int queryTimeout = -1;
    private boolean autoGeneratedKeys = false;
    private int[] returnedColumnIndexes = null;
    private String[] returnedColumnNames = null;
    private int maxRows = -1;
    private int maxFieldSize = -1;
    private int fetchSize = -1;
    private int fetchDirection = -1;
    private int resultSetType = -1;
    private int resultSetConcurrency = -1;
    private int resultSetHoldability = -1;
    private int offset = 0;
    private int count = Integer.MAX_VALUE;
    private String queryWithDataSource;
    private Collection<String>
queryWithDataSources;
    private boolean queryInParallel = false;
    private IsolationLevel isolationLevel = null;
    private boolean streamTransactionIndependent = false;
    // FIX(review): renamed from the typo "fozen"; private, both uses updated below.
    private boolean frozen = false;

    protected JdbcSettings() {
    }

    public static JdbcSettings create() {
        return new JdbcSettings();
    }

    /**
     * Returns an independent, unfrozen field-by-field copy of these settings.
     * Array and collection fields are defensively copied.
     *
     * FIX(review): the original omitted {@code logSQLWithParameters}, so copies
     * silently dropped that flag (it is present in the field list and in
     * {@code hashCode()}); it is now copied like every other field.
     */
    public JdbcSettings copy() {
        JdbcSettings copy = new JdbcSettings();
        copy.logSQL = this.logSQL;
        copy.logSQLWithParameters = this.logSQLWithParameters;
        copy.batchSize = this.batchSize;
        copy.queryTimeout = this.queryTimeout;
        copy.autoGeneratedKeys = this.autoGeneratedKeys;
        copy.returnedColumnIndexes = (this.returnedColumnIndexes == null) ? null : N.copyOf(this.returnedColumnIndexes, this.returnedColumnIndexes.length);
        copy.returnedColumnNames = (this.returnedColumnNames == null) ? null : N.copyOf(this.returnedColumnNames, this.returnedColumnNames.length);
        copy.maxRows = this.maxRows;
        copy.maxFieldSize = this.maxFieldSize;
        copy.fetchSize = this.fetchSize;
        copy.fetchDirection = this.fetchDirection;
        copy.resultSetType = this.resultSetType;
        copy.resultSetConcurrency = this.resultSetConcurrency;
        copy.resultSetHoldability = this.resultSetHoldability;
        copy.offset = this.offset;
        copy.count = this.count;
        copy.queryWithDataSource = this.queryWithDataSource;
        copy.queryWithDataSources = this.queryWithDataSources == null ? null : new ArrayList<>(this.queryWithDataSources);
        copy.queryInParallel = this.queryInParallel;
        copy.isolationLevel = this.isolationLevel;
        copy.streamTransactionIndependent = this.streamTransactionIndependent;
        return copy;
    }

    // Fluent accessors. Every setter rejects changes once freeze() has been called.

    public boolean isLogSQL() { return logSQL; }

    public JdbcSettings setLogSQL(final boolean logSQL) { assertNotFrozen(); this.logSQL = logSQL; return this; }

    public boolean isLogSQLWithParameters() { return logSQLWithParameters; }

    public JdbcSettings setLogSQLWithParameters(final boolean logSQLWithParameters) { assertNotFrozen(); this.logSQLWithParameters = logSQLWithParameters; return this; }

    public int getBatchSize() { return batchSize; }

    public JdbcSettings setBatchSize(final int batchSize) { assertNotFrozen(); this.batchSize = batchSize; return this; }

    public int getQueryTimeout() { return queryTimeout; }

    public JdbcSettings setQueryTimeout(final int queryTimeout) { assertNotFrozen(); this.queryTimeout = queryTimeout; return this; }

    public boolean isAutoGeneratedKeys() { return autoGeneratedKeys; }

    public JdbcSettings setAutoGeneratedKeys(final boolean autoGeneratedKeys) { assertNotFrozen(); this.autoGeneratedKeys = autoGeneratedKeys; return this; }

    /**
     * @return
     * @see {@link Connection#prepareStatement(String, int[])}
     */
    public int[] getReturnedColumnIndexes() { return returnedColumnIndexes; }

    /**
     * @param columnIndexes
     * @return
     * @see {@link Connection#prepareStatement(String, int[])}
     */
    public JdbcSettings setReturnedColumnIndexes(final int[] columnIndexes) { assertNotFrozen(); this.returnedColumnIndexes = columnIndexes; return this; }

    /**
     * @return
     * @see {@link Connection#prepareStatement(String, String[])}
     */
    public String[] getReturnedColumnNames() { return returnedColumnNames; }

    /**
     * @param columnNames
     * @return
     * @see {@link Connection#prepareStatement(String, String[])}
     */
    public JdbcSettings setReturnedColumnNames(final String[] columnNames) { assertNotFrozen(); this.returnedColumnNames = columnNames; return this; }

    public int getMaxRows() { return maxRows; }

    public JdbcSettings setMaxRows(final int maxRows) { assertNotFrozen(); this.maxRows = maxRows; return this; }

    public int getMaxFieldSize() { return maxFieldSize; }

    public JdbcSettings setMaxFieldSize(final int maxFieldSize) { assertNotFrozen(); this.maxFieldSize = maxFieldSize; return this; }

    public int getFetchSize() { return fetchSize; }

    public JdbcSettings setFetchSize(final int fetchSize) { assertNotFrozen(); this.fetchSize = fetchSize; return this; }

    public int getFetchDirection() { return fetchDirection; }

    public JdbcSettings setFetchDirection(final int fetchDirection) { assertNotFrozen(); this.fetchDirection = fetchDirection; return this; }

    public int getResultSetType() { return resultSetType; }

    public JdbcSettings setResultSetType(final int resultSetType) { assertNotFrozen(); this.resultSetType = resultSetType; return this; }

    public int getResultSetConcurrency() { return resultSetConcurrency; }

    public JdbcSettings setResultSetConcurrency(final int resultSetConcurrency) { assertNotFrozen(); this.resultSetConcurrency = resultSetConcurrency; return this; }

    public int getResultSetHoldability() { return resultSetHoldability; }

    public JdbcSettings setResultSetHoldability(final int resultSetHoldability) { assertNotFrozen(); this.resultSetHoldability = resultSetHoldability; return this; }

    public int getOffset() { return offset; }

    public JdbcSettings setOffset(final int offset) { assertNotFrozen(); this.offset = offset; return this; }

    public int getCount() { return count; }

    public JdbcSettings setCount(final int count) { assertNotFrozen(); this.count = count; return this; }

    public String getQueryWithDataSource() { return queryWithDataSource; }

    public JdbcSettings setQueryWithDataSource(final String queryWithDataSource) { assertNotFrozen(); this.queryWithDataSource = queryWithDataSource; return this; }

    public Collection<String> getQueryWithDataSources() { return queryWithDataSources; }

    public JdbcSettings setQueryWithDataSources(final Collection<String> queryWithDataSources) { assertNotFrozen(); this.queryWithDataSources = queryWithDataSources; return this; }

    public boolean isQueryInParallel() { return queryInParallel; }

    public JdbcSettings setQueryInParallel(final boolean queryInParallel) { assertNotFrozen(); this.queryInParallel = queryInParallel; return this; }

    public IsolationLevel getIsolationLevel() { return isolationLevel; }

    public JdbcSettings setIsolationLevel(IsolationLevel isolationLevel) { assertNotFrozen(); this.isolationLevel = isolationLevel; return this; }

    /**
     * @return
     */
    boolean streamTransactionIndependent() { return streamTransactionIndependent; }

    /**
     * {@code streamTransactionIndependent = true} means the query executed by {@code stream/streamAll(...)} methods won't be in any transaction(using connection started by transaction), even the {@code stream/streamAll(...)} methods are invoked inside of a transaction block.
     *
     * @param streamTransactionIndependent
     * @return
     */
    JdbcSettings setStreamTransactionIndependent(final boolean streamTransactionIndependent) { assertNotFrozen(); this.streamTransactionIndependent = streamTransactionIndependent; return this; }

    // Marks these settings immutable; subsequent setter calls throw.
    void freeze() {
        frozen = true;
    }

    void assertNotFrozen() {
        if (frozen) {
            throw new AbacusException("It's finalized. No change is allowed");
        }
    }

    @Override
    public int hashCode() {
        final int prime = 31;
        int result = 1;
        result = (prime * result) + (logSQL ? 1231 : 1237);
        result = (prime * result) + (logSQLWithParameters ? 1231 : 1237);
        result = (prime * result) + batchSize;
        result = (prime * result) + queryTimeout;
        result = (prime * result) + (autoGeneratedKeys ?
1231 : 1237); result = (prime * result) + Arrays.hashCode(returnedColumnIndexes); result = (prime * result) + Arrays.hashCode(returnedColumnNames); result = (prime * result) + maxRows; result = (prime * result) + maxFieldSize; result = (prime * result) + fetchSize; result = (prime * result) + fetchDirection; result = (prime * result) + resultSetType; result = (prime * result) + resultSetConcurrency; result = (prime * result) + resultSetHoldability; result = (prime * result) + offset; result = (prime * result) + count; result = (prime * result) + ((queryWithDataSource == null) ? 0 : queryWithDataSource.hashCode()); result = (prime * result) + ((queryWithDataSources == null) ? 0 : queryWithDataSources.hashCode()); result = (prime * result) + (queryInParallel ? 1231 : 1237); result = (prime * result) + ((isolationLevel == null) ? 0 : isolationLevel.hashCode()); result = (prime * result) + (streamTransactionIndependent ? 1231 : 1237); return result; } @Override public boolean equals(final Object obj) { if (this == obj) { return true; } if (obj instanceof JdbcSettings) { JdbcSettings other = (JdbcSettings) obj; return N.equals(logSQL, other.logSQL) && N.equals(logSQLWithParameters, other.logSQLWithParameters) && N.equals(batchSize, other.batchSize) && N.equals(queryTimeout, other.queryTimeout) && N.equals(autoGeneratedKeys, other.autoGeneratedKeys) && N.equals(returnedColumnIndexes, other.returnedColumnIndexes) && N.equals(returnedColumnNames, other.returnedColumnNames) && N.equals(maxRows, other.maxRows) && N.equals(maxFieldSize, other.maxFieldSize) && N.equals(fetchSize, other.fetchSize) && N.equals(fetchDirection, other.fetchDirection) && N.equals(resultSetType, other.resultSetType) && N.equals(resultSetConcurrency, other.resultSetConcurrency) && N.equals(resultSetHoldability, other.resultSetHoldability) && N.equals(offset, other.offset) && N.equals(count, other.count) && N.equals(queryWithDataSource, other.queryWithDataSource) && N.equals(queryWithDataSources, 
other.queryWithDataSources) && N.equals(queryInParallel, other.queryInParallel) && N.equals(isolationLevel, other.isolationLevel) && N.equals(streamTransactionIndependent, other.streamTransactionIndependent); } return false; } @Override public String toString() { return "{logSQL=" + logSQL + ", logSQLWithParameters=" + logSQLWithParameters + ", batchSize=" + batchSize + ", queryTimeout=" + queryTimeout + ", autoGeneratedKeys=" + autoGeneratedKeys + ", returnedColumnIndexes=" + N.toString(returnedColumnIndexes) + ", returnedColumnNames=" + N.toString(returnedColumnNames) + ", maxRows=" + maxRows + ", maxFieldSize=" + maxFieldSize + ", fetchSize=" + fetchSize + ", fetchDirection=" + fetchDirection + ", resultSetType=" + resultSetType + ", resultSetConcurrency=" + resultSetConcurrency + ", resultSetHoldability=" + resultSetHoldability + ", offset=" + offset + ", count=" + count + ", queryWithDataSource=" + queryWithDataSource + ", queryWithDataSources=" + queryWithDataSources + ", queryInParallel=" + queryInParallel + ", isolationLevel=" + isolationLevel + ", streamTransactionIndependent=" + streamTransactionIndependent + "}"; } } }
apache-2.0
dvamedveda/b.savelev
chapter_002/src/main/java/ru/job4j/professions/common/Diagnose.java
167
package ru.job4j.professions.common;

/**
 * Helper (marker) class representing a doctor's diagnosis.
 *
 * <p>The class is intentionally empty: it only serves as a typed placeholder
 * passed between profession classes. (Original Javadoc was in Russian;
 * translated to English.)
 */
public class Diagnose {
}
apache-2.0
lyncode/sushi-counter
sushi-counter-core/src/main/java/org/niso/schemas/counter/Contact.java
769
package org.niso.schemas.counter;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlType;

/**
 * JAXB binding for the SUSHI/COUNTER {@code Contact} complex type,
 * holding a contact name and an e-mail address.
 *
 * <p>Field access is used ({@link XmlAccessType#FIELD}), so the annotated
 * fields — not the getters — define the XML mapping. Element order in the
 * document is fixed by {@code propOrder}.
 */
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "Contact", propOrder = {
    "contact",
    "eMail"
})
public class Contact {

    // Maps to the XML element "Contact" (the contact's name).
    @XmlElement(name = "Contact")
    protected String contact;

    // Maps to the XML element "E-mail" (note the hyphen in the XML name).
    @XmlElement(name = "E-mail")
    protected String eMail;

    /**
     * Returns the contact name, or {@code null} if the element was absent.
     */
    public String getContact() {
        return contact;
    }

    /**
     * Sets the contact name.
     *
     * @param value new contact name; may be {@code null}
     */
    public void setContact(String value) {
        this.contact = value;
    }

    /**
     * Returns the contact e-mail address, or {@code null} if the element was
     * absent.
     */
    public String getEMail() {
        return eMail;
    }

    /**
     * Sets the contact e-mail address.
     *
     * @param value new e-mail address; may be {@code null}
     */
    public void setEMail(String value) {
        this.eMail = value;
    }
}
apache-2.0
akbrant/javaschtasks.batchjob
java_schtasks/src/main/java/javaschtasks/batchjob/ReadXMLFile.java
6302
package javaschtasks.batchjob; import javax.xml.parsers.DocumentBuilderFactory; import javax.xml.parsers.DocumentBuilder; import javax.xml.transform.OutputKeys; import javax.xml.transform.Transformer; import javax.xml.transform.TransformerConfigurationException; import javax.xml.transform.TransformerException; import javax.xml.transform.TransformerFactory; import javax.xml.transform.dom.DOMSource; import javax.xml.transform.stream.StreamResult; import org.apache.log4j.Logger; import org.w3c.dom.Document; import org.w3c.dom.NamedNodeMap; import org.w3c.dom.NodeList; import org.w3c.dom.Node; import java.io.BufferedReader; import java.io.File; import java.io.FileNotFoundException; import java.io.FileOutputStream; import java.io.FileReader; import java.io.IOException; import java.util.ArrayList; import java.util.List; public class ReadXMLFile { private static Logger logger = Logger.getLogger(ReadXMLFile.class.getName()); //make task object and return it. static Taskobj task = new Taskobj(); public static void main(String args[]) throws IOException, InterruptedException { //ReadXMLFile.listFilesForFolder(folder); ReadXMLFile.parseSASargs("-SYSIN \"X:\\CP\\sash\\sampleFiles\\R341.SAS\" -SYSPARM \"2055 2010\" -LOG G:\\PRINT -PRINT G:\\PRINT"); //ReadXMLFile.readxmlString(null); //ReadXMLFile.parsetasksfromXML(null); } public static List<String> parseSASargs(String sasargs){ List<String> allparms = new ArrayList<String>(); logger.debug(sasargs.toUpperCase()); logger.debug(sasargs.contains("-SYSPARM")); if(sasargs.toUpperCase().contains("-SYSPARM")){ for (String retval: sasargs.split("\"")){ logger.debug(retval); allparms.add(retval); } } return allparms; } public static boolean readxmlString(String xmlstr, String newargs) { try { DocumentBuilderFactory dbFactory = DocumentBuilderFactory.newInstance(); DocumentBuilder dBuilder = dbFactory.newDocumentBuilder(); File file = new File(xmlstr); Document doc = dBuilder.parse(file); doc.getDocumentElement().normalize(); 
System.out.println("Root element :" + doc.getDocumentElement().getNodeName()); if (doc.hasChildNodes()) { task.setFilename(xmlstr); Taskobj atask = printNote(doc.getChildNodes(), newargs ); System.out.println(atask); } if(newargs != null && newargs.length() > 1) { testwrite(xmlstr, doc); //rights out the new args } return true; } catch (Exception e) { logger.error(e); System.out.println("Not parsable xml: " + xmlstr); //e.printStackTrace(); } return false; } private static void testwrite(String xmlstr, Document doc) throws TransformerException, IOException{ // write the content into xml file TransformerFactory transformerFactory = TransformerFactory.newInstance(); Transformer transformer = transformerFactory.newTransformer(); transformer.setOutputProperty(OutputKeys.METHOD, "xml"); //transformer.setOutputProperty(OutputKeys.ENCODING, "UTF-16LE"); //almost works but is missing the byte order marker transformer.setOutputProperty(OutputKeys.ENCODING, "UnicodeLittle"); //holy crap we got it! DOMSource source = new DOMSource(doc); FileOutputStream result = new FileOutputStream(new File(xmlstr)); // Output to console for testing // StreamResult result = new StreamResult(System.out); transformer.transform(source, new StreamResult(result)); result.close(); } private static Taskobj printNote(NodeList nodeList, String newargs) { for (int count = 0; count < nodeList.getLength(); count++) { Node tempNode = nodeList.item(count); // make sure it's element node. 
if (tempNode.getNodeType() == Node.ELEMENT_NODE) { // get node name and value //System.out.println("\nNode Name =" + tempNode.getNodeName() + " [OPEN]"); //System.out.println("Node Value =" + tempNode.getTextContent()); if(tempNode.getNodeName().equals("Author")) { task.setAuthor(tempNode.getTextContent()); } if(tempNode.getNodeName().equals("Description")) { task.setDescrip(tempNode.getTextContent()); } if(tempNode.getNodeName().equals("UserId")) { task.setUserID(tempNode.getTextContent()); } if(tempNode.getNodeName().equals("Command")) { task.setCommand(tempNode.getTextContent()); } if(tempNode.getNodeName().equals("Arguments")) { if(newargs != null && newargs.length() > 1) { tempNode.setTextContent(newargs); } task.setArgs(tempNode.getTextContent()); } if(tempNode.getNodeName().equals("WorkingDirectory")) { task.setWorkDir(tempNode.getTextContent()); } if (tempNode.hasAttributes()) { // get attributes names and values NamedNodeMap nodeMap = tempNode.getAttributes(); for (int i = 0; i < nodeMap.getLength(); i++) { Node node = nodeMap.item(i); //System.out.println("attr name : " + node.getNodeName()); //System.out.println("attr value : " + node.getNodeValue()); } } if (tempNode.hasChildNodes()) { // loop again if has child nodes printNote(tempNode.getChildNodes(), newargs); } //System.out.println("Node Name =" + tempNode.getNodeName() + " [CLOSE]"); } } return task; } public static List<String> readFile(String filepath){ List<String> allLines = new ArrayList<String>(); try (BufferedReader br = new BufferedReader(new FileReader(filepath))) { String sCurrentLine; while ((sCurrentLine = br.readLine()) != null) { allLines.add(sCurrentLine); } } catch (IOException e) { e.printStackTrace(); } return allLines; } public static List<File> listFilesForFolder(final File folder) { List<File> allfiles = new ArrayList<File>(); for (final File fileEntry : folder.listFiles()) { if (fileEntry.isDirectory()) { listFilesForFolder(fileEntry); } else { allfiles.add(fileEntry); } } 
return allfiles; } }
apache-2.0
thuongleit/dynamic-recycler-adapter
sample/src/main/java/me/thuongle/adaptersample/view/main/PresenterImpl.java
3921
package me.thuongle.adaptersample.view.main;

import android.support.annotation.NonNull;
import android.util.Log;

import me.thuongle.adaptersample.BR;
import me.thuongle.adaptersample.data.Task;
import me.thuongle.adaptersample.data.TaskRepository;
import me.thuongle.dynamicadapter.handler.ChildItemsClickBinder;
import me.thuongle.dynamicadapter.handler.ClickHandler;
import me.thuongle.dynamicadapter.handler.ItemTouchHandler;
import me.thuongle.dynamicadapter.handler.LongClickHandler;

import java.util.Arrays;
import java.util.List;

import rx.Subscription;
import rx.android.schedulers.AndroidSchedulers;
import rx.schedulers.Schedulers;
import rx.subscriptions.CompositeSubscription;

/**
 * Presenter for the main task-list screen: loads tasks from the repository on
 * an I/O scheduler, pushes them into the {@link MainViewModel}, and exposes
 * click/long-click/touch handlers that forward user gestures to the view.
 *
 * Created by thuongleit on 8/11/16.
 */
public class PresenterImpl implements MainContract.Presenter {

    private static final String TAG = PresenterImpl.class.getSimpleName();

    // View this presenter drives; delivers UI callbacks (item clicks etc.).
    @NonNull
    private MainContract.View mView;
    // Observable data holder backing the RecyclerView adapter.
    @NonNull
    private final MainViewModel mViewModel;
    // Source of Task data.
    @NonNull
    private final TaskRepository mTaskRepository;
    // Collects all Rx subscriptions so they can be released together.
    @NonNull
    private CompositeSubscription mSubscriptions;

    // NOTE(review): declared but never assigned or read in this class —
    // presumably left over from an earlier revision; confirm and remove.
    private Subscription mPendingSubscription;

    /**
     * @param view           the attached view (never null)
     * @param viewModel      view model holding the task list
     * @param taskRepository repository providing task data
     */
    public PresenterImpl(@NonNull MainContract.View view,
                         @NonNull MainViewModel viewModel,
                         @NonNull TaskRepository taskRepository) {
        this.mView = view;
        this.mViewModel = viewModel;
        this.mTaskRepository = taskRepository;
        mSubscriptions = new CompositeSubscription();
    }

    /**
     * Starts loading tasks; called when the view becomes active.
     */
    @Override
    public void subscribe() {
        mSubscriptions.add(getTasks());
    }

    /**
     * Unsubscribes all in-flight work; called when the view becomes inactive.
     */
    @Override
    public void unsubscribe() {
        mSubscriptions.clear();
    }

    /**
     * Releases references when the view is destroyed.
     * NOTE(review): assigns null to fields annotated @NonNull
     * (mSubscriptions, mView) — any callback firing after onDestroy would
     * NPE; confirm the intended lifecycle.
     */
    @Override
    public void onDestroy() {
        mSubscriptions = null;
        mView = null;
    }

    // Forwards a single tap on a task row to the view.
    public ClickHandler<Task> clickHandler = model -> mView.onItemClick(model);

    // Forwards a long press on a task row to the view.
    public LongClickHandler<Task> longClickHandler = model -> mView.onItemLongClick(model);

    // Handles drag/swipe gestures on task rows.
    public ItemTouchHandler<Task> itemTouchHandler = new ItemTouchHandler<Task>() {
        @Override
        public void onItemMove(int position, Task model) {
            // Reordering is intentionally a no-op in this sample.
        }

        @Override
        public void onItemDismiss(int position, Task model) {
            // Clone first so the view can offer an "undo" with the original
            // data; if cloning fails the item is NOT removed (early return).
            try {
                final Task clone = (Task) model.clone();
                mView.addPendingRemove(position, clone);
            } catch (CloneNotSupportedException e) {
                Log.e(TAG, e.getMessage(), e);
                return;
            }
            mViewModel.removeItem(model);
        }
    };

    // Binds per-child-view click handlers inside each task row via data binding.
    public ChildItemsClickBinder<Task> childItemsClickBinder = new ChildItemsClickBinder<Task>() {
        @Override
        public int getBindingVariable() {
            // Data-binding variable the handlers are bound to in the layout.
            return BR.childHandlers;
        }

        @Override
        public List<ClickHandler<Task>> clickHandlers() {
            // First child view removes the item; second only notifies the view.
            ClickHandler<Task> clickHandler1 = model -> {
                mView.onChildItem1Click(model);
                mViewModel.removeItem(model);
            };
            ClickHandler<Task> clickHandler2 = model -> mView.onChildItem2Click(model);
            return Arrays.asList(clickHandler1, clickHandler2);
        }
    };

    /**
     * Loads the task list on the I/O scheduler and delivers it to the view
     * model on the main thread.
     * NOTE(review): no onError handler is supplied — a repository error
     * would reach RxJava's default error handling; confirm this is intended.
     *
     * @return the subscription, so the caller can add it to mSubscriptions
     */
    public Subscription getTasks() {
        mViewModel.loading();
        return mTaskRepository
                .getTasks()
                .subscribeOn(Schedulers.io())
                .observeOn(AndroidSchedulers.mainThread())
                .subscribe(mViewModel::setData);
    }

    /**
     * Creates a new task asynchronously, appends it to the view model, and
     * notifies the view.
     */
    @Override
    public void createTask() {
        mSubscriptions.add(mTaskRepository
                .createTask()
                .subscribeOn(Schedulers.io())
                .observeOn(AndroidSchedulers.mainThread())
                .subscribe(item -> {
                    mViewModel.addItem(item);
                    mView.onItemCreated(item);
                }));
    }

    /**
     * Restores a previously dismissed task at its original position (undo).
     */
    @Override
    public void undoRemovedProduct(int position, Task task) {
        mViewModel.addItem(position, task);
    }
}
apache-2.0
google/vpn-libraries
krypton/tunnel_manager_test.cc
14430
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Unit tests for TunnelManager, covering the interaction between session
// lifecycle (StartSession/TerminateSession) and the "safe disconnect"
// setting, which keeps the tunnel open after the session ends.

#include "privacy/net/krypton/tunnel_manager.h"

#include <memory>

#include "privacy/net/brass/rpc/brass.proto.h"
#include "privacy/net/krypton/add_egress_request.h"
#include "privacy/net/krypton/add_egress_response.h"
#include "privacy/net/krypton/pal/mock_vpn_service_interface.h"
#include "privacy/net/krypton/proto/tun_fd_data.proto.h"
#include "privacy/net/krypton/test_packet_pipe.h"
#include "privacy/net/krypton/utils/ip_range.h"
#include "privacy/net/krypton/utils/status.h"
#include "testing/base/public/gmock.h"
#include "testing/base/public/gunit.h"
#include "third_party/absl/types/optional.h"

namespace privacy {
namespace krypton {
namespace {

// Textproto expected by every CreateTunnel call in these tests; matches the
// TunFdData built by buildTunFdData() below.
constexpr auto tunnel_data_string = R"pb(is_metered: false)pb";

using ::testing::_;
using ::testing::status::IsOk;
using ::testing::status::IsOkAndHolds;

// Helper macro for returning a PacketPipe wrapping a file descriptor, since
// it's complicated and used in many locations.
#define RETURN_TEST_PIPE(id) \
  ::testing::Return(testing::ByMove(std::make_unique<TestPacketPipe>(id)))

class TunnelManagerTest : public ::testing::Test {
 public:
  void SetUp() override {}
  void TearDown() override {}

  // Builds the TunFdData that corresponds to tunnel_data_string above.
  TunFdData buildTunFdData() {
    TunFdData tunnel_data_;
    tunnel_data_.set_is_metered(false);
    return tunnel_data_;
  }

 protected:
  MockVpnService vpn_service_;
};

// Start() alone must not create a tunnel (safe disconnect off).
TEST_F(TunnelManagerTest, StartWithSafeDisconnectDisabledNoTunnel) {
  auto tunnel_manager = TunnelManager(&vpn_service_, false);
  EXPECT_CALL(vpn_service_, CreateTunnel(_)).Times(0);
  ASSERT_OK(tunnel_manager.Start());
}

// Start() alone must not create a tunnel even with safe disconnect on.
TEST_F(TunnelManagerTest, StartWithSafeDisconnectEnabledNoTunnel) {
  auto tunnel_manager = TunnelManager(&vpn_service_, true);
  EXPECT_CALL(vpn_service_, CreateTunnel(_)).Times(0);
  ASSERT_OK(tunnel_manager.Start());
}

// GetTunnel() during a session creates the tunnel via the VPN service.
TEST_F(TunnelManagerTest,
       StartSessionWithSafeDisconnectDisabledThenCreateTunnel) {
  int tun_fd = 0x1000;
  TunnelManager tunnel_manager = TunnelManager(&vpn_service_, false);
  EXPECT_CALL(vpn_service_,
              CreateTunnel(testing::EqualsProto(tunnel_data_string)))
      .WillOnce(RETURN_TEST_PIPE(tun_fd));
  ASSERT_OK(tunnel_manager.Start());
  tunnel_manager.StartSession();
  ASSERT_THAT(tunnel_manager.GetTunnel(buildTunFdData()),
              IsOkAndHolds(PacketPipeHasFd(tun_fd)));
  EXPECT_THAT(tunnel_manager.active_tunnel_test_only(),
              PacketPipeHasFd(tun_fd));
}

// Enabling safe disconnect before terminating keeps the tunnel alive.
TEST_F(TunnelManagerTest, TunnelOutlivesSessionWhenSafeDisconnectEnabled) {
  int tun_fd = 0x1000;
  auto tunnel_manager = TunnelManager(&vpn_service_, false);
  EXPECT_CALL(vpn_service_,
              CreateTunnel(testing::EqualsProto(tunnel_data_string)))
      .WillOnce(RETURN_TEST_PIPE(tun_fd));
  ASSERT_OK(tunnel_manager.Start());
  tunnel_manager.StartSession();
  ASSERT_THAT(tunnel_manager.GetTunnel(buildTunFdData()),
              IsOkAndHolds(PacketPipeHasFd(tun_fd)));
  EXPECT_THAT(tunnel_manager.active_tunnel_test_only(),
              PacketPipeHasFd(tun_fd));
  tunnel_manager.SetSafeDisconnectEnabled(true);
  tunnel_manager.TerminateSession(/*forceFailOpen=*/false);
  EXPECT_THAT(tunnel_manager.active_tunnel_test_only(),
              PacketPipeHasFd(tun_fd));
  EXPECT_TRUE(tunnel_manager.IsTunnelActive());
}

// With safe disconnect off, terminating the session also closes the tunnel.
TEST_F(TunnelManagerTest, TunnelAndSessionDieWhenSafeDisconnectDisabled) {
  int tun_fd = 0x1000;
  auto tunnel_manager = TunnelManager(&vpn_service_, false);
  EXPECT_CALL(vpn_service_,
              CreateTunnel(testing::EqualsProto(tunnel_data_string)))
      .WillOnce(RETURN_TEST_PIPE(tun_fd));
  ASSERT_OK(tunnel_manager.Start());
  tunnel_manager.StartSession();
  ASSERT_THAT(tunnel_manager.GetTunnel(buildTunFdData()),
              IsOkAndHolds(PacketPipeHasFd(tun_fd)));
  EXPECT_THAT(tunnel_manager.active_tunnel_test_only(),
              PacketPipeHasFd(tun_fd));
  tunnel_manager.TerminateSession(/*forceFailOpen=*/false);
  EXPECT_EQ(tunnel_manager.active_tunnel_test_only(), nullptr);
  EXPECT_FALSE(tunnel_manager.IsTunnelActive());
}

// Toggling safe disconnect mid-session leaves the active tunnel untouched.
TEST_F(TunnelManagerTest, TunnelUnchangedWhenTogglingSafeDisconnect) {
  int tun_fd = 0x1000;
  auto tunnel_manager = TunnelManager(&vpn_service_, false);
  EXPECT_CALL(vpn_service_,
              CreateTunnel(testing::EqualsProto(tunnel_data_string)))
      .WillOnce(RETURN_TEST_PIPE(tun_fd));
  ASSERT_OK(tunnel_manager.Start());
  tunnel_manager.StartSession();
  ASSERT_THAT(tunnel_manager.GetTunnel(buildTunFdData()),
              IsOkAndHolds(PacketPipeHasFd(tun_fd)));
  EXPECT_THAT(tunnel_manager.active_tunnel_test_only(),
              PacketPipeHasFd(tun_fd));
  tunnel_manager.SetSafeDisconnectEnabled(true);
  EXPECT_THAT(tunnel_manager.active_tunnel_test_only(),
              PacketPipeHasFd(tun_fd));
  tunnel_manager.SetSafeDisconnectEnabled(false);
  EXPECT_THAT(tunnel_manager.active_tunnel_test_only(),
              PacketPipeHasFd(tun_fd));
}

// A new session after safe disconnect reuses the preserved tunnel
// (CreateTunnel is expected exactly once).
TEST_F(TunnelManagerTest, GetTunnelReturnsOldTunnelAfterSafeDisconnect) {
  int tun_fd = 0x1000;
  auto tunnel_manager = TunnelManager(&vpn_service_, false);
  EXPECT_CALL(vpn_service_,
              CreateTunnel(testing::EqualsProto(tunnel_data_string)))
      .Times(1)
      .WillOnce(RETURN_TEST_PIPE(tun_fd));
  ASSERT_OK(tunnel_manager.Start());
  tunnel_manager.StartSession();
  ASSERT_THAT(tunnel_manager.GetTunnel(buildTunFdData()),
              IsOkAndHolds(PacketPipeHasFd(tun_fd)));
  EXPECT_THAT(tunnel_manager.active_tunnel_test_only(),
              PacketPipeHasFd(tun_fd));
  tunnel_manager.SetSafeDisconnectEnabled(true);
  tunnel_manager.TerminateSession(/*forceFailOpen=*/false);
  EXPECT_THAT(tunnel_manager.active_tunnel_test_only(),
              PacketPipeHasFd(tun_fd));
  tunnel_manager.StartSession();
  ASSERT_THAT(tunnel_manager.GetTunnel(buildTunFdData()),
              IsOkAndHolds(PacketPipeHasFd(tun_fd)));
  EXPECT_THAT(tunnel_manager.active_tunnel_test_only(),
              PacketPipeHasFd(tun_fd));
}

// Starting with safe disconnect enabled still defers tunnel creation to
// the first GetTunnel() of a session.
TEST_F(TunnelManagerTest,
       CreateNewTunnelAfterStartingWithSafeDisconnectEnabled) {
  int tun_fd = 0x1000;
  auto tunnel_manager = TunnelManager(&vpn_service_, true);
  EXPECT_CALL(vpn_service_, CreateTunnel(_)).WillOnce(RETURN_TEST_PIPE(tun_fd));
  ASSERT_OK(tunnel_manager.Start());
  EXPECT_THAT(tunnel_manager.active_tunnel_test_only(), nullptr);
  tunnel_manager.StartSession();
  ASSERT_THAT(tunnel_manager.GetTunnel(buildTunFdData()),
              IsOkAndHolds(PacketPipeHasFd(tun_fd)));
  EXPECT_THAT(tunnel_manager.active_tunnel_test_only(),
              PacketPipeHasFd(tun_fd));
}

// Full session lifecycle with safe disconnect off: tunnel appears with the
// session and disappears with it.
TEST_F(TunnelManagerTest, SessionStartAndStopWithSafeDisconnectOff) {
  int tun_fd = 0x1000;
  auto tunnel_manager = TunnelManager(&vpn_service_, false);
  EXPECT_CALL(vpn_service_,
              CreateTunnel(testing::EqualsProto(tunnel_data_string)))
      .WillOnce(RETURN_TEST_PIPE(tun_fd));
  ASSERT_OK(tunnel_manager.Start());
  EXPECT_EQ(tunnel_manager.active_tunnel_test_only(), nullptr);
  tunnel_manager.StartSession();
  ASSERT_THAT(tunnel_manager.GetTunnel(buildTunFdData()),
              IsOkAndHolds(PacketPipeHasFd(tun_fd)));
  EXPECT_THAT(tunnel_manager.active_tunnel_test_only(),
              PacketPipeHasFd(tun_fd));
  EXPECT_TRUE(tunnel_manager.IsTunnelActive());
  tunnel_manager.TerminateSession(/*forceFailOpen=*/false);
  EXPECT_EQ(tunnel_manager.active_tunnel_test_only(), nullptr);
  EXPECT_FALSE(tunnel_manager.IsTunnelActive());
}

// Stop() closes the tunnel even when safe disconnect is enabled.
TEST_F(TunnelManagerTest, StopClosesActiveTunnel) {
  int tun_fd = 0x1000;
  auto tunnel_manager = TunnelManager(&vpn_service_, false);
  EXPECT_CALL(vpn_service_,
              CreateTunnel(testing::EqualsProto(tunnel_data_string)))
      .WillOnce(RETURN_TEST_PIPE(tun_fd));
  ASSERT_OK(tunnel_manager.Start());
  EXPECT_EQ(tunnel_manager.active_tunnel_test_only(), nullptr);
  tunnel_manager.StartSession();
  ASSERT_THAT(tunnel_manager.GetTunnel(buildTunFdData()),
              IsOkAndHolds(PacketPipeHasFd(tun_fd)));
  EXPECT_THAT(tunnel_manager.active_tunnel_test_only(),
              PacketPipeHasFd(tun_fd));
  tunnel_manager.SetSafeDisconnectEnabled(true);
  tunnel_manager.Stop();
  EXPECT_EQ(tunnel_manager.active_tunnel_test_only(), nullptr);
}

// Disabling safe disconnect while no session is active closes the
// preserved tunnel immediately.
TEST_F(TunnelManagerTest,
       DisablingSafeDisconnectClosesTunnelWhenNoActiveSession) {
  int tun_fd = 0x1000;
  auto tunnel_manager = TunnelManager(&vpn_service_, true);
  EXPECT_CALL(vpn_service_, CreateTunnel(_)).WillOnce(RETURN_TEST_PIPE(tun_fd));
  ASSERT_OK(tunnel_manager.Start());
  EXPECT_THAT(tunnel_manager.active_tunnel_test_only(), nullptr);
  tunnel_manager.StartSession();
  ASSERT_THAT(tunnel_manager.GetTunnel(buildTunFdData()),
              IsOkAndHolds(PacketPipeHasFd(tun_fd)));
  EXPECT_THAT(tunnel_manager.active_tunnel_test_only(),
              PacketPipeHasFd(tun_fd));
  tunnel_manager.TerminateSession(/*forceFailOpen=*/false);
  EXPECT_THAT(tunnel_manager.active_tunnel_test_only(),
              PacketPipeHasFd(tun_fd));
  tunnel_manager.SetSafeDisconnectEnabled(false);
  EXPECT_EQ(tunnel_manager.active_tunnel_test_only(), nullptr);
}

// forceFailOpen=true overrides safe disconnect and closes the tunnel.
TEST_F(TunnelManagerTest, TunnelAndSessionDieWhenSafeDisconnectOverridden) {
  int tun_fd = 0x1000;
  auto tunnel_manager = TunnelManager(&vpn_service_, false);
  EXPECT_CALL(vpn_service_,
              CreateTunnel(testing::EqualsProto(tunnel_data_string)))
      .WillOnce(RETURN_TEST_PIPE(tun_fd));
  ASSERT_OK(tunnel_manager.Start());
  tunnel_manager.StartSession();
  ASSERT_THAT(tunnel_manager.GetTunnel(buildTunFdData()),
              IsOkAndHolds(PacketPipeHasFd(tun_fd)));
  EXPECT_THAT(tunnel_manager.active_tunnel_test_only(),
              PacketPipeHasFd(tun_fd));
  tunnel_manager.SetSafeDisconnectEnabled(true);
  tunnel_manager.TerminateSession(/*forceFailOpen=*/true);
  EXPECT_EQ(tunnel_manager.active_tunnel_test_only(), nullptr);
  EXPECT_FALSE(tunnel_manager.IsTunnelActive());
}

// Terminating before any session starts is a safe no-op.
TEST_F(TunnelManagerTest,
       TerminateSessionCalledBeforeStartSessionResultsInInactiveTunnel) {
  auto tunnel_manager = TunnelManager(&vpn_service_, false);
  ASSERT_OK(tunnel_manager.Start());
  tunnel_manager.SetSafeDisconnectEnabled(true);
  tunnel_manager.TerminateSession(/*forceFailOpen=*/true);
  EXPECT_EQ(tunnel_manager.active_tunnel_test_only(), nullptr);
  EXPECT_FALSE(tunnel_manager.IsTunnelActive());
}

// After a snooze-style termination (forceFailOpen), RecreateTunnelIfNeeded
// rebuilds the tunnel from the remembered tunnel data.
TEST_F(TunnelManagerTest, RecreateTunnelReturnsOldTunnel) {
  int tun_fd = 0x1000;
  TunnelManager tunnel_manager = TunnelManager(&vpn_service_, false);
  EXPECT_CALL(vpn_service_,
              CreateTunnel(testing::EqualsProto(tunnel_data_string)))
      .WillOnce(RETURN_TEST_PIPE(tun_fd));
  ASSERT_OK(tunnel_manager.Start());
  tunnel_manager.StartSession();
  ASSERT_THAT(tunnel_manager.GetTunnel(buildTunFdData()),
              IsOkAndHolds(PacketPipeHasFd(tun_fd)));
  EXPECT_THAT(tunnel_manager.active_tunnel_test_only(),
              PacketPipeHasFd(tun_fd));
  tunnel_manager.SetSafeDisconnectEnabled(/*enable=*/true);
  // Snooze will bypass safe disconnect, meaning that the active_tunnel_ is
  // closed. This simulates snooze.
  tunnel_manager.TerminateSession(/*forceFailOpen=*/true);
  EXPECT_CALL(vpn_service_,
              CreateTunnel(testing::EqualsProto(tunnel_data_string)))
      .WillOnce(RETURN_TEST_PIPE(tun_fd));
  tunnel_manager.StartSession();
  ASSERT_OK(tunnel_manager.RecreateTunnelIfNeeded());
  ASSERT_THAT(tunnel_manager.active_tunnel_test_only(),
              PacketPipeHasFd(tun_fd));
}

// If the tunnel survived (safe disconnect), RecreateTunnelIfNeeded must not
// create a second one.
TEST_F(TunnelManagerTest, DoNotRecreateTunnelIfOneAlreadyPresent) {
  int tun_fd = 0x1000;
  TunnelManager tunnel_manager = TunnelManager(&vpn_service_, false);
  EXPECT_CALL(vpn_service_,
              CreateTunnel(testing::EqualsProto(tunnel_data_string)))
      .WillOnce(RETURN_TEST_PIPE(tun_fd));
  ASSERT_OK(tunnel_manager.Start());
  tunnel_manager.StartSession();
  ASSERT_THAT(tunnel_manager.GetTunnel(buildTunFdData()),
              IsOkAndHolds(PacketPipeHasFd(tun_fd)));
  EXPECT_THAT(tunnel_manager.active_tunnel_test_only(),
              PacketPipeHasFd(tun_fd));
  tunnel_manager.SetSafeDisconnectEnabled(/*enable=*/true);
  tunnel_manager.TerminateSession(/*forceFailOpen=*/false);
  EXPECT_CALL(vpn_service_,
              CreateTunnel(testing::EqualsProto(tunnel_data_string)))
      .Times(0);
  tunnel_manager.StartSession();
  ASSERT_OK(tunnel_manager.RecreateTunnelIfNeeded());
  ASSERT_THAT(tunnel_manager.active_tunnel_test_only(),
              PacketPipeHasFd(tun_fd));
}

// With safe disconnect off, the tunnel died at termination and must not be
// recreated.
TEST_F(TunnelManagerTest, DoNotRecreateTunnelIfSafeDisconnectNotEnabled) {
  int tun_fd = 0x1000;
  TunnelManager tunnel_manager = TunnelManager(&vpn_service_, false);
  EXPECT_CALL(vpn_service_,
              CreateTunnel(testing::EqualsProto(tunnel_data_string)))
      .WillOnce(RETURN_TEST_PIPE(tun_fd));
  ASSERT_OK(tunnel_manager.Start());
  tunnel_manager.StartSession();
  ASSERT_THAT(tunnel_manager.GetTunnel(buildTunFdData()),
              IsOkAndHolds(PacketPipeHasFd(tun_fd)));
  EXPECT_THAT(tunnel_manager.active_tunnel_test_only(),
              PacketPipeHasFd(tun_fd));
  tunnel_manager.SetSafeDisconnectEnabled(/*enable=*/false);
  tunnel_manager.TerminateSession(/*forceFailOpen=*/false);
  EXPECT_CALL(vpn_service_,
              CreateTunnel(testing::EqualsProto(tunnel_data_string)))
      .Times(0);
  tunnel_manager.StartSession();
  ASSERT_OK(tunnel_manager.RecreateTunnelIfNeeded());
  ASSERT_THAT(tunnel_manager.active_tunnel_test_only(), nullptr);
}

// Without any prior GetTunnel() there is no remembered tunnel data, so
// nothing is recreated.
TEST_F(TunnelManagerTest, DoNotRecreateTunnelIfNoActiveTunnelDataAvailable) {
  TunnelManager tunnel_manager = TunnelManager(&vpn_service_, false);
  EXPECT_CALL(vpn_service_,
              CreateTunnel(testing::EqualsProto(tunnel_data_string)))
      .Times(0);
  tunnel_manager.StartSession();
  ASSERT_OK(tunnel_manager.RecreateTunnelIfNeeded());
  ASSERT_THAT(tunnel_manager.active_tunnel_test_only(), nullptr);
}

}  // namespace
}  // namespace krypton
}  // namespace privacy
apache-2.0
steven-zou/harbor
src/portal/lib/src/config/vulnerability/vulnerability-config.component.ts
7220
import { Component, Input, Output, EventEmitter, ViewChild, OnInit } from '@angular/core'; import { NgForm } from '@angular/forms'; import { map, catchError } from "rxjs/operators"; import { Observable, throwError as observableThrowError, of } from "rxjs"; import { Configuration } from '../config'; import { ScanningResultService, SystemInfo, SystemInfoService, ConfigurationService } from '../../service/index'; import { ErrorHandler } from '../../error-handler/index'; import { isEmptyObject, clone} from '../../utils'; import { TranslateService } from '@ngx-translate/core'; import { ClairDetail } from '../../service/interface'; import { ScanAllRepoService } from './scanAll.service'; import { OriginCron } from '../../service/interface'; import { CronScheduleComponent } from "../../cron-schedule/cron-schedule.component"; const ONE_HOUR_SECONDS: number = 3600; const ONE_DAY_SECONDS: number = 24 * ONE_HOUR_SECONDS; const SCHEDULE_TYPE_NONE = "None"; @Component({ selector: 'vulnerability-config', templateUrl: './vulnerability-config.component.html', styleUrls: ['./vulnerability-config.component.scss', '../registry-config.component.scss'] }) export class VulnerabilityConfigComponent implements OnInit { onGoing: boolean; _localTime: Date = new Date(); originCron: OriginCron; schedule: any; onSubmitting: boolean = false; config: Configuration; openState: boolean = false; getLabelCurrent: string; @ViewChild(CronScheduleComponent) CronScheduleComponent: CronScheduleComponent; @Input() @Input() showSubTitle: boolean = false; @Input() showScanningNamespaces: boolean = false; @Output() loadingStatus = new EventEmitter<boolean>(); systemInfo: SystemInfo; constructor( // private scanningService: ScanningResultService, private scanningService: ScanAllRepoService, private errorHandler: ErrorHandler, private translate: TranslateService, private systemInfoService: SystemInfoService, private configService: ConfigurationService ) { } get scanAvailable(): boolean { return 
!this.onSubmitting; } getScanText() { this.translate.get('CONFIG.SCANNING.SCAN_ALL').subscribe((res: string) => { this.getLabelCurrent = res; }); } get updatedTimestamp(): Date { if (this.systemInfo && this.systemInfo.clair_vulnerability_status && this.systemInfo.clair_vulnerability_status.overall_last_update > 0) { return this.convertToLocalTime(this.systemInfo.clair_vulnerability_status.overall_last_update); } return null; } get namespaceTimestamps(): ClairDetail[] { if (this.systemInfo && this.systemInfo.clair_vulnerability_status && this.systemInfo.clair_vulnerability_status.details && this.systemInfo.clair_vulnerability_status.details.length > 0) { return this.systemInfo.clair_vulnerability_status.details; } return []; } getSchedule() { this.onGoing = true; this.scanningService.getSchedule().subscribe(schedule => { this.initSchedule(schedule); this.onGoing = false; this.loadingStatus.emit(this.onGoing); }, error => { this.errorHandler.error(error); this.loadingStatus.emit(this.onGoing); }); } public initSchedule(schedule: any) { if (schedule && schedule.schedule !== null) { this.schedule = schedule; this.originCron = this.schedule.schedule; } else { this.originCron = { type: SCHEDULE_TYPE_NONE, cron: '' }; } } @ViewChild("systemConfigFrom") systemSettingsForm: NgForm; get isValid(): boolean { return this.systemSettingsForm && this.systemSettingsForm.valid; } get isClairDBFullyReady(): boolean { return this.systemInfo && this.systemInfo.clair_vulnerability_status && this.systemInfo.clair_vulnerability_status.overall_last_update > 0; } ngOnInit(): void { this.getSystemInfo(); this.getScanText(); this.getSchedule(); } getSystemInfo(): void { this.systemInfoService.getSystemInfo() .subscribe((info: SystemInfo) => (this.systemInfo = info) , error => this.errorHandler.error(error)); } convertToLocalTime(utcTime: number): Date { let dt: Date = new Date(); dt.setTime(utcTime * 1000); return dt; } scanNow(): void { if (this.onSubmitting) { return; // Aoid duplicated 
submitting } if (!this.scanAvailable) { return; // Aoid page hacking } this.onSubmitting = true; this.scanningService.manualScan() .subscribe(() => { this.translate.get("CONFIG.SCANNING.TRIGGER_SCAN_ALL_SUCCESS").subscribe((res: string) => { this.errorHandler.info(res); }); // Update system info this.systemInfoService.getSystemInfo() .subscribe(() => { this.onSubmitting = false; }, error => { this.onSubmitting = false; }); } , error => { if (error && error.status && error.status === 412) { this.translate.get("CONFIG.SCANNING.TRIGGER_SCAN_ALL_FAIL", { error: '' + error }).subscribe((res: string) => { this.errorHandler.error(res); }); } else { this.errorHandler.error(error); } this.onSubmitting = false; }); } reset(cron): void { this.schedule = { schedule: { type: this.CronScheduleComponent.scheduleType, cron: cron } }; } scanAll(cron: string): void { let schedule = this.schedule; if (schedule && schedule.schedule && schedule.schedule.type !== SCHEDULE_TYPE_NONE) { this.scanningService.putSchedule(this.CronScheduleComponent.scheduleType, cron) .subscribe(response => { this.translate .get("CONFIG.SAVE_SUCCESS") .subscribe((res) => { this.errorHandler.info(res); this.CronScheduleComponent.resetSchedule(); }); this.reset(cron); }, error => { this.errorHandler.error(error); } ); } else { this.scanningService.postSchedule(this.CronScheduleComponent.scheduleType, cron) .subscribe(response => { this.translate.get("CONFIG.SAVE_SUCCESS").subscribe((res) => { this.errorHandler.info(res); this.CronScheduleComponent.resetSchedule(); }); this.reset(cron); }, error => { this.errorHandler.error(error); } ); } } }
apache-2.0
Epi-Info/Epi-Info-Community-Edition
Epi.Windows.Analysis/Dialogs/RelateDialog.Designer.cs
14082
// NOTE(review): WinForms designer-generated file. The InitializeComponent body
// is owned by the Visual Studio designer and must not be hand-edited; only
// explanatory comments have been added here.
namespace Epi.Windows.Analysis.Dialogs
{
    partial class RelateDialog
    {
        /// <summary>
        /// Required designer variable.
        /// </summary>
        private System.ComponentModel.IContainer components = null;

        /// <summary>
        /// Clean up any resources being used.
        /// </summary>
        /// <param name="disposing">true if managed resources should be disposed; otherwise, false.</param>
        protected override void Dispose(bool disposing)
        {
            if (disposing && (components != null))
            {
                components.Dispose();
            }
            base.Dispose(disposing);
        }

        #region Designer generated code

        /// <summary>
        /// Required method for Designer support - do not modify
        /// the contents of this method with the code editor.
        /// </summary>
        private void InitializeComponent()
        {
            System.ComponentModel.ComponentResourceManager resources = new System.ComponentModel.ComponentResourceManager(typeof(RelateDialog));
            this.btnHelp = new System.Windows.Forms.Button();
            this.btnCancel = new System.Windows.Forms.Button();
            this.btnOK = new System.Windows.Forms.Button();
            this.btnEllipse = new System.Windows.Forms.Button();
            this.btnClear = new System.Windows.Forms.Button();
            this.cmbDataFormats = new System.Windows.Forms.ComboBox();
            this.txtDataSource = new System.Windows.Forms.TextBox();
            this.lblDataSource = new System.Windows.Forms.Label();
            this.lblDataFormats = new System.Windows.Forms.Label();
            this.lblKey = new System.Windows.Forms.Label();
            this.cbxUnmatched = new System.Windows.Forms.CheckBox();
            this.txtKey = new System.Windows.Forms.TextBox();
            this.btnBuildKey = new System.Windows.Forms.Button();
            this.gbxShow = new System.Windows.Forms.GroupBox();
            this.rbAll = new System.Windows.Forms.RadioButton();
            this.rbView = new System.Windows.Forms.RadioButton();
            this.lbxDataSourceObjects = new System.Windows.Forms.ListBox();
            this.btnSaveOnly = new System.Windows.Forms.Button();
            this.gbxShow.SuspendLayout();
            this.SuspendLayout();
            // 
            // baseImageList
            // 
            this.baseImageList.ImageStream = ((System.Windows.Forms.ImageListStreamer)(resources.GetObject("baseImageList.ImageStream")));
            this.baseImageList.Images.SetKeyName(0, "");
            this.baseImageList.Images.SetKeyName(1, "");
            this.baseImageList.Images.SetKeyName(2, "");
            this.baseImageList.Images.SetKeyName(3, "");
            this.baseImageList.Images.SetKeyName(4, "");
            this.baseImageList.Images.SetKeyName(5, "");
            this.baseImageList.Images.SetKeyName(6, "");
            this.baseImageList.Images.SetKeyName(7, "");
            this.baseImageList.Images.SetKeyName(8, "");
            this.baseImageList.Images.SetKeyName(9, "");
            this.baseImageList.Images.SetKeyName(10, "");
            this.baseImageList.Images.SetKeyName(11, "");
            this.baseImageList.Images.SetKeyName(12, "");
            this.baseImageList.Images.SetKeyName(13, "");
            this.baseImageList.Images.SetKeyName(14, "");
            this.baseImageList.Images.SetKeyName(15, "");
            this.baseImageList.Images.SetKeyName(16, "");
            this.baseImageList.Images.SetKeyName(17, "");
            this.baseImageList.Images.SetKeyName(18, "");
            this.baseImageList.Images.SetKeyName(19, "");
            this.baseImageList.Images.SetKeyName(20, "");
            this.baseImageList.Images.SetKeyName(21, "");
            this.baseImageList.Images.SetKeyName(22, "");
            this.baseImageList.Images.SetKeyName(23, "");
            this.baseImageList.Images.SetKeyName(24, "");
            this.baseImageList.Images.SetKeyName(25, "");
            this.baseImageList.Images.SetKeyName(26, "");
            this.baseImageList.Images.SetKeyName(27, "");
            this.baseImageList.Images.SetKeyName(28, "");
            this.baseImageList.Images.SetKeyName(29, "");
            this.baseImageList.Images.SetKeyName(30, "");
            this.baseImageList.Images.SetKeyName(31, "");
            this.baseImageList.Images.SetKeyName(32, "");
            this.baseImageList.Images.SetKeyName(33, "");
            this.baseImageList.Images.SetKeyName(34, "");
            this.baseImageList.Images.SetKeyName(35, "");
            this.baseImageList.Images.SetKeyName(36, "");
            this.baseImageList.Images.SetKeyName(37, "");
            this.baseImageList.Images.SetKeyName(38, "");
            this.baseImageList.Images.SetKeyName(39, "");
            this.baseImageList.Images.SetKeyName(40, "");
            this.baseImageList.Images.SetKeyName(41, "");
            this.baseImageList.Images.SetKeyName(42, "");
            this.baseImageList.Images.SetKeyName(43, "");
            this.baseImageList.Images.SetKeyName(44, "");
            this.baseImageList.Images.SetKeyName(45, "");
            this.baseImageList.Images.SetKeyName(46, "");
            this.baseImageList.Images.SetKeyName(47, "");
            this.baseImageList.Images.SetKeyName(48, "");
            this.baseImageList.Images.SetKeyName(49, "");
            this.baseImageList.Images.SetKeyName(50, "");
            this.baseImageList.Images.SetKeyName(51, "");
            this.baseImageList.Images.SetKeyName(52, "");
            this.baseImageList.Images.SetKeyName(53, "");
            this.baseImageList.Images.SetKeyName(54, "");
            this.baseImageList.Images.SetKeyName(55, "");
            this.baseImageList.Images.SetKeyName(56, "");
            this.baseImageList.Images.SetKeyName(57, "");
            this.baseImageList.Images.SetKeyName(58, "");
            this.baseImageList.Images.SetKeyName(59, "");
            this.baseImageList.Images.SetKeyName(60, "");
            this.baseImageList.Images.SetKeyName(61, "");
            this.baseImageList.Images.SetKeyName(62, "");
            this.baseImageList.Images.SetKeyName(63, "");
            this.baseImageList.Images.SetKeyName(64, "");
            this.baseImageList.Images.SetKeyName(65, "");
            this.baseImageList.Images.SetKeyName(66, "");
            this.baseImageList.Images.SetKeyName(67, "");
            this.baseImageList.Images.SetKeyName(68, "");
            this.baseImageList.Images.SetKeyName(69, "");
            this.baseImageList.Images.SetKeyName(70, "");
            this.baseImageList.Images.SetKeyName(71, "");
            this.baseImageList.Images.SetKeyName(72, "");
            this.baseImageList.Images.SetKeyName(73, "");
            this.baseImageList.Images.SetKeyName(74, "");
            this.baseImageList.Images.SetKeyName(75, "");
            // 
            // btnHelp
            // 
            resources.ApplyResources(this.btnHelp, "btnHelp");
            this.btnHelp.Name = "btnHelp";
            this.btnHelp.Click += new System.EventHandler(this.btnHelp_Click);
            // 
            // btnCancel
            // 
            this.btnCancel.DialogResult = System.Windows.Forms.DialogResult.Cancel;
            resources.ApplyResources(this.btnCancel, "btnCancel");
            this.btnCancel.Name = "btnCancel";
            // 
            // btnOK
            // 
            resources.ApplyResources(this.btnOK, "btnOK");
            this.btnOK.Name = "btnOK";
            // 
            // btnEllipse
            // 
            resources.ApplyResources(this.btnEllipse, "btnEllipse");
            this.btnEllipse.Name = "btnEllipse";
            this.btnEllipse.Click += new System.EventHandler(this.btnEllipse_Click);
            // 
            // btnClear
            // 
            resources.ApplyResources(this.btnClear, "btnClear");
            this.btnClear.Name = "btnClear";
            this.btnClear.Click += new System.EventHandler(this.btnClear_Click);
            // 
            // cmbDataFormats
            // 
            resources.ApplyResources(this.cmbDataFormats, "cmbDataFormats");
            this.cmbDataFormats.Name = "cmbDataFormats";
            this.cmbDataFormats.SelectedIndexChanged += new System.EventHandler(this.cmbDataFormats_SelectedIndexChanged);
            // 
            // txtDataSource
            // 
            resources.ApplyResources(this.txtDataSource, "txtDataSource");
            this.txtDataSource.Name = "txtDataSource";
            // 
            // lblDataSource
            // 
            resources.ApplyResources(this.lblDataSource, "lblDataSource");
            this.lblDataSource.FlatStyle = System.Windows.Forms.FlatStyle.System;
            this.lblDataSource.Name = "lblDataSource";
            // 
            // lblDataFormats
            // 
            resources.ApplyResources(this.lblDataFormats, "lblDataFormats");
            this.lblDataFormats.FlatStyle = System.Windows.Forms.FlatStyle.System;
            this.lblDataFormats.Name = "lblDataFormats";
            // 
            // lblKey
            // 
            this.lblKey.FlatStyle = System.Windows.Forms.FlatStyle.System;
            resources.ApplyResources(this.lblKey, "lblKey");
            this.lblKey.Name = "lblKey";
            // 
            // cbxUnmatched
            // 
            resources.ApplyResources(this.cbxUnmatched, "cbxUnmatched");
            this.cbxUnmatched.Name = "cbxUnmatched";
            // 
            // txtKey
            // 
            resources.ApplyResources(this.txtKey, "txtKey");
            this.txtKey.Name = "txtKey";
            // 
            // btnBuildKey
            // 
            resources.ApplyResources(this.btnBuildKey, "btnBuildKey");
            this.btnBuildKey.Name = "btnBuildKey";
            this.btnBuildKey.Click += new System.EventHandler(this.btnBuildKey_Click);
            // 
            // gbxShow
            // 
            resources.ApplyResources(this.gbxShow, "gbxShow");
            this.gbxShow.Controls.Add(this.rbAll);
            this.gbxShow.Controls.Add(this.rbView);
            this.gbxShow.FlatStyle = System.Windows.Forms.FlatStyle.System;
            this.gbxShow.Name = "gbxShow";
            this.gbxShow.TabStop = false;
            // 
            // rbAll
            // 
            resources.ApplyResources(this.rbAll, "rbAll");
            this.rbAll.Name = "rbAll";
            this.rbAll.CheckedChanged += new System.EventHandler(this.View_CheckedChanged);
            // 
            // rbView
            // 
            this.rbView.Checked = true;
            resources.ApplyResources(this.rbView, "rbView");
            this.rbView.Name = "rbView";
            this.rbView.TabStop = true;
            this.rbView.CheckedChanged += new System.EventHandler(this.View_CheckedChanged);
            // 
            // lbxDataSourceObjects
            // 
            resources.ApplyResources(this.lbxDataSourceObjects, "lbxDataSourceObjects");
            this.lbxDataSourceObjects.Name = "lbxDataSourceObjects";
            this.lbxDataSourceObjects.Sorted = true;
            this.lbxDataSourceObjects.SelectedIndexChanged += new System.EventHandler(this.lbxDataSourceObjects_SelectedIndexChanged);
            // 
            // btnSaveOnly
            // 
            resources.ApplyResources(this.btnSaveOnly, "btnSaveOnly");
            this.btnSaveOnly.Name = "btnSaveOnly";
            this.btnSaveOnly.UseVisualStyleBackColor = true;
            // 
            // RelateDialog
            // 
            resources.ApplyResources(this, "$this");
            this.CancelButton = this.btnCancel;
            this.Controls.Add(this.btnSaveOnly);
            this.Controls.Add(this.gbxShow);
            this.Controls.Add(this.lbxDataSourceObjects);
            this.Controls.Add(this.btnBuildKey);
            this.Controls.Add(this.txtKey);
            this.Controls.Add(this.txtDataSource);
            this.Controls.Add(this.cbxUnmatched);
            this.Controls.Add(this.lblKey);
            this.Controls.Add(this.btnHelp);
            this.Controls.Add(this.btnCancel);
            this.Controls.Add(this.btnOK);
            this.Controls.Add(this.btnEllipse);
            this.Controls.Add(this.btnClear);
            this.Controls.Add(this.cmbDataFormats);
            this.Controls.Add(this.lblDataSource);
            this.Controls.Add(this.lblDataFormats);
            this.MaximizeBox = false;
            this.MinimizeBox = false;
            this.Name = "RelateDialog";
            this.ShowIcon = false;
            this.Load += new System.EventHandler(this.RelateDialog_Load);
            this.gbxShow.ResumeLayout(false);
            this.ResumeLayout(false);
            this.PerformLayout();

        }

        #endregion

        // Control fields populated by InitializeComponent above.
        private System.Windows.Forms.Button btnHelp;
        private System.Windows.Forms.Button btnCancel;
        private System.Windows.Forms.Button btnOK;
        private System.Windows.Forms.Button btnEllipse;
        private System.Windows.Forms.Button btnClear;
        private System.Windows.Forms.ComboBox cmbDataFormats;
        private System.Windows.Forms.TextBox txtDataSource;
        private System.Windows.Forms.Label lblDataSource;
        private System.Windows.Forms.Label lblDataFormats;
        private System.Windows.Forms.Label lblKey;
        private System.Windows.Forms.CheckBox cbxUnmatched;
        private System.Windows.Forms.TextBox txtKey;
        private System.Windows.Forms.Button btnBuildKey;
        private System.Windows.Forms.GroupBox gbxShow;
        private System.Windows.Forms.RadioButton rbAll;
        private System.Windows.Forms.RadioButton rbView;
        private System.Windows.Forms.ListBox lbxDataSourceObjects;
        private System.Windows.Forms.Button btnSaveOnly;
    }
}
apache-2.0
bhavyanshu/OpenVID-sys
app/Http/Controllers/OrgController.php
4293
<?php
/**
 * OpenVID-sys
 *
 * @copyright 2016 Bhavyanshu Parasher (https://bhavyanshu.me)
 * @license http://www.apache.org/licenses/LICENSE-2.0.txt Apache License
 * @link https://bhavyanshu.me/pages/openvid_sys/
 */
namespace openvidsys\Http\Controllers;

use DB;
use Carbon\Carbon;
use Validator;
use Hash;
use File;
use Storage;
use Illuminate\Support\Facades\Auth;
use Illuminate\Foundation\Auth\ThrottlesLogins;
use Illuminate\Foundation\Auth\AuthenticatesAndRegistersUsers;
use Illuminate\Contracts\Auth\Guard;
use Illuminate\Http\Request;
use openvidsys\User;
use openvidsys\Http\Requests;
use openvidsys\Common\Utility;
use openvidsys\Model\OrgProfile;
use openvidsys\Model\Product;
use openvidsys\Http\Controllers\Controller;

/**
 * Controller for org/vendor specific methods
 */
class OrgController extends Controller
{
    /**
     * User model instance
     *
     * @var User
     */
    protected $user;

    /**
     * For Guard
     *
     * @var Authenticator
     */
    protected $auth;

    /**
     * Create a new controller instance.
     *
     * @param Guard $auth
     * @param User  $user
     *
     * @return void
     */
    public function __construct(Guard $auth, User $user)
    {
        $this->user = $user;
        $this->auth = $auth;
    }

    /**
     * GET View to display form to register product.
     *
     * @return Response
     */
    public function createProduct()
    {
        $profile = Utility::getProfile(Auth::user());
        return View('users.org.editproduct')->with('product', null)->with('profile', $profile);
    }

    /**
     * GET View to display form for editing registered product
     *
     * @param int $productid
     *
     * @return Response
     */
    public function editProduct($productid)
    {
        $profile = Utility::getProfile(Auth::user());
        $product = Product::where('user_p_id', '=', Auth::user()->id)
            ->where('p_id', '=', $productid)
            ->firstOrFail();
        return View('users.org.editproduct')->with('product', $product)->with('profile', $profile);
    }

    /**
     * POST Save project related information.
     *
     * Creates a new product when no p_id is supplied; otherwise updates the
     * authenticated user's existing product. Validation rules and the field
     * assignments are shared between both paths (previously duplicated inline).
     *
     * @param Request $request
     *
     * @return Response
     */
    public function saveProduct(Request $request)
    {
        Utility::killXSS();
        $isNew = is_null($request->p_id);

        $validator = Validator::make($request->all(), $this->productRules(!$isNew));
        if (!$validator->passes()) {
            return redirect()->back()->withInput()->withErrors($validator);
        }

        if ($isNew) {
            $p = new Product;
            $message = 'A new product has been added.';
        } else {
            // Scope by the authenticated user so one vendor cannot edit another's product.
            $p = Product::where('user_p_id', '=', Auth::user()->id)
                ->where('p_id', '=', $request->p_id)
                ->firstOrFail();
            $message = 'The product information has been added.';
        }

        $this->fillProduct($p, $request);

        return redirect()->route('dashboard')->with('message', $message);
    }

    /**
     * Validation rules for saving a product.
     *
     * @param bool $requireId whether an existing p_id must be present (edit path)
     *
     * @return array
     */
    private function productRules($requireId)
    {
        $rules = array(
            'p_name' => 'required|max:255',
            'p_author_name' => 'required|max:255',
            'p_author_email' => 'required|min:6|email',
            'p_description' => 'required',
            'p_url' => 'required',
            'p_type' => 'required',
        );
        if ($requireId) {
            $rules['p_id'] = 'required';
        }
        return $rules;
    }

    /**
     * Copy validated request fields onto a product, attach the current user,
     * and persist it.
     *
     * @param Product $p
     * @param Request $request
     *
     * @return void
     */
    private function fillProduct(Product $p, Request $request)
    {
        $p->user()->associate(Auth::user());
        $p->p_name = ucfirst($request->p_name);
        $p->p_author_name = ucfirst($request->p_author_name);
        $p->p_author_email = $request->p_author_email;
        $p->p_description = $request->p_description;
        $p->p_url = $request->p_url;
        $p->p_type = $request->p_type;
        $p->save();
    }
}
apache-2.0
fdecampredon/jsx-typescript-old-version
tests/baselines/reference/genericArrayMethods1.js
58
//// [genericArrayMethods1.js] var x = [0, 1].slice(0);
apache-2.0
ViDA-NYU/ache
ache/src/main/java/achecrawler/crawler/crawlercommons/fetcher/http/LocalCookieStore.java
4070
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package achecrawler.crawler.crawlercommons.fetcher.http; import java.io.Serializable; import java.util.ArrayList; import java.util.Date; import java.util.Iterator; import java.util.List; import java.util.TreeSet; //import org.apache.http.annotation.NotThreadSafe; import org.apache.http.client.CookieStore; import org.apache.http.cookie.Cookie; import org.apache.http.cookie.CookieIdentityComparator; /** * Default implementation of {@link CookieStore} Initially copied from * HttpComponents Changes: removed synchronization * */ //@NotThreadSafe public class LocalCookieStore implements CookieStore, Serializable { private static final long serialVersionUID = -7581093305228232025L; private final TreeSet<Cookie> cookies; public LocalCookieStore() { super(); this.cookies = new TreeSet<Cookie>(new CookieIdentityComparator()); } /** * Adds an {@link Cookie HTTP cookie}, replacing any existing equivalent * cookies. If the given ookie has already expired it will not be added, but * existing values will still be removed. 
* * @param cookie * the {@link Cookie cookie} to be added * * @see #addCookies(Cookie[]) * */ public void addCookie(Cookie cookie) { if (cookie != null) { // first remove any old cookie that is equivalent cookies.remove(cookie); if (!cookie.isExpired(new Date())) { cookies.add(cookie); } } } /** * Adds an array of {@link Cookie HTTP cookies}. Cookies are added * individually and in the given array order. If any of the given cookies * has already expired it will not be added, but existing values will still * be removed. * * @param cookies * the {@link Cookie cookies} to be added * * @see #addCookie(Cookie) * */ public void addCookies(Cookie[] cookies) { if (cookies != null) { for (Cookie cooky : cookies) { this.addCookie(cooky); } } } /** * Returns an immutable array of {@link Cookie cookies} that this HTTP state * currently contains. * * @return an array of {@link Cookie cookies}. */ public List<Cookie> getCookies() { // create defensive copy so it won't be concurrently modified return new ArrayList<Cookie>(cookies); } /** * Removes all of {@link Cookie cookies} in this HTTP state that have * expired by the specified {@link java.util.Date date}. * * @return true if any cookies were purged. * * @see Cookie#isExpired(Date) */ public boolean clearExpired(final Date date) { if (date == null) { return false; } boolean removed = false; for (Iterator<Cookie> it = cookies.iterator(); it.hasNext();) { if (it.next().isExpired(date)) { it.remove(); removed = true; } } return removed; } /** * Clears all cookies. */ public void clear() { cookies.clear(); } @Override public String toString() { return cookies.toString(); } }
apache-2.0
Inform-Software/jamps
jamps-core/src/main/java/com/inform/jamps/modeling/QuadraticTerm.java
751
/*
 * Copyright (C) 2015 The Jamps Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.inform.jamps.modeling;

/**
 * A single quadratic term of an optimization expression:
 * {@code coefficient * variable1 * variable2}.
 */
public interface QuadraticTerm {

  /** Returns the scalar coefficient multiplying the two variables. */
  double getCoefficient ();

  /** Returns the first variable factor of the product. */
  Variable getVariable1 ();

  /** Returns the second variable factor of the product. */
  Variable getVariable2 ();
}
apache-2.0
ConsecroMUD/ConsecroMUD
com/suscipio_solutions/consecro_mud/Behaviors/TaxiBehavior.java
7929
package com.suscipio_solutions.consecro_mud.Behaviors;

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

import com.suscipio_solutions.consecro_mud.Abilities.interfaces.Ability;
import com.suscipio_solutions.consecro_mud.Behaviors.interfaces.Behavior;
import com.suscipio_solutions.consecro_mud.Common.interfaces.CMMsg;
import com.suscipio_solutions.consecro_mud.Common.interfaces.PhyStats;
import com.suscipio_solutions.consecro_mud.Exits.interfaces.Exit;
import com.suscipio_solutions.consecro_mud.Items.interfaces.Container;
import com.suscipio_solutions.consecro_mud.Items.interfaces.Item;
import com.suscipio_solutions.consecro_mud.Libraries.interfaces.TrackingLibrary;
import com.suscipio_solutions.consecro_mud.Locales.interfaces.Room;
import com.suscipio_solutions.consecro_mud.MOBS.interfaces.MOB;
import com.suscipio_solutions.consecro_mud.core.CMClass;
import com.suscipio_solutions.consecro_mud.core.CMLib;
import com.suscipio_solutions.consecro_mud.core.collections.XVector;
import com.suscipio_solutions.consecro_mud.core.interfaces.Affectable;
import com.suscipio_solutions.consecro_mud.core.interfaces.Environmental;
import com.suscipio_solutions.consecro_mud.core.interfaces.PhysicalAgent;
import com.suscipio_solutions.consecro_mud.core.interfaces.Rideable;
import com.suscipio_solutions.consecro_mud.core.interfaces.Rider;
import com.suscipio_solutions.consecro_mud.core.interfaces.Tickable;

/**
 * A {@link Concierge} variant that, once paid, physically carries the customer
 * to the requested destination. On payment ({@code giveMerchandise}) it computes
 * a trail of rooms to the destination and flags itself "en route" by attaching a
 * Prop_Adjuster effect to its host; on every {@code tick} it walks one step along
 * the trail; {@code endTheRide} dismounts any new riders, says goodbye, sends the
 * host back to its starting room, and clears all per-ride state.
 */
public class TaxiBehavior extends Concierge
{
	@Override public String ID(){return "TaxiBehavior";}

	// Behavior may be attached to items (e.g. a cab/boat) as well as to mobs (a driver).
	@Override protected int canImproveCode(){return Behavior.CAN_ITEMS|Behavior.CAN_MOBS;}

	// Default tracking flags used for the radiant-room scan (skip empty grid rooms).
	protected final TrackingLibrary.TrackingFlags taxiTrackingFlags = new TrackingLibrary.TrackingFlags().plus(TrackingLibrary.TrackingFlag.NOEMPTYGRIDS);

	// Per-ride state. isEnRouter doubles as the "a ride is in progress" flag:
	// it is the Prop_Adjuster effect attached to the host while driving.
	protected volatile Ability isEnRouter = null;
	// Room the taxi returns to after the ride ends.
	protected Room returnToRoom = null;
	// Destination room of the current ride.
	protected Room destRoom = null;
	// Paying customer of the current ride.
	protected MOB riderM = null;
	// Precomputed room trail from start to destination.
	protected List<Room> trailTo= null;
	// Snapshot of riders present before any customer boarded; these are NOT
	// dismounted when the ride ends (see endTheRide).
	protected List<Rider> defaultRiders = null;

	@Override
	protected TrackingLibrary.TrackingFlags getTrackingFlags()
	{
		return taxiTrackingFlags;
	}

	@Override
	public String accountForYourself()
	{
		return "taking you from here to there";
	}

	/**
	 * Builds the "pay me first" prompt, phrased for how the money can be
	 * delivered: handed over (mob host), put inside (container host), or
	 * dropped (any other host).
	 */
	@Override
	protected String getGiveMoneyMessage(Environmental observer, Environmental destination, String moneyName)
	{
		if(observer instanceof MOB)
			return L("I can take you to @x1, but you'll need to give me @x2 first.",getDestinationName(destination),moneyName);
		else
		if(observer instanceof Container)
			return L("I can take you to @x1, but you'll need to put @x2 into @x3 first.",getDestinationName(destination),moneyName,observer.name());
		else
			return L("I can take you to @x1, but you'll need to drop @x2 first.",getDestinationName(destination),moneyName);
	}

	/**
	 * Called once payment is complete: plots the trail to the destination and
	 * arms the per-ride state so tick() starts driving.
	 *
	 * @param whoM     the paying customer
	 * @param destR    the room to drive to
	 * @param observer the host of this behavior (vehicle item or driver mob)
	 * @param room     the room the transaction happened in
	 */
	@Override
	protected void giveMerchandise(MOB whoM, Room destR, Environmental observer, Room room)
	{
		MOB fromM=getTalker(observer,room);
		// NOTE(review): this local shadows the field of the same name; the field
		// keeps only NOEMPTYGRIDS while path-finding also requires open exits.
		TrackingLibrary.TrackingFlags taxiTrackingFlags = new TrackingLibrary.TrackingFlags()
			.plus(TrackingLibrary.TrackingFlag.NOEMPTYGRIDS)
			.plus(TrackingLibrary.TrackingFlag.OPENONLY);
		if(areaOnly)
			taxiTrackingFlags=taxiTrackingFlags.plus(TrackingLibrary.TrackingFlag.AREAONLY);
		// NOTE(review): `set` is filled by getRadiantRooms but never read
		// afterwards -- this scan looks like dead work; confirm before removing.
		final ArrayList<Room> set=new ArrayList<Room>();
		CMLib.tracking().getRadiantRooms(fromM.location(),set,getTrackingFlags(),null,maxRange,null);
		trailTo=CMLib.tracking().findBastardTheBestWay(fromM.location(), destR, taxiTrackingFlags, maxRange);
		thingsToSay.addElement(whoM,L("OK, we're now on our way to @x1.",getDestinationName(destR)));
		this.returnToRoom=fromM.location();
		// Prop_Adjuster with "sen+CAN_NOT_WORK" marks the host as busy while en route.
		this.isEnRouter=CMClass.getAbility("Prop_Adjuster");
		this.isEnRouter.setMiscText("sen+"+PhyStats.CAN_NOT_WORK);
		this.destRoom = destR;
		this.riderM = whoM;
		if(observer instanceof Affectable)
			((Affectable)observer).addNonUninvokableEffect(this.isEnRouter);
	}

	// Suppress normal concierge greetings/goodbyes while a ride is in progress.
	@Override
	protected boolean disableComingsAndGoings()
	{
		return (isEnRouter!=null);
	}

	@Override
	protected final MOB getTalker(Environmental o, Room room)
	{
		// Lazily snapshot the host's original riders (crew) the first time the
		// talker is resolved, so endTheRide() can tell crew from customers.
		if(o instanceof Rideable)
		{
			if(defaultRiders == null)
				defaultRiders = new XVector<Rider>(((Rideable)o).riders());
		}
		return super.getTalker(o, room);
	}

	/**
	 * Terminates the current ride (at the destination or wherever progress
	 * stopped): says goodbye, dismounts every rider not in the original crew,
	 * sends the host back to its starting room, removes the en-route effect,
	 * and resets all per-ride state. No-op when no ride is in progress.
	 */
	private void endTheRide(Environmental observer)
	{
		if(this.isEnRouter != null)
		{
			final Room room=CMLib.map().roomLocation(observer);
			MOB conciergeM=this.getTalker(observer,room);
			if(room==this.destRoom)
				CMLib.commands().postSay(conciergeM,null,L("We're here. Best of luck!."),false,false);
			else
				CMLib.commands().postSay(conciergeM,null,L("This is as far as I can go. Best of luck!."),false,false);
			// Resolve the vehicle: the host itself, or whatever the host is riding.
			Rideable rideable = null;
			if(observer instanceof Rideable)
				rideable = (Rideable)observer;
			else
			if((observer instanceof Rider)&&(((Rider)observer).riding()!=null))
				rideable=((Rider)observer).riding();
			if((rideable!=null)&&(room!=null))
			{
				MOB mob=this.getTalker(observer, room);
				for(final Iterator<Rider> r = rideable.riders(); r.hasNext(); )
				{
					final Rider rider=r.next();
					// Only dismount riders who boarded after the ride started;
					// the pre-existing crew (defaultRiders) stays aboard.
					if(!defaultRiders.contains(rider))
					{
						if(rider instanceof MOB)
							room.show((MOB)rider, rideable, mob, CMMsg.MASK_ALWAYS|CMMsg.MSG_DISMOUNT, "<S-NAME> "+rideable.dismountString(rider)+" from <T-NAME>.");
						else
							room.show(mob, rideable, rider, CMMsg.MASK_ALWAYS|CMMsg.MSG_DISMOUNT, "<S-NAME> help(s) <O-NAME> off of <T-NAME>.");
						rider.setRiding(null);
					}
				}
			}
			// Send the taxi home: mobs wander back, items are announced and moved.
			if(returnToRoom != null)
			{
				if(observer instanceof MOB)
					CMLib.tracking().wanderFromTo((MOB)observer, returnToRoom, false );
				else
				if((observer instanceof Item)&&(room != null))
				{
					room.showHappens(CMMsg.MSG_OK_ACTION, observer.name()+" heads off.");
					returnToRoom.moveItemTo((Item)observer);
				}
			}
			// Detach the "busy" effect from whatever it was attached to.
			if(isEnRouter != null)
			{
				if(isEnRouter.affecting() != null)
					isEnRouter.affecting().delEffect(isEnRouter);
			}
			isEnRouter = null;
			returnToRoom = null;
			destRoom = null;
			trailTo= null;
			riderM = null;
		}
	}

	/**
	 * Drives one step per tick while a ride is in progress. The ride ends when
	 * the destination is reached, the next step is blocked (closed exit, missing
	 * room, indoor room when indoorOK is false, or a failed walk), or a mob host
	 * can no longer behave normally.
	 */
	@Override
	public boolean tick(Tickable ticking, int tickID)
	{
		if(!super.tick(ticking, tickID))
			return false;
		if((ticking instanceof Environmental) && (isEnRouter != null))
		{
			final Environmental observer=(Environmental)ticking;
			if((ticking instanceof MOB) && (!super.canFreelyBehaveNormal(ticking)))
				endTheRide(observer);
			else
			{
				final Room locR=CMLib.map().roomLocation(observer);
				if(locR==destRoom)
					endTheRide(observer);
				else
				if(locR!=null)
				{
					final int nextDirection=CMLib.tracking().trackNextDirectionFromHere(trailTo, locR, true);
					final Room nextR=locR.getRoomInDir(nextDirection);
					final Exit nextE=locR.getExitInDir(nextDirection);
					if((nextR != null) && (nextE != null) && (nextE.isOpen()))
					{
						if((!indoorOK)&&((nextR.domainType()&Room.INDOORS)!=0))
							endTheRide(observer);
						else
						if(observer instanceof MOB)
						{
							if(!CMLib.tracking().walk((MOB)observer, nextDirection,false,false))
								endTheRide(observer);
						}
						else
						if(observer instanceof Item)
						{
							if(!CMLib.tracking().walk((Item)observer, nextDirection))
								endTheRide(observer);
						}
					}
					else
						endTheRide(observer);
				}
				else
					endTheRide(observer);
			}
		}
		return true;
	}

	// Taxi-specific defaults layered over the Concierge ones.
	@Override
	protected void resetDefaults()
	{
		super.resetDefaults();
		indoorOK=false;
		greeting="Need a lift? If so, come aboard.";
		mountStr="Where are you headed?";
		isEnRouter = null;
		returnToRoom = null;
		destRoom = null;
		trailTo= null;
		riderM = null;
		basePrice=10.0;
		perRoomPrice=1.0;
	}

	@Override
	public void startBehavior(PhysicalAgent behaving)
	{
		super.startBehavior(behaving);
		// An item host has no mob to speak through; give it a generic voice.
		if((talkerName.length()==0) && (behaving instanceof Item))
			talkerName="the driver";
	}

	@Override
	public void setParms(String newParm)
	{
		super.setParms(newParm);
	}
}
apache-2.0
mhlx/mblog
src/main/java/me/qyh/blog/file/vo/StaticFile.java
1832
/* * Copyright 2017 qyh.me * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package me.qyh.blog.file.vo; public class StaticFile { private String path; private long size; private String name; private String ext; private boolean dir; private String url; private boolean editable; public String getPath() { return path; } public void setPath(String path) { this.path = path; } public long getSize() { return size; } public void setSize(long size) { this.size = size; } public String getName() { return name; } public void setName(String name) { this.name = name; } public String getExt() { return ext; } public void setExt(String ext) { this.ext = ext; } public boolean isDir() { return dir; } public void setDir(boolean dir) { this.dir = dir; } public String getUrl() { return url; } public void setUrl(String url) { this.url = url; } public boolean isEditable() { return editable; } public void setEditable(boolean editable) { this.editable = editable; } /** * 判斷是否匹配某后缀 * * @param ext * @return */ public boolean is(String ext) { if (dir) { return false; } if (this.ext == null || this.ext.isEmpty()) { return ext == null || ext.isEmpty(); } return this.ext.equalsIgnoreCase(ext); } }
apache-2.0
zhujunxxxxx/SportsPage
andriod/SportPage/app/src/main/java/com/sportspage/event/UpdateBadgeEvent.java
338
package com.sportspage.event; /** * Created by tenma on 3/23/17. */ public class UpdateBadgeEvent { private String path; public UpdateBadgeEvent(String path) { this.path = path; } public String getPath() { return path; } public void setPath(String path) { this.path = path; } }
apache-2.0
googleapis/google-api-php-client-services
src/OSConfig/OSPolicyResourceRepositoryResourceAptRepository.php
2321
<?php
/*
 * Copyright 2014 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

namespace Google\Service\OSConfig;

/**
 * Model class (generated) for an APT package repository resource in an OS
 * policy: archive type, components, distribution, signing key and URI.
 * Plain property bag with generated accessors; do not add logic here.
 */
class OSPolicyResourceRepositoryResourceAptRepository extends \Google\Collection
{
  protected $collection_key = 'components';
  /**
   * @var string
   */
  public $archiveType;
  /**
   * @var string[]
   */
  public $components;
  /**
   * @var string
   */
  public $distribution;
  /**
   * @var string
   */
  public $gpgKey;
  /**
   * @var string
   */
  public $uri;

  /**
   * @param string
   */
  public function setArchiveType($archiveType)
  {
    $this->archiveType = $archiveType;
  }
  /**
   * @return string
   */
  public function getArchiveType()
  {
    return $this->archiveType;
  }
  /**
   * @param string[]
   */
  public function setComponents($components)
  {
    $this->components = $components;
  }
  /**
   * @return string[]
   */
  public function getComponents()
  {
    return $this->components;
  }
  /**
   * @param string
   */
  public function setDistribution($distribution)
  {
    $this->distribution = $distribution;
  }
  /**
   * @return string
   */
  public function getDistribution()
  {
    return $this->distribution;
  }
  /**
   * @param string
   */
  public function setGpgKey($gpgKey)
  {
    $this->gpgKey = $gpgKey;
  }
  /**
   * @return string
   */
  public function getGpgKey()
  {
    return $this->gpgKey;
  }
  /**
   * @param string
   */
  public function setUri($uri)
  {
    $this->uri = $uri;
  }
  /**
   * @return string
   */
  public function getUri()
  {
    return $this->uri;
  }
}

// Adding a class alias for backwards compatibility with the previous class name.
class_alias(OSPolicyResourceRepositoryResourceAptRepository::class, 'Google_Service_OSConfig_OSPolicyResourceRepositoryResourceAptRepository');
apache-2.0
consulo/consulo-javascript
plugin/src/main/java/com/intellij/lang/javascript/flex/importer/MemberInfo.java
1495
/*
 * Copyright 2000-2005 JetBrains s.r.o.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.intellij.lang.javascript.flex.importer;

import javax.annotation.Nonnull;

/**
 * Base class for members read out of Flex byte code: carries the member's
 * identity, modifiers and attached metadata, and defines how a member is
 * dumped through a {@link FlexByteCodeInformationProcessor}.
 *
 * @author Maxim.Mossienko
 * Date: Oct 20, 2008
 * Time: 7:00:39 PM
 */
abstract class MemberInfo
{
	Traits parentTraits;
	int id;
	int kind;
	Multiname name;
	MetaData[] metadata;
	boolean isOverride;
	boolean isPublic;
	boolean isFinal;

	/** Emits this member's representation through the given processor. */
	abstract void dump(Abc abc, String indent, String attr, final @Nonnull FlexByteCodeInformationProcessor processor);

	/**
	 * Emits each attached metadata entry (one per line, prefixed by
	 * {@code indent}) that the processor agrees to dump. Does nothing when
	 * the member has no metadata.
	 */
	protected void dumpMetaData(String indent, final @Nonnull FlexByteCodeInformationProcessor processor)
	{
		if(metadata == null)
		{
			return;
		}
		for(MetaData md : metadata)
		{
			// The processor may veto individual entries.
			if(!processor.doDumpMetaData(md))
			{
				continue;
			}
			processor.append(indent);
			processor.processMetadata(md);
			processor.append("\n");
		}
	}

	/** Returns the owning class name, or null when the member has no parent traits. */
	String getParentName()
	{
		if(parentTraits == null)
		{
			return null;
		}
		return parentTraits.getClassName();
	}
}
apache-2.0
pacogomez/pyvcloud
tests/vcd_catalog_setup.py
1193
import os
import unittest
import yaml

from pyvcloud.vcd.client import BasicLoginCredentials
from pyvcloud.vcd.client import Client
from pyvcloud.vcd.org import Org
from pyvcloud.vcd.test import TestCase


class TestCatalogSetup(TestCase):
    """Integration tests that create, populate and validate a vCD catalog.

    The pyvcloud ``TestCase`` base supplies an authenticated ``self.client``
    and the parsed ``self.config`` dictionary.  These tests hit a live
    vCloud Director endpoint and run in alphabetical order:
    create -> upload -> validate.
    """

    def _logged_in_org(self):
        # Helper: wrap the current session's organization resource in an
        # Org client object (the original code repeated this in every test).
        return Org(self.client, resource=self.client.get_org())

    def test_create_catalog(self):
        """Create the catalog named in the config and verify its name."""
        catalog = self._logged_in_org().create_catalog(
            self.config['vcd']['catalog'], 'test catalog')
        assert self.config['vcd']['catalog'] == catalog.get('name')

    def test_upload_ova(self):
        """Upload the local OVA template into the catalog.

        The return value is intentionally discarded (the original bound it
        to an unused local); ``test_validate_ova`` checks the item after.
        """
        self._logged_in_org().upload_ovf(
            self.config['vcd']['catalog'],
            self.config['vcd']['local_template'])

    def test_validate_ova(self):
        """Fetch the uploaded template from the catalog and check its name."""
        template = self._logged_in_org().get_catalog_item(
            self.config['vcd']['catalog'], self.config['vcd']['template'])
        assert self.config['vcd']['template'] == template.get('name')


if __name__ == '__main__':
    unittest.main()
apache-2.0
erdi/grails-core
grails-plugin-gsp/src/ast/groovy/org/codehaus/groovy/grails/compiler/web/gsp/GroovyPageBytecodeOptimizer.java
2117
/*
 * Copyright 2011 SpringSource
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.codehaus.groovy.grails.compiler.web.gsp;

import org.codehaus.groovy.ast.ClassNode;
import org.codehaus.groovy.ast.MethodNode;
import org.codehaus.groovy.ast.Parameter;
import org.codehaus.groovy.ast.stmt.BlockStatement;
import org.codehaus.groovy.classgen.GeneratorContext;
import org.codehaus.groovy.control.SourceUnit;
import org.codehaus.groovy.grails.compiler.injection.AstTransformer;
import org.codehaus.groovy.grails.compiler.injection.GroovyPageInjector;

import java.net.URL;

/**
 * AST transformer that optimizes the byte code of compiled GSP scripts by
 * rewriting the method calls inside the script's {@code run()} method body
 * via {@link GroovyPageOptimizerVisitor}.
 */
@AstTransformer
public class GroovyPageBytecodeOptimizer implements GroovyPageInjector {

    private final static String RUN_METHOD = "run";

    public void performInjection(SourceUnit source, GeneratorContext context, ClassNode classNode) {
        // A compiled GSP exposes its content as a no-arg run() method.
        MethodNode runMethod = classNode.getMethod(RUN_METHOD, new Parameter[0]);
        if (runMethod == null) {
            return;
        }
        // Only plain statement blocks are rewritten.
        if (!(runMethod.getCode() instanceof BlockStatement)) {
            return;
        }
        BlockStatement body = (BlockStatement) runMethod.getCode();
        // Walk the block and optimize every method-call expression in it.
        GroovyPageOptimizerVisitor visitor = new GroovyPageOptimizerVisitor(classNode);
        visitor.visitBlockStatement(body);
    }

    public void performInjection(SourceUnit source, ClassNode classNode) {
        // Delegate to the full form with no generator context.
        performInjection(source, null, classNode);
    }

    /** Never triggered by URL matching; this transformer is applied explicitly. */
    public boolean shouldInject(URL url) {
        return false;
    }
}
apache-2.0
AlanJinTS/zstack
sdk/src/main/java/org/zstack/sdk/ImageInventory.java
4437
package org.zstack.sdk;

/**
 * SDK data-transfer object describing a ZStack image: identity, state,
 * sizes, checksums, source URL, type/platform/format attributes, audit
 * timestamps, and the backup-storage references the image lives on.
 * Generated-style bean: plain public fields with matching accessors.
 */
public class ImageInventory {

    // --- identity ---

    public java.lang.String uuid;
    public void setUuid(java.lang.String uuid) {
        this.uuid = uuid;
    }
    public java.lang.String getUuid() {
        return this.uuid;
    }

    public java.lang.String name;
    public void setName(java.lang.String name) {
        this.name = name;
    }
    public java.lang.String getName() {
        return this.name;
    }

    public java.lang.String description;
    public void setDescription(java.lang.String description) {
        this.description = description;
    }
    public java.lang.String getDescription() {
        return this.description;
    }

    // --- export information ---

    public java.lang.String exportUrl;
    public void setExportUrl(java.lang.String exportUrl) {
        this.exportUrl = exportUrl;
    }
    public java.lang.String getExportUrl() {
        return this.exportUrl;
    }

    public java.lang.String exportMd5Sum;
    public void setExportMd5Sum(java.lang.String exportMd5Sum) {
        this.exportMd5Sum = exportMd5Sum;
    }
    public java.lang.String getExportMd5Sum() {
        return this.exportMd5Sum;
    }

    // --- lifecycle ---

    public java.lang.String state;
    public void setState(java.lang.String state) {
        this.state = state;
    }
    public java.lang.String getState() {
        return this.state;
    }

    public java.lang.String status;
    public void setStatus(java.lang.String status) {
        this.status = status;
    }
    public java.lang.String getStatus() {
        return this.status;
    }

    // --- sizes and checksum (presumably bytes / MD5 of the image payload --
    // TODO confirm against server-side ImageInventory docs) ---

    public java.lang.Long size;
    public void setSize(java.lang.Long size) {
        this.size = size;
    }
    public java.lang.Long getSize() {
        return this.size;
    }

    public java.lang.Long actualSize;
    public void setActualSize(java.lang.Long actualSize) {
        this.actualSize = actualSize;
    }
    public java.lang.Long getActualSize() {
        return this.actualSize;
    }

    public java.lang.String md5Sum;
    public void setMd5Sum(java.lang.String md5Sum) {
        this.md5Sum = md5Sum;
    }
    public java.lang.String getMd5Sum() {
        return this.md5Sum;
    }

    // --- source and classification ---

    public java.lang.String url;
    public void setUrl(java.lang.String url) {
        this.url = url;
    }
    public java.lang.String getUrl() {
        return this.url;
    }

    public java.lang.String mediaType;
    public void setMediaType(java.lang.String mediaType) {
        this.mediaType = mediaType;
    }
    public java.lang.String getMediaType() {
        return this.mediaType;
    }

    public java.lang.String guestOsType;
    public void setGuestOsType(java.lang.String guestOsType) {
        this.guestOsType = guestOsType;
    }
    public java.lang.String getGuestOsType() {
        return this.guestOsType;
    }

    public java.lang.String type;
    public void setType(java.lang.String type) {
        this.type = type;
    }
    public java.lang.String getType() {
        return this.type;
    }

    public java.lang.String platform;
    public void setPlatform(java.lang.String platform) {
        this.platform = platform;
    }
    public java.lang.String getPlatform() {
        return this.platform;
    }

    public java.lang.String format;
    public void setFormat(java.lang.String format) {
        this.format = format;
    }
    public java.lang.String getFormat() {
        return this.format;
    }

    public java.lang.Boolean system;
    public void setSystem(java.lang.Boolean system) {
        this.system = system;
    }
    public java.lang.Boolean getSystem() {
        return this.system;
    }

    // --- audit timestamps ---

    public java.sql.Timestamp createDate;
    public void setCreateDate(java.sql.Timestamp createDate) {
        this.createDate = createDate;
    }
    public java.sql.Timestamp getCreateDate() {
        return this.createDate;
    }

    public java.sql.Timestamp lastOpDate;
    public void setLastOpDate(java.sql.Timestamp lastOpDate) {
        this.lastOpDate = lastOpDate;
    }
    public java.sql.Timestamp getLastOpDate() {
        return this.lastOpDate;
    }

    // --- backup storage references ---

    public java.util.List<ImageBackupStorageRefInventory> backupStorageRefs;
    public void setBackupStorageRefs(java.util.List<ImageBackupStorageRefInventory> backupStorageRefs) {
        this.backupStorageRefs = backupStorageRefs;
    }
    public java.util.List<ImageBackupStorageRefInventory> getBackupStorageRefs() {
        return this.backupStorageRefs;
    }
}
apache-2.0
Jackygq1982/hbase_src
hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
62305
/** * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hbase.master; import static org.apache.hadoop.hbase.SplitLogCounters.tot_mgr_wait_for_zk_delete; import static org.apache.hadoop.hbase.SplitLogCounters.tot_wkr_final_transition_failed; import static org.apache.hadoop.hbase.SplitLogCounters.tot_wkr_preempt_task; import static org.apache.hadoop.hbase.SplitLogCounters.tot_wkr_task_acquired; import static org.apache.hadoop.hbase.SplitLogCounters.tot_wkr_task_done; import static org.apache.hadoop.hbase.SplitLogCounters.tot_wkr_task_err; import static org.apache.hadoop.hbase.SplitLogCounters.tot_wkr_task_resigned; import static org.junit.Assert.*; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.HashSet; import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.NavigableSet; import java.util.Set; import java.util.TreeSet; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicLong; import org.apache.commons.logging.Log; import 
org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.LargeTests; import org.apache.hadoop.hbase.MiniHBaseCluster; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.SplitLogCounters; import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Increment; import org.apache.hadoop.hbase.client.NonceGenerator; import org.apache.hadoop.hbase.client.PerClientRandomNonceGenerator; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException; import org.apache.hadoop.hbase.exceptions.OperationConflictException; import org.apache.hadoop.hbase.exceptions.RegionInRecoveryException; import org.apache.hadoop.hbase.master.SplitLogManager.TaskBatch; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.wal.HLog; import 
org.apache.hadoop.hbase.regionserver.wal.HLogFactory; import org.apache.hadoop.hbase.regionserver.wal.HLogUtil; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread; import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; import org.apache.hadoop.hbase.zookeeper.ZKAssign; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.log4j.Level; import org.apache.log4j.Logger; import org.apache.zookeeper.KeeperException; import org.junit.After; import org.junit.AfterClass; import org.junit.Assert; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; import org.junit.experimental.categories.Category; @Category(LargeTests.class) public class TestDistributedLogSplitting { private static final Log LOG = LogFactory.getLog(TestSplitLogManager.class); static { // Uncomment the following line if more verbosity is needed for // debugging (see HBASE-12285 for details). //Logger.getLogger("org.apache.hadoop.hbase").setLevel(Level.DEBUG); // test ThreeRSAbort fails under hadoop2 (2.0.2-alpha) if shortcircuit-read (scr) is on. this // turns it off for this test. TODO: Figure out why scr breaks recovery. 
System.setProperty("hbase.tests.use.shortcircuit.reads", "false"); } // Start a cluster with 2 masters and 6 regionservers static final int NUM_MASTERS = 2; static final int NUM_RS = 6; MiniHBaseCluster cluster; HMaster master; Configuration conf; static Configuration originalConf; static HBaseTestingUtility TEST_UTIL; static MiniDFSCluster dfsCluster; static MiniZooKeeperCluster zkCluster; @BeforeClass public static void setup() throws Exception { TEST_UTIL = new HBaseTestingUtility(HBaseConfiguration.create()); dfsCluster = TEST_UTIL.startMiniDFSCluster(1); zkCluster = TEST_UTIL.startMiniZKCluster(); originalConf = TEST_UTIL.getConfiguration(); } @AfterClass public static void tearDown() throws IOException { TEST_UTIL.shutdownMiniZKCluster(); TEST_UTIL.shutdownMiniDFSCluster(); } private void startCluster(int num_rs) throws Exception { SplitLogCounters.resetCounters(); LOG.info("Starting cluster"); conf.getLong("hbase.splitlog.max.resubmit", 0); // Make the failure test faster conf.setInt("zookeeper.recovery.retry", 0); conf.setInt(HConstants.REGIONSERVER_INFO_PORT, -1); conf.setFloat(HConstants.LOAD_BALANCER_SLOP_KEY, (float) 100.0); // no load balancing conf.setInt("hbase.regionserver.wal.max.splitters", 3); conf.setInt("hfile.format.version", 3); TEST_UTIL = new HBaseTestingUtility(conf); TEST_UTIL.setDFSCluster(dfsCluster); TEST_UTIL.setZkCluster(zkCluster); TEST_UTIL.startMiniHBaseCluster(NUM_MASTERS, num_rs); cluster = TEST_UTIL.getHBaseCluster(); LOG.info("Waiting for active/ready master"); cluster.waitForActiveAndReadyMaster(); master = cluster.getMaster(); while (cluster.getLiveRegionServerThreads().size() < num_rs) { Threads.sleep(1); } } @Before public void before() throws Exception { // refresh configuration conf = HBaseConfiguration.create(originalConf); } @After public void after() throws Exception { try { if (TEST_UTIL.getHBaseCluster() != null) { for (MasterThread mt : TEST_UTIL.getHBaseCluster().getLiveMasterThreads()) { 
mt.getMaster().abort("closing...", null); } } TEST_UTIL.shutdownMiniHBaseCluster(); } finally { TEST_UTIL.getTestFileSystem().delete(FSUtils.getRootDir(TEST_UTIL.getConfiguration()), true); ZKUtil.deleteNodeRecursively(TEST_UTIL.getZooKeeperWatcher(), "/hbase"); } } @Test (timeout=300000) public void testRecoveredEdits() throws Exception { LOG.info("testRecoveredEdits"); conf.setLong("hbase.regionserver.hlog.blocksize", 30 * 1024); // create more than one wal conf.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, false); startCluster(NUM_RS); final int NUM_LOG_LINES = 1000; final SplitLogManager slm = master.getMasterFileSystem().splitLogManager; // turn off load balancing to prevent regions from moving around otherwise // they will consume recovered.edits master.balanceSwitch(false); FileSystem fs = master.getMasterFileSystem().getFileSystem(); List<RegionServerThread> rsts = cluster.getLiveRegionServerThreads(); Path rootdir = FSUtils.getRootDir(conf); installTable(new ZooKeeperWatcher(conf, "table-creation", null), "table", "family", 40); TableName table = TableName.valueOf("table"); List<HRegionInfo> regions = null; HRegionServer hrs = null; for (int i = 0; i < NUM_RS; i++) { boolean foundRs = false; hrs = rsts.get(i).getRegionServer(); regions = ProtobufUtil.getOnlineRegions(hrs); for (HRegionInfo region : regions) { if (region.getTable().getNameAsString().equalsIgnoreCase("table")) { foundRs = true; break; } } if (foundRs) break; } final Path logDir = new Path(rootdir, HLogUtil.getHLogDirectoryName(hrs .getServerName().toString())); LOG.info("#regions = " + regions.size()); Iterator<HRegionInfo> it = regions.iterator(); while (it.hasNext()) { HRegionInfo region = it.next(); if (region.getTable().getNamespaceAsString() .equals(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR)) { it.remove(); } } makeHLog(hrs.getWAL(), regions, "table", "family", NUM_LOG_LINES, 100); slm.splitLogDistributed(logDir); int count = 0; for (HRegionInfo hri : regions) { Path tdir = 
FSUtils.getTableDir(rootdir, table); @SuppressWarnings("deprecation") Path editsdir = HLogUtil.getRegionDirRecoveredEditsDir(HRegion.getRegionDir(tdir, hri.getEncodedName())); LOG.debug("checking edits dir " + editsdir); FileStatus[] files = fs.listStatus(editsdir); assertTrue(files.length > 1); for (int i = 0; i < files.length; i++) { int c = countHLog(files[i].getPath(), fs, conf); count += c; } LOG.info(count + " edits in " + files.length + " recovered edits files."); } // check that the log file is moved assertFalse(fs.exists(logDir)); assertEquals(NUM_LOG_LINES, count); } @Test(timeout = 300000) public void testLogReplayWithNonMetaRSDown() throws Exception { LOG.info("testLogReplayWithNonMetaRSDown"); conf.setLong("hbase.regionserver.hlog.blocksize", 30 * 1024); // create more than one wal conf.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, true); startCluster(NUM_RS); final int NUM_REGIONS_TO_CREATE = 40; final int NUM_LOG_LINES = 1000; // turn off load balancing to prevent regions from moving around otherwise // they will consume recovered.edits master.balanceSwitch(false); final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-creation", null); HTable ht = installTable(zkw, "table", "family", NUM_REGIONS_TO_CREATE); HRegionServer hrs = findRSToKill(false, "table"); List<HRegionInfo> regions = ProtobufUtil.getOnlineRegions(hrs); makeHLog(hrs.getWAL(), regions, "table", "family", NUM_LOG_LINES, 100); // wait for abort completes this.abortRSAndVerifyRecovery(hrs, ht, zkw, NUM_REGIONS_TO_CREATE, NUM_LOG_LINES); ht.close(); zkw.close(); } private static class NonceGeneratorWithDups extends PerClientRandomNonceGenerator { private boolean isDups = false; private LinkedList<Long> nonces = new LinkedList<Long>(); public void startDups() { isDups = true; } @Override public long newNonce() { long nonce = isDups ? 
nonces.removeFirst() : super.newNonce(); if (!isDups) { nonces.add(nonce); } return nonce; } } @Test(timeout = 300000) public void testNonceRecovery() throws Exception { LOG.info("testNonceRecovery"); final String TABLE_NAME = "table"; final String FAMILY_NAME = "family"; final int NUM_REGIONS_TO_CREATE = 40; conf.setLong("hbase.regionserver.hlog.blocksize", 100*1024); conf.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, true); startCluster(NUM_RS); master.balanceSwitch(false); final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-creation", null); HTable ht = installTable(zkw, TABLE_NAME, FAMILY_NAME, NUM_REGIONS_TO_CREATE); NonceGeneratorWithDups ng = new NonceGeneratorWithDups(); NonceGenerator oldNg = HConnectionManager.injectNonceGeneratorForTesting(ht.getConnection(), ng); try { List<Increment> reqs = new ArrayList<Increment>(); for (RegionServerThread rst : cluster.getLiveRegionServerThreads()) { HRegionServer hrs = rst.getRegionServer(); List<HRegionInfo> hris = ProtobufUtil.getOnlineRegions(hrs); for (HRegionInfo hri : hris) { if (TABLE_NAME.equalsIgnoreCase(hri.getTable().getNameAsString())) { byte[] key = hri.getStartKey(); if (key == null || key.length == 0) { key = Bytes.copy(hri.getEndKey()); --(key[key.length - 1]); } Increment incr = new Increment(key); incr.addColumn(Bytes.toBytes(FAMILY_NAME), Bytes.toBytes("q"), 1); ht.increment(incr); reqs.add(incr); } } } HRegionServer hrs = findRSToKill(false, "table"); abortRSAndWaitForRecovery(hrs, zkw, NUM_REGIONS_TO_CREATE); ng.startDups(); for (Increment incr : reqs) { try { ht.increment(incr); fail("should have thrown"); } catch (OperationConflictException ope) { LOG.debug("Caught as expected: " + ope.getMessage()); } } } finally { HConnectionManager.injectNonceGeneratorForTesting(ht.getConnection(), oldNg); ht.close(); zkw.close(); } } @Test(timeout = 300000) public void testLogReplayWithMetaRSDown() throws Exception { LOG.info("testRecoveredEditsReplayWithMetaRSDown"); 
conf.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, true); startCluster(NUM_RS); final int NUM_REGIONS_TO_CREATE = 40; final int NUM_LOG_LINES = 1000; // turn off load balancing to prevent regions from moving around otherwise // they will consume recovered.edits master.balanceSwitch(false); final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-creation", null); HTable ht = installTable(zkw, "table", "family", NUM_REGIONS_TO_CREATE); HRegionServer hrs = findRSToKill(true, "table"); List<HRegionInfo> regions = ProtobufUtil.getOnlineRegions(hrs); makeHLog(hrs.getWAL(), regions, "table", "family", NUM_LOG_LINES, 100); this.abortRSAndVerifyRecovery(hrs, ht, zkw, NUM_REGIONS_TO_CREATE, NUM_LOG_LINES); ht.close(); zkw.close(); } private void abortRSAndVerifyRecovery(HRegionServer hrs, HTable ht, final ZooKeeperWatcher zkw, final int numRegions, final int numofLines) throws Exception { abortRSAndWaitForRecovery(hrs, zkw, numRegions); assertEquals(numofLines, TEST_UTIL.countRows(ht)); } private void abortRSAndWaitForRecovery(HRegionServer hrs, final ZooKeeperWatcher zkw, final int numRegions) throws Exception { final MiniHBaseCluster tmpCluster = this.cluster; // abort RS LOG.info("Aborting region server: " + hrs.getServerName()); hrs.abort("testing"); // wait for abort completes TEST_UTIL.waitFor(120000, 200, new Waiter.Predicate<Exception>() { @Override public boolean evaluate() throws Exception { return (tmpCluster.getLiveRegionServerThreads().size() <= (NUM_RS - 1)); } }); // wait for regions come online TEST_UTIL.waitFor(180000, 200, new Waiter.Predicate<Exception>() { @Override public boolean evaluate() throws Exception { return (getAllOnlineRegions(tmpCluster).size() >= (numRegions + 1)); } }); // wait for all regions are fully recovered TEST_UTIL.waitFor(180000, 200, new Waiter.Predicate<Exception>() { @Override public boolean evaluate() throws Exception { List<String> recoveringRegions = zkw.getRecoverableZooKeeper().getChildren( 
          // (continuation of the getChildren(...) call opened on the previous chunk line)
          zkw.recoveringRegionsZNode, false);
      return (recoveringRegions != null && recoveringRegions.size() == 0);
    }
  });
}

/**
 * Verifies that a restarting master picks up and completes pending log-SPLITTING work
 * (distributed log replay disabled) left behind after both master and one RS died.
 */
@Test(timeout = 300000)
public void testMasterStartsUpWithLogSplittingWork() throws Exception {
  LOG.info("testMasterStartsUpWithLogSplittingWork");
  conf.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, false);
  // let the master come up before every RS has reported in
  conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, NUM_RS - 1);
  startCluster(NUM_RS);
  final int NUM_REGIONS_TO_CREATE = 40;
  final int NUM_LOG_LINES = 1000;
  // turn off load balancing to prevent regions from moving around otherwise
  // they will consume recovered.edits
  master.balanceSwitch(false);
  List<RegionServerThread> rsts = cluster.getLiveRegionServerThreads();
  final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-creation", null);
  HTable ht = installTable(zkw, "table", "family", NUM_REGIONS_TO_CREATE);
  HRegionServer hrs = findRSToKill(false, "table");
  List<HRegionInfo> regions = ProtobufUtil.getOnlineRegions(hrs);
  makeHLog(hrs.getWAL(), regions, "table", "family", NUM_LOG_LINES, 100);

  // abort master
  abortMaster(cluster);

  // abort RS
  LOG.info("Aborting region server: " + hrs.getServerName());
  hrs.abort("testing");

  // wait for abort completes
  TEST_UTIL.waitFor(120000, 200, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return (cluster.getLiveRegionServerThreads().size() <= (NUM_RS - 1));
    }
  });
  Thread.sleep(2000);
  LOG.info("Current Open Regions:" + getAllOnlineRegions(cluster).size());

  startMasterAndWaitUntilLogSplit(cluster);

  // wait for all user regions plus meta to be back online
  TEST_UTIL.waitFor(120000, 200, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return (getAllOnlineRegions(cluster).size() >= (NUM_REGIONS_TO_CREATE + 1));
    }
  });

  LOG.info("Current Open Regions After Master Node Starts Up:"
      + getAllOnlineRegions(cluster).size());

  assertEquals(NUM_LOG_LINES, TEST_UTIL.countRows(ht));

  ht.close();
  zkw.close();
}

// Same scenario as above but with distributed log REPLAY enabled; body continues on the
// next chunk line.
@Test(timeout = 300000) public void
// (method name continues the "@Test ... public void" fragment from the previous chunk line)
testMasterStartsUpWithLogReplayWork() throws Exception {
  LOG.info("testMasterStartsUpWithLogReplayWork");
  conf.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, true);
  // let the master come up before every RS has reported in
  conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, NUM_RS - 1);
  startCluster(NUM_RS);
  final int NUM_REGIONS_TO_CREATE = 40;
  final int NUM_LOG_LINES = 1000;
  // turn off load balancing to prevent regions from moving around otherwise
  // they will consume recovered.edits
  master.balanceSwitch(false);
  List<RegionServerThread> rsts = cluster.getLiveRegionServerThreads();
  final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-creation", null);
  HTable ht = installTable(zkw, "table", "family", NUM_REGIONS_TO_CREATE);
  HRegionServer hrs = findRSToKill(false, "table");
  List<HRegionInfo> regions = ProtobufUtil.getOnlineRegions(hrs);
  makeHLog(hrs.getWAL(), regions, "table", "family", NUM_LOG_LINES, 100);

  // abort master
  abortMaster(cluster);

  // abort RS
  LOG.info("Aborting region server: " + hrs.getServerName());
  hrs.abort("testing");

  // wait for the RS dies
  TEST_UTIL.waitFor(120000, 200, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return (cluster.getLiveRegionServerThreads().size() <= (NUM_RS - 1));
    }
  });
  Thread.sleep(2000);
  LOG.info("Current Open Regions:" + getAllOnlineRegions(cluster).size());

  startMasterAndWaitUntilLogSplit(cluster);

  // wait for all regions are fully recovered
  TEST_UTIL.waitFor(180000, 200, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      List<String> recoveringRegions = zkw.getRecoverableZooKeeper().getChildren(
          zkw.recoveringRegionsZNode, false);
      return (recoveringRegions != null && recoveringRegions.size() == 0);
    }
  });

  LOG.info("Current Open Regions After Master Node Starts Up:"
      + getAllOnlineRegions(cluster).size());

  assertEquals(NUM_LOG_LINES, TEST_UTIL.countRows(ht));

  ht.close();
  zkw.close();
}

/**
 * Distributed log replay when two region servers die one after the other, the second
 * while recovery from the first is still in progress. Body continues on the next line.
 */
@Test(timeout = 300000)
public void testLogReplayTwoSequentialRSDown() throws Exception {
  // (continuation of testLogReplayTwoSequentialRSDown, opened on the previous chunk line)
  LOG.info("testRecoveredEditsReplayTwoSequentialRSDown");
  conf.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, true);
  startCluster(NUM_RS);
  final int NUM_REGIONS_TO_CREATE = 40;
  final int NUM_LOG_LINES = 1000;
  // turn off load balancing to prevent regions from moving around otherwise
  // they will consume recovered.edits
  master.balanceSwitch(false);
  List<RegionServerThread> rsts = cluster.getLiveRegionServerThreads();
  final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-creation", null);
  HTable ht = installTable(zkw, "table", "family", NUM_REGIONS_TO_CREATE);

  List<HRegionInfo> regions = null;
  HRegionServer hrs1 = findRSToKill(false, "table");
  regions = ProtobufUtil.getOnlineRegions(hrs1);

  makeHLog(hrs1.getWAL(), regions, "table", "family", NUM_LOG_LINES, 100);

  // abort RS1
  LOG.info("Aborting region server: " + hrs1.getServerName());
  hrs1.abort("testing");

  // wait for abort completes
  TEST_UTIL.waitFor(120000, 200, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return (cluster.getLiveRegionServerThreads().size() <= (NUM_RS - 1));
    }
  });

  // wait for regions come online
  TEST_UTIL.waitFor(180000, 200, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return (getAllOnlineRegions(cluster).size() >= (NUM_REGIONS_TO_CREATE + 1));
    }
  });

  // sleep a little bit in order to interrupt recovering in the middle
  Thread.sleep(300);

  // abort second region server
  rsts = cluster.getLiveRegionServerThreads();
  HRegionServer hrs2 = rsts.get(0).getRegionServer();
  LOG.info("Aborting one more region server: " + hrs2.getServerName());
  hrs2.abort("testing");

  // wait for abort completes
  TEST_UTIL.waitFor(120000, 200, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return (cluster.getLiveRegionServerThreads().size() <= (NUM_RS - 2));
    }
  });

  // wait for regions come online (anonymous predicate continues on the next chunk line)
  TEST_UTIL.waitFor(180000, 200, new Waiter.Predicate<Exception>() {
    @Override
    public boolean
    // (continuation of the anonymous Waiter.Predicate opened on the previous chunk line)
    evaluate() throws Exception {
      return (getAllOnlineRegions(cluster).size() >= (NUM_REGIONS_TO_CREATE + 1));
    }
  });

  // wait for all regions are fully recovered
  TEST_UTIL.waitFor(180000, 200, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      List<String> recoveringRegions = zkw.getRecoverableZooKeeper().getChildren(
          zkw.recoveringRegionsZNode, false);
      return (recoveringRegions != null && recoveringRegions.size() == 0);
    }
  });

  assertEquals(NUM_LOG_LINES, TEST_UTIL.countRows(ht));
  ht.close();
  zkw.close();
}

/**
 * Exercises SplitLogManager.markRegionsRecoveringInZK directly: marking the same region
 * as recovering for two failed servers must produce two children under the region's
 * recovering znode, which the split log worker then cleans up (no WALs recorded in ZK).
 */
@Test(timeout = 300000)
public void testMarkRegionsRecoveringInZK() throws Exception {
  LOG.info("testMarkRegionsRecoveringInZK");
  conf.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, true);
  startCluster(NUM_RS);
  master.balanceSwitch(false);
  List<RegionServerThread> rsts = cluster.getLiveRegionServerThreads();
  final ZooKeeperWatcher zkw = master.getZooKeeperWatcher();
  HTable ht = installTable(zkw, "table", "family", 40);
  final SplitLogManager slm = master.getMasterFileSystem().splitLogManager;

  Set<HRegionInfo> regionSet = new HashSet<HRegionInfo>();
  HRegionInfo region = null;
  HRegionServer hrs = null;
  ServerName firstFailedServer = null;
  ServerName secondFailedServer = null;
  // pick the first RS that actually hosts a region; treat it and its neighbor as "failed"
  for (int i = 0; i < NUM_RS; i++) {
    hrs = rsts.get(i).getRegionServer();
    List<HRegionInfo> regions = ProtobufUtil.getOnlineRegions(hrs);
    if (regions.isEmpty()) continue;
    region = regions.get(0);
    regionSet.add(region);
    firstFailedServer = hrs.getServerName();
    secondFailedServer = rsts.get((i + 1) % NUM_RS).getRegionServer().getServerName();
    break;
  }

  slm.markRegionsRecoveringInZK(firstFailedServer, regionSet);
  slm.markRegionsRecoveringInZK(secondFailedServer, regionSet);

  List<String> recoveringRegions = ZKUtil.listChildrenNoWatch(zkw,
      ZKUtil.joinZNode(zkw.recoveringRegionsZNode, region.getEncodedName()));

  assertEquals(recoveringRegions.size(), 2);

  // wait for splitLogWorker to mark them up because there is no WAL files recorded in ZK
  // (statement continues on the next chunk line)
  final HRegionServer tmphrs
      // (continuation of the declaration started on the previous chunk line)
      = hrs;
  TEST_UTIL.waitFor(60000, 1000, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return (tmphrs.getRecoveringRegions().size() == 0);
    }
  });
  ht.close();
  zkw.close();
}

/**
 * Verifies that data written before a region server abort is byte-identical after the
 * regions are replayed and reopened (checksum comparison over the whole table).
 */
@Test(timeout = 300000)
public void testReplayCmd() throws Exception {
  LOG.info("testReplayCmd");
  conf.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, true);
  startCluster(NUM_RS);
  final int NUM_REGIONS_TO_CREATE = 40;
  // turn off load balancing to prevent regions from moving around otherwise
  // they will consume recovered.edits
  master.balanceSwitch(false);
  List<RegionServerThread> rsts = cluster.getLiveRegionServerThreads();
  final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-creation", null);
  HTable ht = installTable(zkw, "table", "family", NUM_REGIONS_TO_CREATE);
  List<HRegionInfo> regions = null;
  HRegionServer hrs = null;
  // pick an RS that hosts user regions but NOT hbase:meta
  for (int i = 0; i < NUM_RS; i++) {
    boolean isCarryingMeta = false;
    hrs = rsts.get(i).getRegionServer();
    regions = ProtobufUtil.getOnlineRegions(hrs);
    for (HRegionInfo region : regions) {
      if (region.isMetaRegion()) {
        isCarryingMeta = true;
        break;
      }
    }
    if (isCarryingMeta) {
      continue;
    }
    if (regions.size() > 0) break;
  }

  this.prepareData(ht, Bytes.toBytes("family"), Bytes.toBytes("c1"));
  String originalCheckSum = TEST_UTIL.checksumRows(ht);

  // abort RS and trigger replay
  abortRSAndWaitForRecovery(hrs, zkw, NUM_REGIONS_TO_CREATE);

  assertEquals("Data should remain after reopening of regions",
      originalCheckSum, TEST_UTIL.checksumRows(ht));

  ht.close();
  zkw.close();
}

/**
 * Verifies that a table disabled DURING recovery gets its edits left in recovered.edits
 * (not replayed) while the enabled table's edits are fully replayed. Body continues on
 * the next chunk lines.
 */
@Test(timeout = 300000)
public void testLogReplayForDisablingTable() throws Exception {
  LOG.info("testLogReplayForDisablingTable");
  conf.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, true);
  startCluster(NUM_RS);
  final int NUM_REGIONS_TO_CREATE = 40;
  final int NUM_LOG_LINES = 1000;

  List<RegionServerThread> rsts = cluster.getLiveRegionServerThreads();
  final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-creation", null);
  HTable disablingHT =
      // (continuation of the disablingHT assignment started on the previous chunk line)
      installTable(zkw, "disableTable", "family", NUM_REGIONS_TO_CREATE);
  HTable ht = installTable(zkw, "table", "family", NUM_REGIONS_TO_CREATE, NUM_REGIONS_TO_CREATE);

  // turn off load balancing to prevent regions from moving around otherwise
  // they will consume recovered.edits
  master.balanceSwitch(false);

  List<HRegionInfo> regions = null;
  HRegionServer hrs = null;
  boolean hasRegionsForBothTables = false;
  String tableName = null;
  // pick an RS that carries regions of BOTH user tables and no system table
  for (int i = 0; i < NUM_RS; i++) {
    tableName = null;
    hasRegionsForBothTables = false;
    boolean isCarryingSystem = false;
    hrs = rsts.get(i).getRegionServer();
    regions = ProtobufUtil.getOnlineRegions(hrs);
    for (HRegionInfo region : regions) {
      if (region.getTable().isSystemTable()) {
        isCarryingSystem = true;
        break;
      }
      if (tableName != null
          && !tableName.equalsIgnoreCase(region.getTable().getNameAsString())) {
        // make sure that we find a RS has online regions for both "table" and "disableTable"
        hasRegionsForBothTables = true;
        break;
      } else if (tableName == null) {
        tableName = region.getTable().getNameAsString();
      }
    }
    if (isCarryingSystem) {
      continue;
    }
    if (hasRegionsForBothTables) {
      break;
    }
  }

  // make sure we found a good RS
  Assert.assertTrue(hasRegionsForBothTables);

  LOG.info("#regions = " + regions.size());
  // drop the meta region so only user-table regions receive fake WAL edits
  Iterator<HRegionInfo> it = regions.iterator();
  while (it.hasNext()) {
    HRegionInfo region = it.next();
    if (region.isMetaTable()) {
      it.remove();
    }
  }
  makeHLog(hrs.getWAL(), regions, "disableTable", "family", NUM_LOG_LINES, 100, false);
  makeHLog(hrs.getWAL(), regions, "table", "family", NUM_LOG_LINES, 100);

  LOG.info("Disabling table\n");
  TEST_UTIL.getHBaseAdmin().disableTable(Bytes.toBytes("disableTable"));

  // abort RS
  LOG.info("Aborting region server: " + hrs.getServerName());
  hrs.abort("testing");

  // wait for abort completes
  TEST_UTIL.waitFor(120000, 200, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return (cluster.getLiveRegionServerThreads().size() <= (NUM_RS - 1));
    }
  });

  // wait for regions to come online (waitFor call continues on the next chunk line)
  // ("online" below completed the split comment "// wait for regions come online")
  TEST_UTIL.waitFor(180000, 200, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return (getAllOnlineRegions(cluster).size() >= (NUM_REGIONS_TO_CREATE + 1));
    }
  });

  // wait for all regions are fully recovered
  TEST_UTIL.waitFor(180000, 200, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      List<String> recoveringRegions = zkw.getRecoverableZooKeeper().getChildren(
          zkw.recoveringRegionsZNode, false);
      ServerManager serverManager = master.getServerManager();
      return (!serverManager.areDeadServersInProgress()
          && recoveringRegions != null && recoveringRegions.size() == 0);
    }
  });

  // count the edits parked in recovered.edits for the disabled table
  int count = 0;
  FileSystem fs = master.getMasterFileSystem().getFileSystem();
  Path rootdir = FSUtils.getRootDir(conf);
  Path tdir = FSUtils.getTableDir(rootdir, TableName.valueOf("disableTable"));
  for (HRegionInfo hri : regions) {
    @SuppressWarnings("deprecation")
    Path editsdir =
        HLogUtil.getRegionDirRecoveredEditsDir(HRegion.getRegionDir(tdir, hri.getEncodedName()));
    LOG.debug("checking edits dir " + editsdir);
    if(!fs.exists(editsdir)) continue;
    FileStatus[] files = fs.listStatus(editsdir);
    if(files != null) {
      for(FileStatus file : files) {
        int c = countHLog(file.getPath(), fs, conf);
        count += c;
        LOG.info(c + " edits in " + file.getPath());
      }
    }
  }

  LOG.info("Verify edits in recovered.edits files");
  assertEquals(NUM_LOG_LINES, count);
  LOG.info("Verify replayed edits");
  assertEquals(NUM_LOG_LINES, TEST_UTIL.countRows(ht));

  // clean up
  for (HRegionInfo hri : regions) {
    @SuppressWarnings("deprecation")
    Path editsdir =
        HLogUtil.getRegionDirRecoveredEditsDir(HRegion.getRegionDir(tdir, hri.getEncodedName()));
    fs.delete(editsdir, true);
  }
  disablingHT.close();
  ht.close();
  zkw.close();
}

/**
 * Verifies that writes to a region that is in "recovering" state are rejected with
 * RegionInRecoveryException when DISALLOW_WRITES_IN_RECOVERING is set. Body continues
 * on the next chunk lines.
 */
@Test(timeout = 300000)
public void testDisallowWritesInRecovering() throws Exception {
  LOG.info("testDisallowWritesInRecovering");
  conf.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, true);
  // (continuation of testDisallowWritesInRecovering, opened on the previous chunk line)
  // few retries so the rejected put fails fast instead of blocking the test
  conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 3);
  conf.setBoolean(HConstants.DISALLOW_WRITES_IN_RECOVERING, true);
  startCluster(NUM_RS);
  final int NUM_REGIONS_TO_CREATE = 40;
  // turn off load balancing to prevent regions from moving around otherwise
  // they will consume recovered.edits
  master.balanceSwitch(false);

  List<RegionServerThread> rsts = cluster.getLiveRegionServerThreads();
  final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-creation", null);
  HTable ht = installTable(zkw, "table", "family", NUM_REGIONS_TO_CREATE);
  final SplitLogManager slm = master.getMasterFileSystem().splitLogManager;

  Set<HRegionInfo> regionSet = new HashSet<HRegionInfo>();
  HRegionInfo region = null;
  HRegionServer hrs = null;
  HRegionServer dstRS = null;
  // pick a source RS with at least one region, and its neighbor as the move target
  for (int i = 0; i < NUM_RS; i++) {
    hrs = rsts.get(i).getRegionServer();
    List<HRegionInfo> regions = ProtobufUtil.getOnlineRegions(hrs);
    if (regions.isEmpty()) continue;
    region = regions.get(0);
    regionSet.add(region);
    dstRS = rsts.get((i+1) % NUM_RS).getRegionServer();
    break;
  }

  slm.markRegionsRecoveringInZK(hrs.getServerName(), regionSet);
  // move region in order for the region opened in recovering state
  final HRegionInfo hri = region;
  final HRegionServer tmpRS = dstRS;
  TEST_UTIL.getHBaseAdmin().move(region.getEncodedNameAsBytes(),
      Bytes.toBytes(dstRS.getServerName().getServerName()));
  // wait for region move completes
  final RegionStates regionStates =
      TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStates();
  TEST_UTIL.waitFor(45000, 200, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      ServerName sn = regionStates.getRegionServerOfRegion(hri);
      return (sn != null && sn.equals(tmpRS.getServerName()));
    }
  });

  try {
    byte[] key = region.getStartKey();
    if (key == null || key.length == 0) {
      key = new byte[] { 0, 0, 0, 0, 1 };
    }
    ht.setAutoFlush(true, true);
    Put put = new Put(key);
    put.add(Bytes.toBytes("family"), Bytes.toBytes("c1"), new byte[]{'b'});
    // this put targets a recovering region and is expected to be rejected
    ht.put(put);
    // (continuation of the try block opened on the previous chunk line)
    ht.close();
  } catch (IOException ioe) {
    Assert.assertTrue(ioe instanceof RetriesExhaustedWithDetailsException);
    RetriesExhaustedWithDetailsException re = (RetriesExhaustedWithDetailsException) ioe;
    boolean foundRegionInRecoveryException = false;
    for (Throwable t : re.getCauses()) {
      if (t instanceof RegionInRecoveryException) {
        foundRegionInRecoveryException = true;
        break;
      }
    }
    Assert.assertTrue(
        "No RegionInRecoveryException. Following exceptions returned=" + re.getCauses(),
        foundRegionInRecoveryException);
  }

  zkw.close();
}

/**
 * The original intention of this test was to force an abort of a region
 * server and to make sure that the failure path in the region servers is
 * properly evaluated. But it is difficult to ensure that the region server
 * doesn't finish the log splitting before it aborts. Also now, there is
 * this code path where the master will preempt the region server when master
 * detects that the region server has aborted.
 * @throws Exception
 */
@Test (timeout=300000)
public void testWorkerAbort() throws Exception {
  LOG.info("testWorkerAbort");
  startCluster(3);
  final int NUM_LOG_LINES = 10000;
  final SplitLogManager slm = master.getMasterFileSystem().splitLogManager;
  FileSystem fs = master.getMasterFileSystem().getFileSystem();

  final List<RegionServerThread> rsts = cluster.getLiveRegionServerThreads();
  HRegionServer hrs = findRSToKill(false, "table");
  Path rootdir = FSUtils.getRootDir(conf);
  final Path logDir = new Path(rootdir,
      HLogUtil.getHLogDirectoryName(hrs.getServerName().toString()));

  installTable(new ZooKeeperWatcher(conf, "table-creation", null),
      "table", "family", 40);
  makeHLog(hrs.getWAL(), ProtobufUtil.getOnlineRegions(hrs), "table",
      "family", NUM_LOG_LINES, 100);

  // abort the first RS as soon as a split task has been acquired by a worker
  new Thread() {
    public void run() {
      waitForCounter(tot_wkr_task_acquired, 0, 1, 1000);
      for (RegionServerThread rst : rsts) {
        rst.getRegionServer().abort("testing");
        break;
      }
    }
  }.start();
  // slm.splitLogDistributed(logDir);
  FileStatus[] logfiles = fs.listStatus(logDir);
  // (declaration continues on the next chunk line)
  TaskBatch batch
      // (continuation of the TaskBatch declaration started on the previous chunk line)
      = new TaskBatch();
  slm.enqueueSplitTask(logfiles[0].getPath().toString(), batch);
  //waitForCounter but for one of the 2 counters
  long curt = System.currentTimeMillis();
  long waitTime = 80000;
  long endt = curt + waitTime;
  // spin until at least one worker-outcome counter moves, or time out
  while (curt < endt) {
    if ((tot_wkr_task_resigned.get() + tot_wkr_task_err.get() +
        tot_wkr_final_transition_failed.get() + tot_wkr_task_done.get() +
        tot_wkr_preempt_task.get()) == 0) {
      Thread.yield();
      curt = System.currentTimeMillis();
    } else {
      assertTrue(1 <= (tot_wkr_task_resigned.get() + tot_wkr_task_err.get() +
          tot_wkr_final_transition_failed.get() + tot_wkr_task_done.get() +
          tot_wkr_preempt_task.get()));
      return;
    }
  }
  fail("none of the following counters went up in " + waitTime +
      " milliseconds - " +
      "tot_wkr_task_resigned, tot_wkr_task_err, " +
      "tot_wkr_final_transition_failed, tot_wkr_task_done, " +
      "tot_wkr_preempt_task");
}

/**
 * Kills three of the region servers at once and verifies that all regions come back
 * online, recovery finishes, and no rows are lost. Body continues on the next line.
 */
@Test (timeout=300000)
public void testThreeRSAbort() throws Exception {
  LOG.info("testThreeRSAbort");
  final int NUM_REGIONS_TO_CREATE = 40;
  final int NUM_ROWS_PER_REGION = 100;

  startCluster(NUM_RS); // NUM_RS=6.
  // (continuation of testThreeRSAbort, opened on the previous chunk line)
  final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf,
      "distributed log splitting test", null);

  HTable ht = installTable(zkw, "table", "family", NUM_REGIONS_TO_CREATE);
  populateDataInTable(NUM_ROWS_PER_REGION, "family");

  List<RegionServerThread> rsts = cluster.getLiveRegionServerThreads();
  assertEquals(NUM_RS, rsts.size());
  rsts.get(0).getRegionServer().abort("testing");
  rsts.get(1).getRegionServer().abort("testing");
  rsts.get(2).getRegionServer().abort("testing");

  // wait (max 60s) for the three RS threads to die
  long start = EnvironmentEdgeManager.currentTimeMillis();
  while (cluster.getLiveRegionServerThreads().size() > (NUM_RS - 3)) {
    if (EnvironmentEdgeManager.currentTimeMillis() - start > 60000) {
      assertTrue(false);
    }
    Thread.sleep(200);
  }

  // wait (max 60s) for all user regions plus meta to be reassigned
  start = EnvironmentEdgeManager.currentTimeMillis();
  while (getAllOnlineRegions(cluster).size() < (NUM_REGIONS_TO_CREATE + 1)) {
    if (EnvironmentEdgeManager.currentTimeMillis() - start > 60000) {
      assertTrue("Timedout", false);
    }
    Thread.sleep(200);
  }

  // wait for all regions are fully recovered
  TEST_UTIL.waitFor(180000, 200, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      List<String> recoveringRegions = zkw.getRecoverableZooKeeper().getChildren(
          zkw.recoveringRegionsZNode, false);
      return (recoveringRegions != null && recoveringRegions.size() == 0);
    }
  });

  assertEquals(NUM_REGIONS_TO_CREATE * NUM_ROWS_PER_REGION,
      TEST_UTIL.countRows(ht));
  ht.close();
  zkw.close();
}

/**
 * Verifies that a failed split-log task is not deleted from ZK immediately: a second
 * splitLogDistributed call on the same (corrupted) log must block until interrupted.
 * Note the shorter 30s timeout. Body continues on the next chunk lines.
 */
@Test(timeout=30000)
public void testDelayedDeleteOnFailure() throws Exception {
  LOG.info("testDelayedDeleteOnFailure");
  startCluster(1);
  final SplitLogManager slm = master.getMasterFileSystem().splitLogManager;
  final FileSystem fs = master.getMasterFileSystem().getFileSystem();
  final Path logDir = new Path(FSUtils.getRootDir(conf), "x");
  fs.mkdirs(logDir);
  ExecutorService executor = null;
  try {
    // create a deliberately corrupted "log" file so splitting always fails
    final Path corruptedLogFile = new Path(logDir, "x");
    FSDataOutputStream out;
    out = fs.create(corruptedLogFile);
    out.write(0);
    out.write(Bytes.toBytes("corrupted bytes"));
    out.close();
    // (continuation of the try block opened on the previous chunk line)
    slm.ignoreZKDeleteForTesting = true;
    executor = Executors.newSingleThreadExecutor();
    Runnable runnable = new Runnable() {
     @Override
     public void run() {
        try {
          // since the logDir is a fake, corrupted one, so the split log worker
          // will finish it quickly with error, and this call will fail and throw
          // an IOException.
          slm.splitLogDistributed(logDir);
        } catch (IOException ioe) {
          try {
            assertTrue(fs.exists(corruptedLogFile));
            // this call will block waiting for the task to be removed from the
            // tasks map which is not going to happen since ignoreZKDeleteForTesting
            // is set to true, until it is interrupted.
            slm.splitLogDistributed(logDir);
          } catch (IOException e) {
            assertTrue(Thread.currentThread().isInterrupted());
            return;
          }
          fail("did not get the expected IOException from the 2nd call");
        }
        fail("did not get the expected IOException from the 1st call");
      }
    };
    Future<?> result = executor.submit(runnable);
    try {
      result.get(2000, TimeUnit.MILLISECONDS);
    } catch (TimeoutException te) {
      // it is ok, expected: the 2nd splitLogDistributed call is still blocking.
    }
    waitForCounter(tot_mgr_wait_for_zk_delete, 0, 1, 10000);
    executor.shutdownNow();
    executor = null;

    // make sure the runnable is finished with no exception thrown.
    result.get();
  } finally {
    if (executor != null) {
      // interrupt the thread in case the test fails in the middle.
      // it has no effect if the thread is already terminated.
      // (continuation of the finally block opened on the previous chunk line)
      executor.shutdownNow();
    }
    fs.delete(logDir, true);
  }
}

/**
 * Verifies the ZK bookkeeping for meta recovery: after prepareLogReplay the meta region
 * appears under the recovering-regions znode, and splitMetaLog removes it again while
 * user regions remain marked.
 */
@Test(timeout = 300000)
public void testMetaRecoveryInZK() throws Exception {
  LOG.info("testMetaRecoveryInZK");
  conf.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, true);
  startCluster(NUM_RS);

  // turn off load balancing to prevent regions from moving around otherwise
  // they will consume recovered.edits
  master.balanceSwitch(false);
  final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-creation", null);

  List<RegionServerThread> rsts = cluster.getLiveRegionServerThreads();

  // only testing meta recovery in ZK operation
  HRegionServer hrs = findRSToKill(true, null);
  List<HRegionInfo> regions = ProtobufUtil.getOnlineRegions(hrs);

  LOG.info("#regions = " + regions.size());
  Set<HRegionInfo> tmpRegions = new HashSet<HRegionInfo>();
  tmpRegions.add(HRegionInfo.FIRST_META_REGIONINFO);
  master.getMasterFileSystem().prepareLogReplay(hrs.getServerName(), tmpRegions);
  Set<HRegionInfo> userRegionSet = new HashSet<HRegionInfo>();
  userRegionSet.addAll(regions);
  master.getMasterFileSystem().prepareLogReplay(hrs.getServerName(), userRegionSet);
  boolean isMetaRegionInRecovery = false;
  List<String> recoveringRegions =
      zkw.getRecoverableZooKeeper().getChildren(zkw.recoveringRegionsZNode, false);
  for (String curEncodedRegionName : recoveringRegions) {
    if (curEncodedRegionName.equals(HRegionInfo.FIRST_META_REGIONINFO.getEncodedName())) {
      isMetaRegionInRecovery = true;
      break;
    }
  }
  assertTrue(isMetaRegionInRecovery);

  master.getMasterFileSystem().splitMetaLog(hrs.getServerName());

  isMetaRegionInRecovery = false;
  recoveringRegions =
      zkw.getRecoverableZooKeeper().getChildren(zkw.recoveringRegionsZNode, false);
  for (String curEncodedRegionName : recoveringRegions) {
    if (curEncodedRegionName.equals(HRegionInfo.FIRST_META_REGIONINFO.getEncodedName())) {
      isMetaRegionInRecovery = true;
      break;
    }
  }
  // meta region should be recovered
  assertFalse(isMetaRegionInRecovery);
  zkw.close();
}

// Next test: same-version WAL edits must be recovered correctly; the method name
// continues on the next chunk line.
@Test(timeout = 300000) public void
// (method name continues the "@Test ... public void" fragment from the previous chunk line)
// Repeated updates to the SAME cell with the SAME timestamp must survive WAL recovery:
// the last written value must win, both before and after a flush.
testSameVersionUpdatesRecovery() throws Exception {
  LOG.info("testSameVersionUpdatesRecovery");
  conf.setLong("hbase.regionserver.hlog.blocksize", 15 * 1024);
  conf.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, true);
  startCluster(NUM_RS);
  final AtomicLong sequenceId = new AtomicLong(100);
  final int NUM_REGIONS_TO_CREATE = 40;
  final int NUM_LOG_LINES = 1000;
  // turn off load balancing to prevent regions from moving around otherwise
  // they will consume recovered.edits
  master.balanceSwitch(false);

  List<RegionServerThread> rsts = cluster.getLiveRegionServerThreads();
  final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-creation", null);
  HTable ht = installTable(zkw, "table", "family", NUM_REGIONS_TO_CREATE);

  List<HRegionInfo> regions = null;
  HRegionServer hrs = null;
  // pick an RS that does not carry hbase:meta
  for (int i = 0; i < NUM_RS; i++) {
    boolean isCarryingMeta = false;
    hrs = rsts.get(i).getRegionServer();
    regions = ProtobufUtil.getOnlineRegions(hrs);
    for (HRegionInfo region : regions) {
      if (region.isMetaRegion()) {
        isCarryingMeta = true;
        break;
      }
    }
    if (isCarryingMeta) {
      continue;
    }
    break;
  }

  LOG.info("#regions = " + regions.size());
  Iterator<HRegionInfo> it = regions.iterator();
  while (it.hasNext()) {
    HRegionInfo region = it.next();
    if (region.isMetaTable()
        || region.getEncodedName().equals(HRegionInfo.FIRST_META_REGIONINFO.getEncodedName())) {
      it.remove();
    }
  }
  if (regions.size() == 0) return;
  HRegionInfo curRegionInfo = regions.get(0);
  byte[] startRow = curRegionInfo.getStartKey();
  if (startRow == null || startRow.length == 0) {
    startRow = new byte[] { 0, 0, 0, 0, 1 };
  }
  byte[] row = Bytes.incrementBytes(startRow, 1);
  // use last 5 bytes because HBaseTestingUtility.createMultiRegions use 5 bytes key
  row = Arrays.copyOfRange(row, 3, 8);
  long value = 0;
  byte[] tableName = Bytes.toBytes("table");
  byte[] family = Bytes.toBytes("family");
  byte[] qualifier = Bytes.toBytes("c1");
  // single fixed timestamp: every edit below is a "same version" update
  long timeStamp = System.currentTimeMillis();
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
  // (continuation of testSameVersionUpdatesRecovery, opened on the previous chunk lines)
  htd.addFamily(new HColumnDescriptor(family));
  // write NUM_LOG_LINES edits to the same cell/timestamp with increasing values
  for (int i = 0; i < NUM_LOG_LINES; i += 1) {
    WALEdit e = new WALEdit();
    value++;
    e.add(new KeyValue(row, family, qualifier, timeStamp, Bytes.toBytes(value)));
    hrs.getWAL().append(curRegionInfo, TableName.valueOf(tableName), e,
        System.currentTimeMillis(), htd, sequenceId);
  }
  hrs.getWAL().sync();
  hrs.getWAL().close();

  // wait for abort completes
  this.abortRSAndWaitForRecovery(hrs, zkw, NUM_REGIONS_TO_CREATE);

  // verify we got the last value
  LOG.info("Verification Starts...");
  Get g = new Get(row);
  Result r = ht.get(g);
  long theStoredVal = Bytes.toLong(r.getValue(family, qualifier));
  assertEquals(value, theStoredVal);

  // after flush
  LOG.info("Verification after flush...");
  TEST_UTIL.getHBaseAdmin().flush(tableName);
  r = ht.get(g);
  theStoredVal = Bytes.toLong(r.getValue(family, qualifier));
  assertEquals(value, theStoredVal);
  ht.close();
}

/**
 * Same-version updates recovery as above, but additionally verifies the last value
 * survives a flush followed by a compaction. Body continues on the next chunk lines.
 */
@Test(timeout = 300000)
public void testSameVersionUpdatesRecoveryWithCompaction() throws Exception {
  LOG.info("testSameVersionUpdatesRecoveryWithWrites");
  conf.setLong("hbase.regionserver.hlog.blocksize", 15 * 1024);
  conf.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, true);
  // small memstore + low compaction threshold so flush/compaction actually happen
  conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 30 * 1024);
  conf.setInt("hbase.hstore.compactionThreshold", 3);
  startCluster(NUM_RS);
  final AtomicLong sequenceId = new AtomicLong(100);
  final int NUM_REGIONS_TO_CREATE = 40;
  final int NUM_LOG_LINES = 1000;
  // turn off load balancing to prevent regions from moving around otherwise
  // they will consume recovered.edits
  master.balanceSwitch(false);

  List<RegionServerThread> rsts = cluster.getLiveRegionServerThreads();
  final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-creation", null);
  HTable ht = installTable(zkw, "table", "family", NUM_REGIONS_TO_CREATE);

  List<HRegionInfo> regions = null;
  HRegionServer hrs = null;
  // pick an RS that does not carry hbase:meta (loop continues on the next chunk line)
  for (int i = 0; i < NUM_RS; i++) {
    boolean isCarryingMeta = false;
    hrs = rsts.get(i).getRegionServer();
    regions =
        // (continuation of the assignment started on the previous chunk line)
        ProtobufUtil.getOnlineRegions(hrs);
    for (HRegionInfo region : regions) {
      if (region.isMetaRegion()) {
        isCarryingMeta = true;
        break;
      }
    }
    if (isCarryingMeta) {
      continue;
    }
    break;
  }

  LOG.info("#regions = " + regions.size());
  Iterator<HRegionInfo> it = regions.iterator();
  while (it.hasNext()) {
    HRegionInfo region = it.next();
    if (region.isMetaTable()
        || region.getEncodedName().equals(HRegionInfo.FIRST_META_REGIONINFO.getEncodedName())) {
      it.remove();
    }
  }
  if (regions.size() == 0) return;
  HRegionInfo curRegionInfo = regions.get(0);
  byte[] startRow = curRegionInfo.getStartKey();
  if (startRow == null || startRow.length == 0) {
    startRow = new byte[] { 0, 0, 0, 0, 1 };
  }
  byte[] row = Bytes.incrementBytes(startRow, 1);
  // use last 5 bytes because HBaseTestingUtility.createMultiRegions use 5 bytes key
  row = Arrays.copyOfRange(row, 3, 8);
  long value = 0;
  final byte[] tableName = Bytes.toBytes("table");
  byte[] family = Bytes.toBytes("family");
  byte[] qualifier = Bytes.toBytes("c1");
  // single fixed timestamp: every edit below is a "same version" update
  long timeStamp = System.currentTimeMillis();
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
  htd.addFamily(new HColumnDescriptor(family));
  for (int i = 0; i < NUM_LOG_LINES; i += 1) {
    WALEdit e = new WALEdit();
    value++;
    e.add(new KeyValue(row, family, qualifier, timeStamp, Bytes.toBytes(value)));
    hrs.getWAL().append(curRegionInfo, TableName.valueOf(tableName), e,
        System.currentTimeMillis(), htd, sequenceId);
  }
  hrs.getWAL().sync();
  hrs.getWAL().close();

  // wait for abort completes
  this.abortRSAndWaitForRecovery(hrs, zkw, NUM_REGIONS_TO_CREATE);

  // verify we got the last value
  LOG.info("Verification Starts...");
  Get g = new Get(row);
  Result r = ht.get(g);
  long theStoredVal = Bytes.toLong(r.getValue(family, qualifier));
  assertEquals(value, theStoredVal);

  // after flush & compaction
  LOG.info("Verification after flush...");
  TEST_UTIL.getHBaseAdmin().flush(tableName);
  TEST_UTIL.getHBaseAdmin().compact(tableName);

  // wait for compaction completes (waitFor call continues on the next chunk line)
  TEST_UTIL.waitFor(30000, 200, new
      // (continuation of the "new" expression started on the previous chunk line)
      Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return (TEST_UTIL.getHBaseAdmin().getCompactionState(tableName) == CompactionState.NONE);
    }
  });

  r = ht.get(g);
  theStoredVal = Bytes.toLong(r.getValue(family, qualifier));
  assertEquals(value, theStoredVal);
  ht.close();
}

/** Convenience overload: installs a table assuming no pre-existing user regions. */
HTable installTable(ZooKeeperWatcher zkw, String tname, String fname, int nrs)
    throws Exception {
  return installTable(zkw, tname, fname, nrs, 0);
}

/**
 * Creates a table with {@code nrs} regions and waits until every region is assigned.
 * {@code existingRegions} is the number of user regions already present in the cluster
 * (used to adjust the expected online-region counts).
 */
HTable installTable(ZooKeeperWatcher zkw, String tname, String fname, int nrs,
    int existingRegions) throws Exception {
  // Create a table with regions
  byte [] table = Bytes.toBytes(tname);
  byte [] family = Bytes.toBytes(fname);
  LOG.info("Creating table with " + nrs + " regions");
  HTable ht = TEST_UTIL.createTable(table, family);
  int numRegions = TEST_UTIL.createMultiRegions(conf, ht, family, nrs);
  assertEquals(nrs, numRegions);
  LOG.info("Waiting for no more RIT\n");
  blockUntilNoRIT(zkw, master);
  // disable-enable cycle to get rid of table's dead regions left behind
  // by createMultiRegions
  LOG.debug("Disabling table\n");
  TEST_UTIL.getHBaseAdmin().disableTable(table);
  LOG.debug("Waiting for no more RIT\n");
  blockUntilNoRIT(zkw, master);
  NavigableSet<String> regions = getAllOnlineRegions(cluster);
  LOG.debug("Verifying only catalog and namespace regions are assigned\n");
  if (regions.size() != 2) {
    for (String oregion : regions)
      LOG.debug("Region still online: " + oregion);
  }
  assertEquals(2 + existingRegions, regions.size());
  LOG.debug("Enabling table\n");
  TEST_UTIL.getHBaseAdmin().enableTable(table);
  LOG.debug("Waiting for no more RIT\n");
  blockUntilNoRIT(zkw, master);
  LOG.debug("Verifying there are " + numRegions + " assigned on cluster\n");
  regions = getAllOnlineRegions(cluster);
  assertEquals(numRegions + 2 + existingRegions, regions.size());
  return ht;
}

/**
 * Writes {@code nrows} rows to every non-system region on every live region server.
 * Body continues on the next chunk line.
 */
void populateDataInTable(int nrows, String fname) throws Exception {
  byte [] family = Bytes.toBytes(fname);

  List<RegionServerThread> rsts =
cluster.getLiveRegionServerThreads(); assertEquals(NUM_RS, rsts.size()); for (RegionServerThread rst : rsts) { HRegionServer hrs = rst.getRegionServer(); List<HRegionInfo> hris = ProtobufUtil.getOnlineRegions(hrs); for (HRegionInfo hri : hris) { if (hri.getTable().isSystemTable()) { continue; } LOG.debug("adding data to rs = " + rst.getName() + " region = "+ hri.getRegionNameAsString()); HRegion region = hrs.getOnlineRegion(hri.getRegionName()); assertTrue(region != null); putData(region, hri.getStartKey(), nrows, Bytes.toBytes("q"), family); } } } public void makeHLog(HLog log, List<HRegionInfo> regions, String tname, String fname, int num_edits, int edit_size) throws IOException { makeHLog(log, regions, tname, fname, num_edits, edit_size, true); } public void makeHLog(HLog log, List<HRegionInfo> regions, String tname, String fname, int num_edits, int edit_size, boolean closeLog) throws IOException { TableName fullTName = TableName.valueOf(tname); // remove root and meta region regions.remove(HRegionInfo.FIRST_META_REGIONINFO); // using one sequenceId for edits across all regions is ok. 
final AtomicLong sequenceId = new AtomicLong(10); for(Iterator<HRegionInfo> iter = regions.iterator(); iter.hasNext(); ) { HRegionInfo regionInfo = iter.next(); if(regionInfo.getTable().isSystemTable()) { iter.remove(); } } HTableDescriptor htd = new HTableDescriptor(fullTName); byte[] family = Bytes.toBytes(fname); htd.addFamily(new HColumnDescriptor(family)); byte[] value = new byte[edit_size]; List<HRegionInfo> hris = new ArrayList<HRegionInfo>(); for (HRegionInfo region : regions) { if (!region.getTable().getNameAsString().equalsIgnoreCase(tname)) { continue; } hris.add(region); } LOG.info("Creating wal edits across " + hris.size() + " regions."); for (int i = 0; i < edit_size; i++) { value[i] = (byte) ('a' + (i % 26)); } int n = hris.size(); int[] counts = new int[n]; if (n > 0) { for (int i = 0; i < num_edits; i += 1) { WALEdit e = new WALEdit(); HRegionInfo curRegionInfo = hris.get(i % n); byte[] startRow = curRegionInfo.getStartKey(); if (startRow == null || startRow.length == 0) { startRow = new byte[] { 0, 0, 0, 0, 1 }; } byte[] row = Bytes.incrementBytes(startRow, counts[i % n]); row = Arrays.copyOfRange(row, 3, 8); // use last 5 bytes because // HBaseTestingUtility.createMultiRegions use 5 bytes // key byte[] qualifier = Bytes.toBytes("c" + Integer.toString(i)); e.add(new KeyValue(row, family, qualifier, System.currentTimeMillis(), value)); log.append(curRegionInfo, fullTName, e, System.currentTimeMillis(), htd, sequenceId); counts[i % n] += 1; } } log.sync(); if(closeLog) { log.close(); } for (int i = 0; i < n; i++) { LOG.info("region " + hris.get(i).getRegionNameAsString() + " has " + counts[i] + " edits"); } return; } private int countHLog(Path log, FileSystem fs, Configuration conf) throws IOException { int count = 0; HLog.Reader in = HLogFactory.createReader(fs, log, conf); while (in.next() != null) { count++; } return count; } private void blockUntilNoRIT(ZooKeeperWatcher zkw, HMaster master) throws KeeperException, InterruptedException { 
ZKAssign.blockUntilNoRIT(zkw); master.assignmentManager.waitUntilNoRegionsInTransition(60000); } private void putData(HRegion region, byte[] startRow, int numRows, byte [] qf, byte [] ...families) throws IOException { for(int i = 0; i < numRows; i++) { Put put = new Put(Bytes.add(startRow, Bytes.toBytes(i))); for(byte [] family : families) { put.add(family, qf, null); } region.put(put); } } /** * Load table with puts and deletes with expected values so that we can verify later */ private void prepareData(final HTable t, final byte[] f, final byte[] column) throws IOException { t.setAutoFlush(false, true); byte[] k = new byte[3]; // add puts for (byte b1 = 'a'; b1 <= 'z'; b1++) { for (byte b2 = 'a'; b2 <= 'z'; b2++) { for (byte b3 = 'a'; b3 <= 'z'; b3++) { k[0] = b1; k[1] = b2; k[2] = b3; Put put = new Put(k); put.add(f, column, k); t.put(put); } } } t.flushCommits(); // add deletes for (byte b3 = 'a'; b3 <= 'z'; b3++) { k[0] = 'a'; k[1] = 'a'; k[2] = b3; Delete del = new Delete(k); t.delete(del); } t.flushCommits(); } private NavigableSet<String> getAllOnlineRegions(MiniHBaseCluster cluster) throws IOException { NavigableSet<String> online = new TreeSet<String>(); for (RegionServerThread rst : cluster.getLiveRegionServerThreads()) { for (HRegionInfo region : ProtobufUtil.getOnlineRegions(rst.getRegionServer())) { online.add(region.getRegionNameAsString()); } } return online; } private void waitForCounter(AtomicLong ctr, long oldval, long newval, long timems) { long curt = System.currentTimeMillis(); long endt = curt + timems; while (curt < endt) { if (ctr.get() == oldval) { Thread.yield(); curt = System.currentTimeMillis(); } else { assertEquals(newval, ctr.get()); return; } } assertTrue(false); } private void abortMaster(MiniHBaseCluster cluster) throws InterruptedException { for (MasterThread mt : cluster.getLiveMasterThreads()) { if (mt.getMaster().isActiveMaster()) { mt.getMaster().abort("Aborting for tests", new Exception("Trace info")); mt.join(); break; } } 
LOG.debug("Master is aborted"); } private void startMasterAndWaitUntilLogSplit(MiniHBaseCluster cluster) throws IOException, InterruptedException { cluster.startMaster(); HMaster master = cluster.getMaster(); while (!master.isInitialized()) { Thread.sleep(100); } ServerManager serverManager = master.getServerManager(); while (serverManager.areDeadServersInProgress()) { Thread.sleep(100); } } /** * Find a RS that has regions of a table. * @param hasMetaRegion when true, the returned RS has hbase:meta region as well * @param tableName * @return * @throws Exception */ private HRegionServer findRSToKill(boolean hasMetaRegion, String tableName) throws Exception { List<RegionServerThread> rsts = cluster.getLiveRegionServerThreads(); int numOfRSs = rsts.size(); List<HRegionInfo> regions = null; HRegionServer hrs = null; for (int i = 0; i < numOfRSs; i++) { boolean isCarryingMeta = false; boolean foundTableRegion = false; hrs = rsts.get(i).getRegionServer(); regions = ProtobufUtil.getOnlineRegions(hrs); for (HRegionInfo region : regions) { if (region.isMetaRegion()) { isCarryingMeta = true; } if (tableName == null || region.getTable().getNameAsString().equals(tableName)) { foundTableRegion = true; } if (foundTableRegion && (isCarryingMeta || !hasMetaRegion)) { break; } } if (isCarryingMeta && hasMetaRegion) { // clients ask for a RS with META if (!foundTableRegion) { final HRegionServer destRS = hrs; // the RS doesn't have regions of the specified table so we need move one to this RS List<HRegionInfo> tableRegions = TEST_UTIL.getHBaseAdmin().getTableRegions(Bytes.toBytes(tableName)); final HRegionInfo hri = tableRegions.get(0); TEST_UTIL.getHBaseAdmin().move(hri.getEncodedNameAsBytes(), Bytes.toBytes(destRS.getServerName().getServerName())); // wait for region move completes final RegionStates regionStates = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStates(); TEST_UTIL.waitFor(45000, 200, new Waiter.Predicate<Exception>() { @Override public 
boolean evaluate() throws Exception { ServerName sn = regionStates.getRegionServerOfRegion(hri); return (sn != null && sn.equals(destRS.getServerName())); } }); } return hrs; } else if (hasMetaRegion || isCarryingMeta) { continue; } if (foundTableRegion) break; } return hrs; } }
apache-2.0
jarib/mongo-ruby-driver
lib/bson/types/dbref.rb
1122
# encoding: UTF-8
# --
# Copyright (C) 2008-2010 10gen Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ++
module BSON

  # A reference to another object in a MongoDB database.
  class DBRef

    attr_reader :namespace, :object_id

    # Create a DBRef. Use this class in conjunction with DB#dereference.
    #
    # @param [String] namespace the name of the collection being referenced
    # @param [ObjectID] object_id the id of the referenced document
    #
    # @core dbrefs constructor_details
    def initialize(namespace, object_id)
      @namespace = namespace
      @object_id = object_id
    end

    # A readable rendering of the reference, e.g. "ns: users, id: 4f...".
    def to_s
      format("ns: %s, id: %s", namespace, object_id)
    end
  end
end
apache-2.0
lsmaira/gradle
subprojects/core/src/main/java/org/gradle/initialization/layout/ProjectCacheDir.java
1999
/*
 * Copyright 2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.gradle.initialization.layout;

import org.gradle.cache.internal.DefaultCleanupProgressMonitor;
import org.gradle.cache.internal.VersionSpecificCacheCleanupAction;
import org.gradle.internal.concurrent.Stoppable;
import org.gradle.internal.logging.progress.ProgressLogger;
import org.gradle.internal.logging.progress.ProgressLoggerFactory;

import java.io.File;

/**
 * Represents the project-scoped cache directory and, on shutdown, removes
 * version-specific caches that have not been used recently.
 */
public class ProjectCacheDir implements Stoppable {
    // Version-specific caches untouched for this many days are cleaned up on stop().
    private static final long MAX_UNUSED_DAYS_FOR_RELEASES_AND_SNAPSHOTS = 7;

    private final File dir;
    private final ProgressLoggerFactory progressLoggerFactory;

    public ProjectCacheDir(File dir, ProgressLoggerFactory progressLoggerFactory) {
        this.dir = dir;
        this.progressLoggerFactory = progressLoggerFactory;
    }

    /**
     * @return the project cache directory this instance wraps.
     */
    public File getDir() {
        return dir;
    }

    /**
     * Runs the version-specific cache cleanup, reporting progress through a
     * dedicated progress logger. The logger is always completed, even if the
     * cleanup action throws.
     */
    @Override
    public void stop() {
        VersionSpecificCacheCleanupAction action =
                new VersionSpecificCacheCleanupAction(dir, MAX_UNUSED_DAYS_FOR_RELEASES_AND_SNAPSHOTS);
        String displayName = action.getDisplayName();
        ProgressLogger operation = progressLoggerFactory
                .newOperation(ProjectCacheDir.class)
                .start(displayName, displayName);
        try {
            action.execute(new DefaultCleanupProgressMonitor(operation));
        } finally {
            operation.completed();
        }
    }
}
apache-2.0
CESNET/secant
tools/assessment.py
3494
#!/usr/bin/env python3
import re
import sys
import fileinput
from lxml import etree
import os
import importlib
import time
import logging
import yaml
import subprocess

secant_path = os.path.dirname(os.path.realpath(__file__)) + "/.."
sys.path.append(secant_path + "/include")
from py_functions import getSettingsFromBashConfFile


def assessment(template_id, report_file, tests_version, base_mpuri, message_id):
    """Aggregate per-probe results into one SECANT assessment XML document.

    Reads the per-probe XML report in ``report_file``, copies each configured
    probe's status into a <CHECK> element, derives the overall outcome
    (INTERNAL_FAILURE takes precedence over FAIL, which takes precedence over
    OK) and prints the resulting document to stdout.

    :param template_id: id of the appliance template (currently unused; kept
        for interface compatibility with callers)
    :param report_file: path to the per-probe XML report
    :param tests_version: version string of the test suite, echoed in <VERSION>
    :param base_mpuri: marketplace URI of the checked image, echoed in <IMAGEID>
    :param message_id: correlation id, echoed in <MESSAGEID>
    :raises Exception: if a probe listed in probes.conf has no entry in the report
    """
    secant = etree.Element('SECANT')
    version = etree.SubElement(secant, "VERSION")
    imageID = etree.SubElement(secant, "IMAGEID")
    date = etree.SubElement(secant, "DATE")
    outcome = etree.SubElement(secant, "OUTCOME")
    outcome_description = etree.SubElement(secant, "OUTCOME_DESCRIPTION")
    messageID = etree.SubElement(secant, "MESSAGEID")
    log = etree.SubElement(secant, "LOG")
    version.text = tests_version
    imageID.text = base_mpuri
    messageID.text = message_id

    report = etree.parse(report_file)
    conf_path = os.environ.get('SECANT_CONFIG_DIR', '/etc/secant') + '/' + 'probes.conf'
    # Worst status seen across all probes: ERROR maps to an overall FAIL,
    # INTERNAL_FAILURE to an overall INTERNAL_FAILURE (which wins).
    total_outcomeE = False
    total_outcomeI = False
    config = getSettingsFromBashConfFile(conf_path, 'SECANT_PROBES').split(',')
    for probe_name in config:
        if report.find(probe_name) is not None:
            check = etree.SubElement(log, "CHECK")
            test_version = etree.SubElement(check, "VERSION")
            test_id = etree.SubElement(check, "TEST_ID")
            description = etree.SubElement(check, "DESCRIPTION")
            test_outcome = etree.SubElement(check, "OUTCOME")
            summary = etree.SubElement(check, "SUMMARY")
            details = etree.SubElement(check, "DETAILS")
            test_id.text = probe_name.upper()
            with open(secant_path + '/probes/' + probe_name + '/probe.yaml') as y:
                # safe_load: probe.yaml is plain data, and yaml.load without an
                # explicit Loader is unsafe and rejected by PyYAML >= 6.
                data = yaml.safe_load(y)
            test_version.text = str(data['version'])
            description.text = data['title']
            status = report.find(probe_name).get("status")
            test_outcome.text = status
            if status == "ERROR":
                total_outcomeE = True
            if status == "INTERNAL_FAILURE":
                total_outcomeI = True
            nodeS = report.find("/" + probe_name + "/summary")
            if nodeS is not None:
                summary.text = nodeS.text
            nodeD = report.find("/" + probe_name + "/details")
            if nodeD is not None:
                # A probe's "empty" details element serializes as '\n\t';
                # normalise it to a truly empty element.
                if nodeD.text == '\n\t':
                    details.text = None
                else:
                    details.text = nodeD.text
        else:
            raise Exception('Probe result of %s not found in report.' % (probe_name))

    if total_outcomeI:
        outcome.text = 'INTERNAL_FAILURE'
        outcome_description.text = 'The check failed to finish correctly due to internal failure in Secant.'
    elif total_outcomeE:
        outcome.text = 'FAIL'
        outcome_description.text = 'The machine has been found to expose security vulnerabilities.'
    else:
        outcome.text = "OK"
        outcome_description.text = 'The machine has not been found to expose known security vulnerabilities.'
    date.text = str(time.time())
    # etree should output the XML declaration itself
    print("<?xml version=\"1.0\" encoding=\"UTF-8\"?>")
    print(etree.tostring(secant, pretty_print=True).decode('utf-8'))


if __name__ == "__main__":
    assessment(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5])
apache-2.0
NationalSecurityAgency/ghidra
Ghidra/Processors/Atmel/src/test.processors/java/ghidra/test/processors/AVR8_6_GCC_O0_EmulatorTest.java
2107
/* ###
 * IP: GHIDRA
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package ghidra.test.processors;

import ghidra.framework.options.Options;
import ghidra.program.model.listing.Program;
import ghidra.test.processors.support.EmulatorTestRunner;
import ghidra.test.processors.support.ProcessorEmulatorTestAdapter;
import junit.framework.Test;

/**
 * Emulator regression test for the AVR8 (ATmega256, little-endian) language
 * using test binaries compiled with GCC at optimization level -O0.
 * All configuration (language, compiler spec, register dump set) is supplied
 * to the generic {@link ProcessorEmulatorTestAdapter} harness.
 */
public class AVR8_6_GCC_O0_EmulatorTest extends ProcessorEmulatorTestAdapter {

	// Ghidra language id of the processor variant under test.
	private static final String LANGUAGE_ID = "avr8:LE:16:atmega256";
	// Compiler spec matching how the test binaries were built.
	private static final String COMPILER_SPEC_ID = "gcc";

	// No extra registers to include in state dumps for this target.
	private static final String[] REG_DUMP_SET = new String[] {};

	public AVR8_6_GCC_O0_EmulatorTest(String name) throws Exception {
		super(name, LANGUAGE_ID, COMPILER_SPEC_ID, REG_DUMP_SET);
	}

	/** Designator used to locate the test binaries/resources for this variant. */
	@Override
	protected String getProcessorDesignator() {
		return "AVR8_6_GCC_O0";
	}

	/**
	 * Pre-seeds registers before emulation starts.
	 * These eliminate "uninitialized register" errors. Not strictly needed, but helps find actual problems.
	 */
	@Override
	protected void initializeState(EmulatorTestRunner testRunner, Program program) throws Exception {
		testRunner.setRegister("SP", 0x0);
		testRunner.setRegister("R1", 0x0);
		testRunner.setRegister("Y", 0x0);
		testRunner.setRegister("W", 0x0);
	}

	/** Disables reference analyzers that produce noise on this target. */
	@Override
	protected void setAnalysisOptions(Options analysisOptions) {
		super.setAnalysisOptions(analysisOptions);
		analysisOptions.setBoolean("Reference", false); // too many bad disassemblies
		analysisOptions.setBoolean("Data Reference", false);
	}

	/** JUnit 3 entry point: builds the suite via the shared adapter. */
	public static Test suite() {
		return ProcessorEmulatorTestAdapter.buildEmulatorTestSuite(
			AVR8_6_GCC_O0_EmulatorTest.class);
	}
}
apache-2.0
AlexLandau/escape-rope
src/main/java/net/alloyggp/escaperope/restrict/CharDomain.java
259
package net.alloyggp.escaperope.restrict;

/**
 * A CharDomain represents a set of available characters. If you
 * want to restrict the output of a Delimiter to a single value,
 * you can use a CharDomain to specify this.
 *
 * <p>NOTE(review): currently a marker interface with no methods;
 * implementations presumably carry the character set internally —
 * confirm against call sites before adding members here.
 */
public interface CharDomain {

}
apache-2.0
danieldk/dictomaton
src/main/java/eu/danieldk/dictomaton/collections/ImmutableStringObjectMap.java
8573
// Copyright 2013 Daniel de Kok
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package eu.danieldk.dictomaton.collections;

import eu.danieldk.dictomaton.DictionaryBuilder;
import eu.danieldk.dictomaton.DictionaryBuilderException;
import eu.danieldk.dictomaton.PerfectHashDictionary;

import java.io.Serializable;
import java.util.*;

/**
 * An immutable mapping from {@link String} to an object with the type <code>V</code>, where keys are compactly stored
 * using a finite state automaton.
 */
public class ImmutableStringObjectMap<V> extends AbstractMap<String, V> implements Serializable {
    private static final long serialVersionUID = 1L;

    // Perfect hash automaton over the keys; number(key) yields a 1-based index,
    // or -1 when the key is absent (see get()).
    private final PerfectHashDictionary d_keys;

    // Values, indexed by (perfect hash of key) - 1.
    private final V[] d_values;

    /**
     * A builder for {@link ImmutableStringObjectMap}. Mappings
     * can be added to the builder using the {@link #put} and
     * {@link #putAll} methods. The {@link ImmutableStringObjectMap}
     * can then be constructed using the {@link #build} method.
     */
    public static class Builder<V> {
        private final TreeMap<String, V> d_map;

        public Builder() {
            d_map = new TreeMap<>();
        }

        /**
         * Put a key/value pair.
         */
        public synchronized Builder<V> put(String key, V value) {
            d_map.put(key, value);
            return this;
        }

        /**
         * Put all key/value pairs from a {@link java.util.Map}.
         */
        public synchronized Builder<V> putAll(Map<String, V> map) {
            d_map.putAll(map);
            return this;
        }

        /**
         * Construct a {@link ImmutableStringObjectMap}.
         *
         * @throws DictionaryBuilderException if the key automaton cannot be built
         */
        public synchronized ImmutableStringObjectMap<V> build() throws DictionaryBuilderException {
            PerfectHashDictionary dict = new DictionaryBuilder().addAll(d_map.keySet()).buildPerfectHash(false);

            @SuppressWarnings("unchecked")
            V[] values = (V[]) new Object[d_map.size()];

            // TreeMap iterates in key order, matching the perfect hash numbering.
            int i = 0;
            for (V value : d_map.values())
                values[i++] = value;

            return new ImmutableStringObjectMap<>(dict, values);
        }
    }

    /**
     * A builder for {@link ImmutableStringObjectMap}. Mappings can be added to the builder using the {@link #put} and
     * {@link #putAll} methods. The {@link ImmutableStringObjectMap} can then be constructed using the {@link #build}
     * method. <b>Note:</b> This builder assumes that entries are put in key order. This additional assumption makes
     * the builder more efficient than {@link Builder}.
     */
    public static class OrderedBuilder<V> {
        private final DictionaryBuilder dictionaryBuilder;
        private final ArrayList<V> values;

        public OrderedBuilder() {
            this.dictionaryBuilder = new DictionaryBuilder();
            this.values = new ArrayList<>();
        }

        /**
         * Put a key/value pair. Keys must be added in natural (sorted) order.
         */
        public synchronized OrderedBuilder<V> put(String key, V value) throws DictionaryBuilderException {
            dictionaryBuilder.add(key);
            values.add(value);
            return this;
        }

        /**
         * Put all key/value pairs from a {@link Map}. The map should be an ordered map (by key). If
         * not, a {@link IllegalArgumentException} is thrown.
         */
        public synchronized OrderedBuilder<V> putAll(SortedMap<String, V> map) throws DictionaryBuilderException {
            // A non-null comparator means the map may not be in natural key order,
            // which the underlying DictionaryBuilder requires.
            if (map.comparator() != null)
                throw new IllegalArgumentException("SortedMap does not use the natural ordering of its keys");

            values.ensureCapacity(values.size() + map.size());

            for (SortedMap.Entry<String, V> entry: map.entrySet()) {
                dictionaryBuilder.add(entry.getKey());
                values.add(entry.getValue());
            }

            return this;
        }

        /**
         * Construct a {@link ImmutableStringObjectMap}.
         *
         * @throws DictionaryBuilderException if the key automaton cannot be built
         */
        public synchronized ImmutableStringObjectMap<V> build() throws DictionaryBuilderException {
            PerfectHashDictionary dict = dictionaryBuilder.buildPerfectHash(false);

            @SuppressWarnings("unchecked")
            V[] arr = (V[]) new Object[values.size()];
            for (int i = 0; i < values.size(); ++i)
                arr[i] = values.get(i);

            return new ImmutableStringObjectMap<>(dict, arr);
        }
    }

    /** Read-only entry-set view backed by the key automaton and value array. */
    private class EntrySet extends AbstractSet<Entry<String, V>> {
        private class EntrySetIterator implements Iterator<Entry<String, V>> {
            private final Iterator<String> d_keyIter;

            public EntrySetIterator() {
                d_keyIter = d_keys.iterator();
            }

            @Override
            public boolean hasNext() {
                return d_keyIter.hasNext();
            }

            @Override
            public Entry<String, V> next() {
                String key = d_keyIter.next();
                int idx = d_keys.number(key) - 1;
                return new SimpleEntry<>(key, d_values[idx]);
            }

            @Override
            public void remove() {
                throw new UnsupportedOperationException();
            }
        }

        @Override
        public boolean contains(Object o) {
            if (o == null)
                return false;

            if (!(o instanceof Entry))
                return false;

            Entry e = (Entry) o;

            // Key cannot be null.
            if (e.getKey() == null)
                return false;

            if (!(e.getKey() instanceof String))
                return false;

            String key = (String) e.getKey();
            Object value = e.getValue();

            int hash = d_keys.number(key);

            // Does not contain the key.
            if (hash == -1)
                return false;

            // Null-safe: a stored null value must still compare correctly.
            return Objects.equals(d_values[hash - 1], value);
        }

        @Override
        public boolean isEmpty() {
            return d_keys.isEmpty();
        }

        @Override
        public Iterator<Entry<String, V>> iterator() {
            return new EntrySetIterator();
        }

        @Override
        public int size() {
            return d_keys.size();
        }
    }

    /** Unmodifiable List view over the value array. */
    private class ObjectArrayList extends AbstractList<V> {
        @Override
        public V get(int index) {
            return d_values[index];
        }

        @Override
        public int size() {
            return d_values.length;
        }
    }

    private ImmutableStringObjectMap(PerfectHashDictionary keys, V[] values) {
        d_keys = keys;
        d_values = values;
    }

    @Override
    public void clear() {
        throw new UnsupportedOperationException();
    }

    @Override
    public boolean containsKey(Object o) {
        return d_keys.contains(o);
    }

    @Override
    public Set<Entry<String, V>> entrySet() {
        return new EntrySet();
    }

    @Override
    public V get(Object o) {
        if (!(o instanceof String))
            return null;

        String key = (String) o;

        int hashcode = d_keys.number(key);
        if (hashcode == -1)
            return null;

        return d_values[hashcode - 1];
    }

    @Override
    public boolean isEmpty() {
        return d_keys.isEmpty();
    }

    @Override
    public Set<String> keySet() {
        return d_keys;
    }

    @Override
    public V put(String k, V v) {
        throw new UnsupportedOperationException();
    }

    @Override
    public void putAll(Map<? extends String, ? extends V> m) {
        throw new UnsupportedOperationException();
    }

    @Override
    public V remove(Object key) {
        throw new UnsupportedOperationException();
    }

    @Override
    public int size() {
        return d_keys.size();
    }

    /**
     * Get an iterator over the keys in the mapping.
     */
    public Iterator<String> keyIterator() {
        return d_keys.iterator();
    }

    @Override
    public Collection<V> values() {
        return new ObjectArrayList();
    }
}
apache-2.0
wspeirs/sop4j-base
src/main/java/com/sop4j/base/apache/io/EndianUtils.java
17798
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.sop4j.base.apache.io; import java.io.EOFException; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; /** * Utility code for dealing with different endian systems. * <p> * Different computer architectures adopt different conventions for * byte ordering. In so-called "Little Endian" architectures (eg Intel), * the low-order byte is stored in memory at the lowest address, and * subsequent bytes at higher addresses. For "Big Endian" architectures * (eg Motorola), the situation is reversed. * This class helps you solve this incompatability. * <p> * Origin of code: Excalibur * * @version $Id: EndianUtils.java 1302056 2012-03-18 03:03:38Z ggregory $ * @see com.sop4j.base.apache.io.input.SwappedDataInputStream */ public class EndianUtils { /** * Instances should NOT be constructed in standard programming. */ public EndianUtils() { super(); } // ========================================== Swapping routines /** * Converts a "short" value between endian systems. 
* @param value value to convert * @return the converted value */ public static short swapShort(short value) { return (short) ( ( ( ( value >> 0 ) & 0xff ) << 8 ) + ( ( ( value >> 8 ) & 0xff ) << 0 ) ); } /** * Converts a "int" value between endian systems. * @param value value to convert * @return the converted value */ public static int swapInteger(int value) { return ( ( ( value >> 0 ) & 0xff ) << 24 ) + ( ( ( value >> 8 ) & 0xff ) << 16 ) + ( ( ( value >> 16 ) & 0xff ) << 8 ) + ( ( ( value >> 24 ) & 0xff ) << 0 ); } /** * Converts a "long" value between endian systems. * @param value value to convert * @return the converted value */ public static long swapLong(long value) { return ( ( ( value >> 0 ) & 0xff ) << 56 ) + ( ( ( value >> 8 ) & 0xff ) << 48 ) + ( ( ( value >> 16 ) & 0xff ) << 40 ) + ( ( ( value >> 24 ) & 0xff ) << 32 ) + ( ( ( value >> 32 ) & 0xff ) << 24 ) + ( ( ( value >> 40 ) & 0xff ) << 16 ) + ( ( ( value >> 48 ) & 0xff ) << 8 ) + ( ( ( value >> 56 ) & 0xff ) << 0 ); } /** * Converts a "float" value between endian systems. * @param value value to convert * @return the converted value */ public static float swapFloat(float value) { return Float.intBitsToFloat( swapInteger( Float.floatToIntBits( value ) ) ); } /** * Converts a "double" value between endian systems. * @param value value to convert * @return the converted value */ public static double swapDouble(double value) { return Double.longBitsToDouble( swapLong( Double.doubleToLongBits( value ) ) ); } // ========================================== Swapping read/write routines /** * Writes a "short" value to a byte array at a given offset. The value is * converted to the opposed endian system while writing. 
* @param data target byte array * @param offset starting offset in the byte array * @param value value to write */ public static void writeSwappedShort(byte[] data, int offset, short value) { data[ offset + 0 ] = (byte)( ( value >> 0 ) & 0xff ); data[ offset + 1 ] = (byte)( ( value >> 8 ) & 0xff ); } /** * Reads a "short" value from a byte array at a given offset. The value is * converted to the opposed endian system while reading. * @param data source byte array * @param offset starting offset in the byte array * @return the value read */ public static short readSwappedShort(byte[] data, int offset) { return (short)( ( ( data[ offset + 0 ] & 0xff ) << 0 ) + ( ( data[ offset + 1 ] & 0xff ) << 8 ) ); } /** * Reads an unsigned short (16-bit) value from a byte array at a given * offset. The value is converted to the opposed endian system while * reading. * @param data source byte array * @param offset starting offset in the byte array * @return the value read */ public static int readSwappedUnsignedShort(byte[] data, int offset) { return ( ( ( data[ offset + 0 ] & 0xff ) << 0 ) + ( ( data[ offset + 1 ] & 0xff ) << 8 ) ); } /** * Writes a "int" value to a byte array at a given offset. The value is * converted to the opposed endian system while writing. * @param data target byte array * @param offset starting offset in the byte array * @param value value to write */ public static void writeSwappedInteger(byte[] data, int offset, int value) { data[ offset + 0 ] = (byte)( ( value >> 0 ) & 0xff ); data[ offset + 1 ] = (byte)( ( value >> 8 ) & 0xff ); data[ offset + 2 ] = (byte)( ( value >> 16 ) & 0xff ); data[ offset + 3 ] = (byte)( ( value >> 24 ) & 0xff ); } /** * Reads a "int" value from a byte array at a given offset. The value is * converted to the opposed endian system while reading. 
* @param data source byte array * @param offset starting offset in the byte array * @return the value read */ public static int readSwappedInteger(byte[] data, int offset) { return ( ( ( data[ offset + 0 ] & 0xff ) << 0 ) + ( ( data[ offset + 1 ] & 0xff ) << 8 ) + ( ( data[ offset + 2 ] & 0xff ) << 16 ) + ( ( data[ offset + 3 ] & 0xff ) << 24 ) ); } /** * Reads an unsigned integer (32-bit) value from a byte array at a given * offset. The value is converted to the opposed endian system while * reading. * @param data source byte array * @param offset starting offset in the byte array * @return the value read */ public static long readSwappedUnsignedInteger(byte[] data, int offset) { long low = ( ( ( data[ offset + 0 ] & 0xff ) << 0 ) + ( ( data[ offset + 1 ] & 0xff ) << 8 ) + ( ( data[ offset + 2 ] & 0xff ) << 16 ) ); long high = data[ offset + 3 ] & 0xff; return (high << 24) + (0xffffffffL & low); } /** * Writes a "long" value to a byte array at a given offset. The value is * converted to the opposed endian system while writing. * @param data target byte array * @param offset starting offset in the byte array * @param value value to write */ public static void writeSwappedLong(byte[] data, int offset, long value) { data[ offset + 0 ] = (byte)( ( value >> 0 ) & 0xff ); data[ offset + 1 ] = (byte)( ( value >> 8 ) & 0xff ); data[ offset + 2 ] = (byte)( ( value >> 16 ) & 0xff ); data[ offset + 3 ] = (byte)( ( value >> 24 ) & 0xff ); data[ offset + 4 ] = (byte)( ( value >> 32 ) & 0xff ); data[ offset + 5 ] = (byte)( ( value >> 40 ) & 0xff ); data[ offset + 6 ] = (byte)( ( value >> 48 ) & 0xff ); data[ offset + 7 ] = (byte)( ( value >> 56 ) & 0xff ); } /** * Reads a "long" value from a byte array at a given offset. The value is * converted to the opposed endian system while reading. 
 * @param data source byte array
 * @param offset starting offset in the byte array
 * @return the value read
 */
public static long readSwappedLong(byte[] data, int offset) {
    // Assemble the eight little-endian bytes into two 32-bit halves.
    long low = ( ( data[ offset + 0 ] & 0xff ) << 0 ) +
        ( ( data[ offset + 1 ] & 0xff ) << 8 ) +
        ( ( data[ offset + 2 ] & 0xff ) << 16 ) +
        ( ( data[ offset + 3 ] & 0xff ) << 24 );
    long high = ( ( data[ offset + 4 ] & 0xff ) << 0 ) +
        ( ( data[ offset + 5 ] & 0xff ) << 8 ) +
        ( ( data[ offset + 6 ] & 0xff ) << 16 ) +
        ( ( data[ offset + 7 ] & 0xff ) << 24 );
    // Each half was summed in int arithmetic and then widened to long, so the
    // top byte of each half may have sign-extended into the upper 32 bits.
    // The 0xffffffffL mask strips that from 'low'; for 'high' the << 32 shifts
    // the sign-extension bits out, so the result is correct for all inputs.
    return (high << 32) + (0xffffffffL & low);
}

/**
 * Writes a "float" value to a byte array at a given offset. The value is
 * converted to the opposed endian system while writing.
 * @param data target byte array
 * @param offset starting offset in the byte array
 * @param value value to write
 */
public static void writeSwappedFloat(byte[] data, int offset, float value) {
    // Reinterpret the IEEE-754 bits as an int and reuse the int writer.
    writeSwappedInteger( data, offset, Float.floatToIntBits( value ) );
}

/**
 * Reads a "float" value from a byte array at a given offset. The value is
 * converted to the opposed endian system while reading.
 * @param data source byte array
 * @param offset starting offset in the byte array
 * @return the value read
 */
public static float readSwappedFloat(byte[] data, int offset) {
    return Float.intBitsToFloat( readSwappedInteger( data, offset ) );
}

/**
 * Writes a "double" value to a byte array at a given offset. The value is
 * converted to the opposed endian system while writing.
 * @param data target byte array
 * @param offset starting offset in the byte array
 * @param value value to write
 */
public static void writeSwappedDouble(byte[] data, int offset, double value) {
    // Reinterpret the IEEE-754 bits as a long and reuse the long writer.
    writeSwappedLong( data, offset, Double.doubleToLongBits( value ) );
}

/**
 * Reads a "double" value from a byte array at a given offset. The value is
 * converted to the opposed endian system while reading.
 * @param data source byte array
 * @param offset starting offset in the byte array
 * @return the value read
 */
public static double readSwappedDouble(byte[] data, int offset) {
    return Double.longBitsToDouble( readSwappedLong( data, offset ) );
}

/**
 * Writes a "short" value to an OutputStream. The value is
 * converted to the opposed endian system while writing.
 * @param output target OutputStream
 * @param value value to write
 * @throws IOException in case of an I/O problem
 */
public static void writeSwappedShort(OutputStream output, short value)
    throws IOException {
    // Least-significant byte first (byte-swapped relative to big-endian).
    output.write( (byte)( ( value >> 0 ) & 0xff ) );
    output.write( (byte)( ( value >> 8 ) & 0xff ) );
}

/**
 * Reads a "short" value from an InputStream. The value is
 * converted to the opposed endian system while reading.
 * @param input source InputStream
 * @return the value just read
 * @throws IOException in case of an I/O problem
 */
public static short readSwappedShort(InputStream input)
    throws IOException {
    // NOTE: relies on left-to-right evaluation so the first read() is the
    // low byte and the second is the high byte.
    return (short)( ( ( read( input ) & 0xff ) << 0 ) +
        ( ( read( input ) & 0xff ) << 8 ) );
}

/**
 * Reads a unsigned short (16-bit) from an InputStream. The value is
 * converted to the opposed endian system while reading.
 * @param input source InputStream
 * @return the value just read
 * @throws IOException in case of an I/O problem
 */
public static int readSwappedUnsignedShort(InputStream input)
    throws IOException {
    int value1 = read( input );
    int value2 = read( input );
    // Result fits in the positive int range (max 0xFFFF), hence "unsigned".
    return ( ( ( value1 & 0xff ) << 0 ) +
        ( ( value2 & 0xff ) << 8 ) );
}

/**
 * Writes a "int" value to an OutputStream. The value is
 * converted to the opposed endian system while writing.
 * @param output target OutputStream
 * @param value value to write
 * @throws IOException in case of an I/O problem
 */
public static void writeSwappedInteger(OutputStream output, int value)
    throws IOException {
    // Least-significant byte first.
    output.write( (byte)( ( value >> 0 ) & 0xff ) );
    output.write( (byte)( ( value >> 8 ) & 0xff ) );
    output.write( (byte)( ( value >> 16 ) & 0xff ) );
    output.write( (byte)( ( value >> 24 ) & 0xff ) );
}

/**
 * Reads a "int" value from an InputStream. The value is
 * converted to the opposed endian system while reading.
 * @param input source InputStream
 * @return the value just read
 * @throws IOException in case of an I/O problem
 */
public static int readSwappedInteger(InputStream input)
    throws IOException {
    int value1 = read( input );
    int value2 = read( input );
    int value3 = read( input );
    int value4 = read( input );
    return ( ( value1 & 0xff ) << 0 ) +
        ( ( value2 & 0xff ) << 8 ) +
        ( ( value3 & 0xff ) << 16 ) +
        ( ( value4 & 0xff ) << 24 );
}

/**
 * Reads a unsigned integer (32-bit) from an InputStream. The value is
 * converted to the opposed endian system while reading.
 * @param input source InputStream
 * @return the value just read
 * @throws IOException in case of an I/O problem
 */
public static long readSwappedUnsignedInteger(InputStream input)
    throws IOException {
    int value1 = read( input );
    int value2 = read( input );
    int value3 = read( input );
    int value4 = read( input );
    // Keep the top byte out of the int sum so the combined 32-bit value can be
    // returned as a non-negative long.
    long low = ( ( ( value1 & 0xff ) << 0 ) +
        ( ( value2 & 0xff ) << 8 ) +
        ( ( value3 & 0xff ) << 16 ) );
    long high = value4 & 0xff;
    return (high << 24) + (0xffffffffL & low);
}

/**
 * Writes a "long" value to an OutputStream. The value is
 * converted to the opposed endian system while writing.
 * @param output target OutputStream
 * @param value value to write
 * @throws IOException in case of an I/O problem
 */
public static void writeSwappedLong(OutputStream output, long value)
    throws IOException {
    // Least-significant byte first, all eight bytes.
    output.write( (byte)( ( value >> 0 ) & 0xff ) );
    output.write( (byte)( ( value >> 8 ) & 0xff ) );
    output.write( (byte)( ( value >> 16 ) & 0xff ) );
    output.write( (byte)( ( value >> 24 ) & 0xff ) );
    output.write( (byte)( ( value >> 32 ) & 0xff ) );
    output.write( (byte)( ( value >> 40 ) & 0xff ) );
    output.write( (byte)( ( value >> 48 ) & 0xff ) );
    output.write( (byte)( ( value >> 56 ) & 0xff ) );
}

/**
 * Reads a "long" value from an InputStream. The value is
 * converted to the opposed endian system while reading.
 * @param input source InputStream
 * @return the value just read
 * @throws IOException in case of an I/O problem
 */
public static long readSwappedLong(InputStream input)
    throws IOException {
    // Buffer the eight bytes, then delegate to the byte-array variant.
    byte[] bytes = new byte[8];
    for ( int i=0; i<8; i++ ) {
        bytes[i] = (byte) read( input );
    }
    return readSwappedLong( bytes, 0 );
}

/**
 * Writes a "float" value to an OutputStream. The value is
 * converted to the opposed endian system while writing.
 * @param output target OutputStream
 * @param value value to write
 * @throws IOException in case of an I/O problem
 */
public static void writeSwappedFloat(OutputStream output, float value)
    throws IOException {
    writeSwappedInteger( output, Float.floatToIntBits( value ) );
}

/**
 * Reads a "float" value from an InputStream. The value is
 * converted to the opposed endian system while reading.
 * @param input source InputStream
 * @return the value just read
 * @throws IOException in case of an I/O problem
 */
public static float readSwappedFloat(InputStream input)
    throws IOException {
    return Float.intBitsToFloat( readSwappedInteger( input ) );
}

/**
 * Writes a "double" value to an OutputStream. The value is
 * converted to the opposed endian system while writing.
 * @param output target OutputStream
 * @param value value to write
 * @throws IOException in case of an I/O problem
 */
public static void writeSwappedDouble(OutputStream output, double value)
    throws IOException {
    writeSwappedLong( output, Double.doubleToLongBits( value ) );
}

/**
 * Reads a "double" value from an InputStream. The value is
 * converted to the opposed endian system while reading.
 * @param input source InputStream
 * @return the value just read
 * @throws IOException in case of an I/O problem
 */
public static double readSwappedDouble(InputStream input)
    throws IOException {
    return Double.longBitsToDouble( readSwappedLong( input ) );
}

/**
 * Reads the next byte from the input stream.
 * @param input the stream
 * @return the byte
 * @throws IOException if the end of file is reached
 */
private static int read(InputStream input)
    throws IOException {
    int value = input.read();
    // InputStream.read() signals end-of-stream with -1; the swapped readers
    // above need a full value, so a truncated stream is an error here.
    if( -1 == value ) {
        throw new EOFException( "Unexpected EOF reached" );
    }
    return value;
}
}
apache-2.0
alanfgates/hive
ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinFactory.java
11118
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hive.ql.optimizer;

import java.io.Serializable;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Stack;

import org.apache.hadoop.hive.ql.exec.AbstractMapJoinOperator;
import org.apache.hadoop.hive.ql.exec.Operator;
import org.apache.hadoop.hive.ql.exec.SMBMapJoinOperator;
import org.apache.hadoop.hive.ql.exec.TableScanOperator;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.lib.Node;
import org.apache.hadoop.hive.ql.lib.NodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx;
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.hadoop.hive.ql.plan.BucketMapJoinContext;
import org.apache.hadoop.hive.ql.plan.MapJoinDesc;
import org.apache.hadoop.hive.ql.plan.MapWork;
import org.apache.hadoop.hive.ql.plan.MapredLocalWork;
import org.apache.hadoop.hive.ql.plan.MapredWork;
import org.apache.hadoop.hive.ql.plan.OperatorDesc;

/**
 * Operator factory for MapJoin processing.
 */
public final class MapJoinFactory {

  /**
   * Returns the index that the operator immediately below {@code op} on the
   * walker stack occupies in {@code op}'s parent-operator list, i.e. which of
   * the n branches of an n-way mapjoin this traversal arrived on.
   *
   * @param op the mapjoin operator at the top of the stack
   * @param stack the operator walker stack; the element just below {@code op}
   *          is taken to be the parent that was walked through
   * @return the parent's position in {@code op.getParentOperators()}
   */
  public static int getPositionParent(AbstractMapJoinOperator<? extends MapJoinDesc> op,
      Stack<Node> stack) {
    int pos = 0;
    int size = stack.size();
    assert size >= 2 && stack.get(size - 1) == op;
    Operator<? extends OperatorDesc> parent =
        (Operator<? extends OperatorDesc>) stack.get(size - 2);
    List<Operator<? extends OperatorDesc>> parOp = op.getParentOperators();
    pos = parOp.indexOf(parent);
    assert pos < parOp.size();
    return pos;
  }

  /**
   * MapJoin processor.
   * The user can specify a mapjoin hint to specify that the input should be processed as a
   * mapjoin instead of map-reduce join. If hive.auto.convert.join is set to true, the
   * user need not specify the hint explicitly, but hive will automatically process the joins
   * as a mapjoin whenever possible. However, a join can only be processed as a bucketized
   * map-side join or a sort merge join, if the user has provided the hint explicitly. This
   * will be fixed as part of HIVE-3433, and eventually, we should remove support for mapjoin
   * hint.
   * However, currently, the mapjoin hint is processed as follows:
   * A mapjoin will have 'n' parents for a n-way mapjoin, and therefore the mapjoin operator
   * will be encountered 'n' times (one for each parent). Since a reduceSink operator is not
   * allowed before a mapjoin, the task for the mapjoin will always be a root task. The task
   * corresponding to the mapjoin is converted to a root task when the operator is encountered
   * for the first time. When the operator is encountered subsequently, the current task is
   * merged with the root task for the mapjoin. Note that, it is possible that the map-join task
   * may be performed as a bucketized map-side join (or sort-merge join), the map join operator
   * is enhanced to contain the bucketing info. when it is encountered.
   */
  private static class TableScanMapJoinProcessor implements NodeProcessor {

    /**
     * Copies the bucketing metadata carried by {@code currMapJoinOp} into the
     * map work's local-work plan so a bucketized map-side join (or sort-merge
     * join) can match big-table files to small-table buckets at runtime. For
     * an SMB join the local work is attached to the operator itself rather
     * than to the plan. No-op when the operator carries no alias-to-bucket
     * file mapping.
     *
     * @param plan the map work being assembled for the current task
     * @param currMapJoinOp the mapjoin operator whose conf holds bucketing info
     */
    public static void setupBucketMapJoinInfo(MapWork plan,
        AbstractMapJoinOperator<? extends MapJoinDesc> currMapJoinOp) {
      if (currMapJoinOp != null) {
        Map<String, Map<String, List<String>>> aliasBucketFileNameMapping =
            currMapJoinOp.getConf().getAliasBucketFileNameMapping();
        if (aliasBucketFileNameMapping != null) {
          MapredLocalWork localPlan = plan.getMapRedLocalWork();
          if (localPlan == null) {
            if (currMapJoinOp instanceof SMBMapJoinOperator) {
              localPlan = ((SMBMapJoinOperator) currMapJoinOp).getConf().getLocalWork();
            }
          } else {
            // local plan is not null, we want to merge it into SMBMapJoinOperator's local work
            if (currMapJoinOp instanceof SMBMapJoinOperator) {
              MapredLocalWork smbLocalWork = ((SMBMapJoinOperator) currMapJoinOp).getConf()
                  .getLocalWork();
              if (smbLocalWork != null) {
                localPlan.getAliasToFetchWork().putAll(smbLocalWork.getAliasToFetchWork());
                localPlan.getAliasToWork().putAll(smbLocalWork.getAliasToWork());
              }
            }
          }

          if (localPlan == null) {
            return;
          }

          if (currMapJoinOp instanceof SMBMapJoinOperator) {
            // SMB joins own their local work directly; clear it from the plan.
            plan.setMapRedLocalWork(null);
            ((SMBMapJoinOperator) currMapJoinOp).getConf().setLocalWork(localPlan);
          } else {
            plan.setMapRedLocalWork(localPlan);
          }
          BucketMapJoinContext bucketMJCxt = new BucketMapJoinContext();
          localPlan.setBucketMapjoinContext(bucketMJCxt);
          bucketMJCxt.setAliasBucketFileNameMapping(aliasBucketFileNameMapping);
          bucketMJCxt.setBucketFileNameMapping(
              currMapJoinOp.getConf().getBigTableBucketNumMapping());
          localPlan.setInputFileChangeSensitive(true);
          bucketMJCxt.setMapJoinBigTableAlias(currMapJoinOp.getConf().getBigTableAlias());
          bucketMJCxt
              .setBucketMatcherClass(org.apache.hadoop.hive.ql.exec.DefaultBucketMatcher.class);
          bucketMJCxt.setBigTablePartSpecToFileMapping(
              currMapJoinOp.getConf().getBigTablePartSpecToFileMapping());
          // BucketizedHiveInputFormat should be used for either sort merge join or bucket map join
          if ((currMapJoinOp instanceof SMBMapJoinOperator)
              || (currMapJoinOp.getConf().isBucketMapJoin())) {
            plan.setUseBucketizedHiveInputFormat(true);
          }
        }
      }
    }

    /**
     * Initialize the current plan by adding it to root tasks. Since a reduce sink
     * cannot be present before a mapjoin, and the mapjoin operator is encountered
     * for the first time, the task corresponding to the mapjoin is added to the
     * root tasks.
     *
     * @param op
     *          the map join operator encountered
     * @param currTask
     *          the task the mapjoin currently belongs to
     * @param opProcCtx
     *          processing context
     * @param local
     *          whether the branch being walked is a small (local) table
     */
    private static void initMapJoinPlan(AbstractMapJoinOperator<? extends MapJoinDesc> op,
        Task<? extends Serializable> currTask,
        GenMRProcContext opProcCtx, boolean local)
        throws SemanticException {
      // The map is overloaded to keep track of mapjoins also
      opProcCtx.getOpTaskMap().put(op, currTask);

      TableScanOperator currTopOp = opProcCtx.getCurrTopOp();
      String currAliasId = opProcCtx.getCurrAliasId();
      GenMapRedUtils.setTaskPlan(currAliasId, currTopOp, currTask, local, opProcCtx);
    }

    /**
     * Merge the current task with the task for the current mapjoin. The mapjoin operator
     * has already been encountered.
     *
     * @param oldTask
     *          the old task for the current mapjoin
     * @param opProcCtx
     *          processing context
     * @param local
     *          whether the branch being walked is a small (local) table
     */
    private static void joinMapJoinPlan(Task<? extends Serializable> oldTask,
        GenMRProcContext opProcCtx, boolean local)
        throws SemanticException {
      TableScanOperator currTopOp = opProcCtx.getCurrTopOp();
      GenMapRedUtils.mergeInput(currTopOp, opProcCtx, oldTask, local);
    }

    /*
     * The mapjoin operator will be encountered many times (n times for a n-way join). Since a
     * reduceSink operator is not allowed before a mapjoin, the task for the mapjoin will always
     * be a root task. The task corresponding to the mapjoin is converted to a root task when the
     * operator is encountered for the first time. When the operator is encountered subsequently,
     * the current task is merged with the root task for the mapjoin. Note that, it is possible
     * that the map-join task may be performed as a bucketized map-side join (or sort-merge join),
     * the map join operator is enhanced to contain the bucketing info. when it is encountered.
     */
    @Override
    public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
        Object... nodeOutputs) throws SemanticException {
      AbstractMapJoinOperator<MapJoinDesc> mapJoin = (AbstractMapJoinOperator<MapJoinDesc>) nd;
      GenMRProcContext ctx = (GenMRProcContext) procCtx;

      // find the branch on which this processor was invoked
      int pos = getPositionParent(mapJoin, stack);

      Map<Operator<? extends OperatorDesc>, GenMapRedCtx> mapCurrCtx = ctx
          .getMapCurrCtx();
      GenMapRedCtx mapredCtx = mapCurrCtx.get(mapJoin.getParentOperators().get(pos));
      Task<? extends Serializable> currTask = mapredCtx.getCurrTask();
      MapredWork currPlan = (MapredWork) currTask.getWork();
      String currAliasId = mapredCtx.getCurrAliasId();
      HashMap<Operator<? extends OperatorDesc>, Task<? extends Serializable>> opTaskMap =
          ctx.getOpTaskMap();
      Task<? extends Serializable> oldTask = opTaskMap.get(mapJoin);

      ctx.setCurrAliasId(currAliasId);
      ctx.setCurrTask(currTask);

      // If we are seeing this mapjoin for the first time, initialize the plan.
      // If we are seeing this mapjoin for the second or later time then at least one of the
      // branches for this mapjoin has been encountered. Join the plan with the plan created
      // the first time.
      boolean local = pos != mapJoin.getConf().getPosBigTable();
      if (oldTask == null) {
        assert currPlan.getReduceWork() == null;
        initMapJoinPlan(mapJoin, currTask, ctx, local);
      } else {
        // The current plan can be thrown away after being merged with the
        // original plan
        joinMapJoinPlan(oldTask, ctx, local);
        ctx.setCurrTask(currTask = oldTask);
      }
      MapredWork plan = (MapredWork) currTask.getWork();
      setupBucketMapJoinInfo(plan.getMapWork(), mapJoin);

      mapCurrCtx.put(mapJoin, new GenMapRedCtx(ctx.getCurrTask(), ctx.getCurrAliasId()));

      // local aliases need not to hand over context further
      return !local;
    }
  }

  public static NodeProcessor getTableScanMapJoin() {
    return new TableScanMapJoinProcessor();
  }

  private MapJoinFactory() {
    // prevent instantiation
  }
}
apache-2.0
ankushsood/strategist
src/main/webapp/src/admin/admin.controller.js
944
(function() {
    'use strict';

    // NOTE(review): 'AdminHeaderCotroller' is a typo of 'Controller', but the
    // misspelled name is what templates resolve against, so it is kept as-is.
    AdminHeaderCotroller.$inject = ['$location', '$cookieStore'];

    angular
        .module('admin')
        .controller('AdminMenuController', AdminMenuController)
        .controller('AdminHomeController', AdminHomeController)
        .controller('AdminHeaderCotroller', AdminHeaderCotroller)
        .controller('AdminController', AdminController);

    /** Controller for the admin menu. Currently empty. */
    function AdminMenuController() {
    }

    /** Controller for the admin home view. Currently empty. */
    function AdminHomeController() {
    }

    /**
     * Controller for the admin header. Exposes a logout action that clears
     * the auth cookies and routes back to the landing page.
     */
    function AdminHeaderCotroller($location, $cookieStore) {
        console.log('-------------------------------');

        var vm = this;
        vm.sss = 'asdfasfasf';

        vm.logout = function() {
            console.log('~~~~~~~~~~~~~~' + $cookieStore.get('accessToken'));
            // Drop the session cookies, then return to the public entry route.
            $cookieStore.remove('accessToken');
            $cookieStore.remove('user');
            $location.path("/");
            console.log('~~~~~~~~~~~~~~' + $cookieStore.get('accessToken'));
        };
    }

    /** Top-level admin controller. Currently empty. */
    function AdminController() {
    }
})();
apache-2.0
oehme/analysing-gradle-performance
my-app/src/test/java/org/gradle/test/performance/mediummonolithicjavaproject/p231/Test4638.java
2111
package org.gradle.test.performance.mediummonolithicjavaproject.p231;

import org.junit.Test;

import static org.junit.Assert.*;

/**
 * Round-trip tests for the String properties of {@code Production4638}:
 * each test pushes a value through the setter and verifies the matching
 * getter returns it unchanged.
 */
public class Test4638 {

    Production4638 objectUnderTest = new Production4638();

    @Test
    public void testProperty0() {
        String expected = "value";
        objectUnderTest.setProperty0(expected);
        String actual = objectUnderTest.getProperty0();
        assertEquals(expected, actual);
    }

    @Test
    public void testProperty1() {
        String expected = "value";
        objectUnderTest.setProperty1(expected);
        String actual = objectUnderTest.getProperty1();
        assertEquals(expected, actual);
    }

    @Test
    public void testProperty2() {
        String expected = "value";
        objectUnderTest.setProperty2(expected);
        String actual = objectUnderTest.getProperty2();
        assertEquals(expected, actual);
    }

    @Test
    public void testProperty3() {
        String expected = "value";
        objectUnderTest.setProperty3(expected);
        String actual = objectUnderTest.getProperty3();
        assertEquals(expected, actual);
    }

    @Test
    public void testProperty4() {
        String expected = "value";
        objectUnderTest.setProperty4(expected);
        String actual = objectUnderTest.getProperty4();
        assertEquals(expected, actual);
    }

    @Test
    public void testProperty5() {
        String expected = "value";
        objectUnderTest.setProperty5(expected);
        String actual = objectUnderTest.getProperty5();
        assertEquals(expected, actual);
    }

    @Test
    public void testProperty6() {
        String expected = "value";
        objectUnderTest.setProperty6(expected);
        String actual = objectUnderTest.getProperty6();
        assertEquals(expected, actual);
    }

    @Test
    public void testProperty7() {
        String expected = "value";
        objectUnderTest.setProperty7(expected);
        String actual = objectUnderTest.getProperty7();
        assertEquals(expected, actual);
    }

    @Test
    public void testProperty8() {
        String expected = "value";
        objectUnderTest.setProperty8(expected);
        String actual = objectUnderTest.getProperty8();
        assertEquals(expected, actual);
    }

    @Test
    public void testProperty9() {
        String expected = "value";
        objectUnderTest.setProperty9(expected);
        String actual = objectUnderTest.getProperty9();
        assertEquals(expected, actual);
    }
}
apache-2.0
jcfr/Midas
modules/tracker/database/upgrade/1.0.1.php
2010
<?php
/*=========================================================================
 Midas Server
 Copyright Kitware SAS, 26 rue Louis Guérin, 69100 Villeurbanne, France.
 All rights reserved.
 For more information visit http://www.kitware.com/.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at

         http://www.apache.org/licenses/LICENSE-2.0.txt

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
=========================================================================*/

/**
 * Upgrade the tracker module to version 1.0.1. Add a user_id value to a scalar
 * record indicating which user uploaded the scalar, a binary "official" flag to
 * a scalar record indicating if it is an official or experimental submission,
 * and add a user_id index to the tracker_scalar table.
 */
class Tracker_Upgrade_1_0_1 extends MIDASUpgrade
{
    /** Upgrade a MySQL database. */
    public function mysql()
    {
        // Issue each DDL statement in order: add the two columns, then index user_id.
        $statements = array(
            "ALTER TABLE `tracker_scalar` ADD COLUMN `user_id` bigint(20) NOT NULL DEFAULT '-1';",
            "ALTER TABLE `tracker_scalar` ADD COLUMN `official` tinyint(4) NOT NULL DEFAULT '1';",
            'ALTER TABLE `tracker_scalar` ADD KEY (`user_id`);',
        );
        foreach ($statements as $sql) {
            $this->db->query($sql);
        }
    }

    /** Upgrade a PostgreSQL database. */
    public function pgsql()
    {
        // Same schema change as mysql(), expressed in PostgreSQL dialect.
        $statements = array(
            'ALTER TABLE tracker_scalar ADD COLUMN user_id bigint NOT NULL DEFAULT -1::bigint;',
            'ALTER TABLE tracker_scalar ADD COLUMN official smallint NOT NULL DEFAULT 1::smallint;',
            'CREATE INDEX tracker_scalar_idx_user_id ON tracker_scalar (user_id);',
        );
        foreach ($statements as $sql) {
            $this->db->query($sql);
        }
    }
}
apache-2.0
lawmurray/Birch
driver/src/statement/MemberFunction.hpp
670
/** * @file */ #pragma once #include "src/statement/Function.hpp" namespace birch { /** * Member function. * * @ingroup statement */ class MemberFunction: public Function { public: /** * Constructor. * * @param annotation Annotation. * @param name Name. * @param typeParams Type parameters. * @param params Parameters. * @param returnType Return type. * @param braces Body. * @param loc Location. */ MemberFunction(const Annotation annotation, Name* name, Expression* typeParams, Expression* params, Type* returnType, Statement* braces, Location* loc = nullptr); virtual void accept(Visitor* visitor) const; }; }
apache-2.0
sjbutler/jim
src/uk/ac/open/crc/jim/parser/java14/Java14ParserDefaultVisitor.java
12194
/* Generated By:JavaCC: Do not edit this line. Java14ParserDefaultVisitor.java Version 6.0_1 */
package uk.ac.open.crc.jim.parser.java14;

/**
 * JavaCC-generated default implementation of {@code Java14ParserVisitor}.
 * Every {@code visit} overload delegates to {@link #defaultVisit}, which
 * simply recurses into the node's children; subclasses override individual
 * {@code visit} methods to handle specific node types.
 *
 * NOTE: this file is generated by JavaCC (see the header and the checksum
 * at the bottom) — do not hand-edit the method bodies.
 */
public class Java14ParserDefaultVisitor implements Java14ParserVisitor{
  // Depth-first traversal: visit all children with this visitor, return data unchanged.
  public Object defaultVisit(SimpleNode node, Object data){
    node.childrenAccept(this, data);
    return data;
  }
  public Object visit(SimpleNode node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTCompilationUnit node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTPackageDeclaration node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTImportDeclaration node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTModifiers node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTTypeDeclaration node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTClassOrInterfaceDeclaration node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTExtendsList node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTImplementsList node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTTypeParameters node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTTypeParameter node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTTypeBound node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTClassOrInterfaceBody node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTClassOrInterfaceBodyDeclaration node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTFieldDeclaration node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTVariableDeclarator node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTVariableDeclaratorId node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTVariableInitializer node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTArrayInitializer node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTMethodDeclaration node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTMethodDeclarator node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTFormalParameters node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTFormalParameter node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTConstructorDeclaration node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTExplicitConstructorInvocation node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTInitializer node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTType node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTReferenceType node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTClassOrInterfaceType node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTTypeArguments node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTTypeArgument node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTWildcardBounds node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTPrimitiveType node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTResultType node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTName node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTNameList node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTExpression node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTAssignmentOperator node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTConditionalExpression node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTConditionalOrExpression node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTConditionalAndExpression node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTInclusiveOrExpression node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTExclusiveOrExpression node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTAndExpression node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTEqualityExpression node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTInstanceOfExpression node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTRelationalExpression node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTShiftExpression node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTAdditiveExpression node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTMultiplicativeExpression node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTUnaryExpression node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTPreIncrementExpression node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTPreDecrementExpression node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTUnaryExpressionNotPlusMinus node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTCastLookahead node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTPostfixExpression node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTCastExpression node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTPrimaryExpression node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTMemberSelector node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTPrimaryPrefix node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTPrimarySuffix node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTLiteral node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTStringLiteral node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTCharacterLiteral node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTFloatingPointLiteral node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTIntegerLiteral node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTBooleanLiteral node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTNullLiteral node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTArguments node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTArgumentList node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTAllocationExpression node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTArrayDimsAndInits node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTStatement node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTAssertStatement node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTLabeledStatement node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTBlock node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTBlockStatement node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTLocalVariableDeclaration node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTEmptyStatement node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTStatementExpression node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTSwitchStatement node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTSwitchLabel node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTCaseLabel node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTDefaultLabel node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTIfStatement node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTWhileStatement node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTDoStatement node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTForStatement node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTForInit node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTStatementExpressionList node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTForUpdate node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTBreakStatement node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTContinueStatement node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTReturnStatement node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTThrowStatement node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTSynchronizedStatement node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTTryStatement node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTCatchClause node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTRUNSIGNEDSHIFT node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTRSIGNEDSHIFT node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTIdentifier node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTLeftParenthesis node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTRightParenthesis node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTLeftBrace node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTRightBrace node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTLeftAngle node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTRightAngle node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTLeftSquare node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTRightSquare node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTSemiColon node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTColon node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTAssignment node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTComma node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTDot node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTAtSign node, Object data){ return defaultVisit(node, data); }
  public Object visit(ASTStarWildcard node, Object data){ return defaultVisit(node, data); }
}
/* JavaCC - OriginalChecksum=18f64b75de1332adee488bed33482be5 (do not edit this line) */
apache-2.0
corestoreio/csfw
net/ratelimit/memstore/memstore.go
3519
// Copyright 2015-present, Cyrill @ Schumacher.fm and the CoreStore contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package memstore wires the in-memory throttled/v2 GCRA store into the
// ratelimit service's scope-based option machinery.
package memstore

import (
	"github.com/corestoreio/errors"
	"github.com/corestoreio/log"
	"github.com/corestoreio/pkg/config"
	"github.com/corestoreio/pkg/config/cfgmodel"
	"github.com/corestoreio/pkg/net/ratelimit"
	"github.com/corestoreio/pkg/store/scope"
	"github.com/throttled/throttled/v2/store/memstore"
)

// OptionName identifies this package within the register of the
// backendratelimit.Backend type.
const OptionName = `memstore`

// NewOptionFactory creates a new option factory function for the memstore in the
// backend package to be used for automatic scope based configuration
// initialization. Configuration values are read from package
// backendratelimit.Configuration.
func NewOptionFactory(burst, requests cfgmodel.Int, duration cfgmodel.Str, gcraMaxMemoryKeys cfgmodel.Int) (optionName string, _ ratelimit.OptionFactoryFunc) { return OptionName, func(sg config.Scoped) []ratelimit.Option { burst, err := burst.Get(sg) if err != nil { return ratelimit.OptionsError(errors.Wrap(err, "[memstore] RateLimitBurst.Get")) } req, err := requests.Get(sg) if err != nil { return ratelimit.OptionsError(errors.Wrap(err, "[memstore] RateLimitRequests.Get")) } durRaw, err := duration.Get(sg) if err != nil { return ratelimit.OptionsError(errors.Wrap(err, "[memstore] RateLimitDuration.Get")) } if len(durRaw) != 1 { return ratelimit.OptionsError(errors.NewFatalf("[memstore] RateLimitDuration invalid character count: %q. Should be one character long.", durRaw)) } dur := rune(durRaw[0]) useInMemMaxKeys, err := gcraMaxMemoryKeys.Get(sg) if err != nil { return ratelimit.OptionsError(errors.Wrap(err, "[memstore] RateLimitStorageGcraMaxMemoryKeys.Get")) } else if useInMemMaxKeys > 0 { return []ratelimit.Option{ WithGCRA(useInMemMaxKeys, dur, req, burst, sg.ScopeIDs()...), } } return ratelimit.OptionsError(errors.NewEmptyf("[memstore] Memstore not active because RateLimitStorageGcraMaxMemoryKeys is %d.", useInMemMaxKeys)) } } // WithGCRA creates a memory based GCRA rate limiter. // Duration: (s second,i minute,h hour,d day). // GCRA => https://en.wikipedia.org/wiki/Generic_cell_rate_algorithm // This function implements a debug log. 
func WithGCRA(maxKeys int, duration rune, requests, burst int, scopeIDs ...scope.TypeID) ratelimit.Option { return func(s *ratelimit.Service) error { rlStore, err := memstore.New(maxKeys) if err != nil { return errors.NewFatalf("[memstore] memstore.New MaxKeys(%d): %s", maxKeys, err) } if s.Log.IsDebug() { s.Log.Debug("ratelimit.memstore.WithGCRA", log.Stringer("scope", scope.TypeIDs(scopeIDs)), log.Int("max_keys", maxKeys), log.String("duration", string(duration)), log.Int("requests", requests), log.Int("burst", burst), ) } return ratelimit.WithGCRAStore(rlStore, duration, requests, burst, scopeIDs...)(s) } }
apache-2.0
yuananf/presto
presto-tests/src/test/java/com/facebook/presto/tests/TestProcedureCall.java
6961
/*
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.facebook.presto.tests;

import com.facebook.presto.Session;
import com.facebook.presto.connector.ConnectorId;
import com.facebook.presto.metadata.ProcedureRegistry;
import com.facebook.presto.server.testing.TestingPrestoServer;
import com.facebook.presto.testing.ProcedureTester;
import com.facebook.presto.tests.tpch.TpchQueryRunnerBuilder;
import org.intellij.lang.annotations.Language;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;

import java.util.List;

import static com.facebook.presto.testing.TestingSession.TESTING_CATALOG;
import static com.facebook.presto.testing.TestingSession.testSessionBuilder;
import static java.lang.String.format;
import static java.util.Arrays.asList;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertFalse;
import static org.testng.Assert.fail;

/**
 * End-to-end tests for the SQL {@code CALL} statement: registers a set of
 * testing procedures in the bogus testing catalog and verifies argument
 * binding (positional, named, coerced, null), error propagation, and
 * analyzer failure messages.
 *
 * <p>Single-threaded because all test methods share the mutable
 * {@link ProcedureTester} recorder.
 */
@Test(singleThreaded = true)
public class TestProcedureCall
        extends AbstractTestQueryFramework
{
    // Schema under which the testing procedures are registered.
    private static final String PROCEDURE_SCHEMA = "procedure_schema";

    // Records the name and arguments of the last procedure invocation.
    private ProcedureTester tester;
    // Session whose default catalog/schema point at the registered procedures.
    private Session session;

    public TestProcedureCall()
    {
        super(() -> TpchQueryRunnerBuilder.builder().build());
    }

    /**
     * Registers the testing procedures on the coordinator and builds a session
     * whose catalog/schema defaults resolve unqualified procedure names.
     */
    @BeforeClass
    public void setUp()
    {
        TestingPrestoServer coordinator = ((DistributedQueryRunner) getQueryRunner()).getCoordinator();
        tester = coordinator.getProcedureTester();

        // register procedures in the bogus testing catalog
        ProcedureRegistry procedureRegistry = coordinator.getMetadata().getProcedureRegistry();
        TestingProcedures procedures = new TestingProcedures(coordinator.getProcedureTester());
        procedureRegistry.addProcedures(
                new ConnectorId(TESTING_CATALOG),
                procedures.getProcedures(PROCEDURE_SCHEMA));

        session = testSessionBuilder()
                .setCatalog(TESTING_CATALOG)
                .setSchema(PROCEDURE_SCHEMA)
                .build();
    }

    // Drop references so the framework can release the query runner.
    @AfterClass(alwaysRun = true)
    public void tearDown()
    {
        tester = null;
        session = null;
    }

    @Override
    protected Session getSession()
    {
        return session;
    }

    @Test
    public void testProcedureCall()
    {
        // unqualified, schema-qualified, and fully-qualified procedure names
        assertCall("CALL test_simple()", "simple");
        assertCall(format("CALL %s.test_simple()", PROCEDURE_SCHEMA), "simple");
        assertCall(format("CALL %s.%s.test_simple()", TESTING_CATALOG, PROCEDURE_SCHEMA), "simple");

        // positional arguments, including implicit numeric coercion (88 -> 88.0)
        assertCall("CALL test_args(123, 4.5, 'hello', true)", "args", 123L, 4.5, "hello", true);
        assertCall("CALL test_args(-5, nan(), 'bye', false)", "args", -5L, Double.NaN, "bye", false);
        assertCall("CALL test_args(3, 88, 'coerce', true)", "args", 3L, 88.0, "coerce", true);

        // named arguments, in declaration order and reversed
        assertCall("CALL test_args(x => 123, y => 4.5, z => 'hello', q => true)", "args", 123L, 4.5, "hello", true);
        assertCall("CALL test_args(q => true, z => 'hello', y => 4.5, x => 123)", "args", 123L, 4.5, "hello", true);

        // null-tolerant parameters
        assertCall("CALL test_nulls(123, null)", "nulls", 123L, null);
        assertCall("CALL test_nulls(null, 'apple')", "nulls", null, "apple");

        // array and nested-array arguments, including empty arrays
        assertCall("CALL test_arrays(ARRAY [12, 34], ARRAY['abc', 'xyz'])", "arrays", list(12L, 34L), list("abc", "xyz"));
        assertCall("CALL test_arrays(ARRAY [], ARRAY[])", "arrays", list(), list());

        assertCall("CALL test_nested(ARRAY [ARRAY[12, 34], ARRAY[56]])", "nested", list(list(12L, 34L), list(56L)));
        assertCall("CALL test_nested(ARRAY [])", "nested", list());
        assertCall("CALL test_nested(ARRAY [ARRAY[]])", "nested", list(list()));

        // ConnectorSession parameter position does not affect SQL arguments
        assertCall("CALL test_session_first(123)", "session_first", 123L);
        assertCall("CALL test_session_last('grape')", "session_last", "grape");

        // exceptions/errors raised inside the procedure propagate to the caller
        assertCallThrows("CALL test_exception()", "exception", "test exception from procedure");
        assertCallThrows("CALL test_error()", "error", "test error from procedure");

        // null passed to non-nullable parameters fails before invocation
        assertCallFails("CALL test_args(null, 4.5, 'hello', true)", "Procedure argument cannot be null: x");
        assertCallFails("CALL test_args(123, null, 'hello', true)", "Procedure argument cannot be null: y");
        assertCallFails("CALL test_args(123, 4.5, 'hello', null)", "Procedure argument cannot be null: q");

        // analyzer-level failures: arity, mixing, duplicates, unknown names, bad casts
        assertCallFails("CALL test_simple(123)", "line 1:1: Too many arguments for procedure");
        assertCallFails("CALL test_args(123, 4.5, 'hello')", "line 1:1: Too few arguments for procedure");
        assertCallFails("CALL test_args(x => 123, y => 4.5, q => true)", "line 1:1: Too few arguments for procedure");
        assertCallFails("CALL test_args(123, 4.5, 'hello', q => true)", "line 1:1: Named and positional arguments cannot be mixed");
        assertCallFails("CALL test_args(x => 3, x => 4)", "line 1:24: Duplicate procedure argument: x");
        assertCallFails("CALL test_args(t => 404)", "line 1:16: Unknown argument name: t");
        assertCallFails("CALL test_nulls('hello', null)", "line 1:17: Cannot cast type bigint to varchar(5)");
        assertCallFails("CALL test_nulls(null, 123)", "line 1:23: Cannot cast type varchar to integer");
    }

    /**
     * Runs the CALL and asserts the procedure was invoked with the given name
     * and exact argument list.
     */
    private void assertCall(@Language("SQL") String sql, String name, Object... arguments)
    {
        tester.reset();
        assertUpdate(sql);
        assertEquals(tester.getCalledName(), name);
        assertEquals(tester.getCalledArguments(), list(arguments));
    }

    /**
     * Runs the CALL expecting the procedure itself to throw: the procedure is
     * recorded as invoked (with no arguments) and the exception message matches.
     */
    private void assertCallThrows(@Language("SQL") String sql, String name, String message)
    {
        tester.reset();
        try {
            assertUpdate(sql);
            fail("expected exception");
        }
        catch (RuntimeException e) {
            assertEquals(tester.getCalledName(), name);
            assertEquals(tester.getCalledArguments(), list());
            assertEquals(e.getMessage(), message);
        }
    }

    /**
     * Runs the CALL expecting a failure before the procedure is invoked
     * (analysis or argument binding), asserting the failure message.
     */
    private void assertCallFails(@Language("SQL") String sql, String message)
    {
        tester.reset();
        try {
            assertUpdate(sql);
            fail("expected exception");
        }
        catch (RuntimeException e) {
            assertFalse(tester.wasCalled());
            assertEquals(e.getMessage(), message);
        }
    }

    // Varargs-to-List helper; safe because the array is never stored.
    @SafeVarargs
    private static <T> List<T> list(T... elements)
    {
        return asList(elements);
    }
}
apache-2.0
OpenHFT/Chronicle-Wire
src/test/java/net/openhft/chronicle/wire/bytesmarshallable/BytesMarshallableTest.java
8267
package net.openhft.chronicle.wire.bytesmarshallable;

import net.openhft.chronicle.bytes.Bytes;
import net.openhft.chronicle.bytes.BytesIn;
import net.openhft.chronicle.bytes.BytesOut;
import net.openhft.chronicle.core.Maths;
import net.openhft.chronicle.core.io.IORuntimeException;
import net.openhft.chronicle.core.pool.ClassAliasPool;
import net.openhft.chronicle.wire.BytesInBinaryMarshallable;
import net.openhft.chronicle.wire.Wire;
import net.openhft.chronicle.wire.WireTestCommon;
import net.openhft.chronicle.wire.WireType;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;

import java.nio.BufferUnderflowException;
import java.util.Arrays;
import java.util.Collection;

import static org.junit.Assert.assertEquals;

/**
 * Round-trip tests for DTOs written as BytesMarshallable, parameterized over
 * TEXT and BINARY_LIGHT wire types. Compares the serialized form against an
 * exact debug-string snapshot and verifies that reading back reproduces the
 * original objects — both with the default field-by-field marshalling
 * (PrimDto/ScalarDto) and with hand-written stop-bit overrides
 * (PrimDto2/ScalarDto2).
 */
@SuppressWarnings("rawtypes")
@RunWith(value = Parameterized.class)
public class BytesMarshallableTest extends WireTestCommon {
    // Wire type under test for this parameterized run.
    private final WireType wireType;

    public BytesMarshallableTest(WireType wireType) {
        this.wireType = wireType;
    }

    // One run per wire type.
    @Parameterized.Parameters
    public static Collection<Object[]> combinations() {
        return Arrays.asList(
                new Object[]{WireType.TEXT},
                new Object[]{WireType.BINARY_LIGHT}
        );
    }

    // Fresh wire backed by an elastic heap buffer.
    private Wire createWire() {
        return wireType.apply(Bytes.elasticHeapByteBuffer(64));
    }

    /**
     * Writes a PrimDto and a ScalarDto using default marshalling, checks the
     * exact on-wire debug representation, then reads them back twice to
     * confirm the read position handling and equality.
     */
    @SuppressWarnings("incomplete-switch")
    @Test
    public void primitiveDto() {
        Wire wire = createWire();
        PrimDto dto1 = PrimDto.init(1);
        wire.write("prim").marshallable(dto1);

        ScalarDto sdto1 = ScalarDto.init(1);
        wire.write("scalar").marshallable(sdto1);

        // Snapshot of the serialized bytes, per wire type.
        String expected = "Unknown wire type";
        switch (wireType) {
            case TEXT:
                expected = "[pos: 0, rlim: 159, wlim: 2147483632, cap: 2147483632 ] ǁprim: {⒑ flag: true,⒑ s8: 1,⒑ ch: \"\\x01\",⒑ s16: 1,⒑ s32: 1,⒑ s64: 1,⒑ f32: 1.0,⒑ f64: 1.0⒑}⒑scalar: {⒑ text: Hello1,⒑ buffer: bye 1,⒑ bytes: hi 1⒑}⒑‡٠٠٠٠٠٠٠٠";
                break;
            case BINARY_LIGHT:
                expected = "[pos: 0, rlim: 69, wlim: 2147483632, cap: 2147483632 ] ǁÄprim\\u0082\\u001D٠٠٠Y⒈⒈⒈٠⒈٠٠٠⒈٠٠٠٠٠٠٠٠٠\\u0080?٠٠٠٠٠٠ð?Æscalar\\u0082⒙٠٠٠⒍Hello1⒌bye 1⒋hi 1‡٠٠٠٠٠٠٠٠٠٠٠";
                break;
        }
        assertEquals(expected, wire.bytes().toDebugString());

        PrimDto dto2 = new PrimDto();
        ScalarDto sdto2 = new ScalarDto();

        // Read twice from position 0 to verify re-readability.
        for (int i = 0; i < 2; i++) {
            wire.bytes().readPosition(0);
            wire.read("prim").marshallable(dto2);
            assertEquals(dto1, dto2);

            wire.read("scalar").marshallable(sdto2);
            assertEquals(sdto1, sdto2);
        }
    }

    /**
     * Same round trip as primitiveDto() but with the stop-bit-encoded
     * subclasses; the binary form is smaller (rlim 50 vs 69). Also checks
     * the YAML-style toString() output after registering class aliases.
     */
    @SuppressWarnings("incomplete-switch")
    @Test
    public void primitiveDto2() {
        Wire wire = createWire();
        PrimDto2 dto1 = PrimDto2.init(1);
        wire.write("prim").marshallable(dto1);

        ScalarDto2 sdto1 = ScalarDto2.init(1);
        wire.write("scalar").marshallable(sdto1);

        String expected = "Unknown wire type";
        switch (wireType) {
            case TEXT:
                expected = "[pos: 0, rlim: 159, wlim: 2147483632, cap: 2147483632 ] ǁprim: {⒑ flag: true,⒑ s8: 1,⒑ ch: \"\\x01\",⒑ s16: 1,⒑ s32: 1,⒑ s64: 1,⒑ f32: 1.0,⒑ f64: 1.0⒑}⒑scalar: {⒑ text: Hello1,⒑ buffer: bye 1,⒑ bytes: hi 1⒑}⒑‡٠٠٠٠٠٠٠٠";
                break;
            case BINARY_LIGHT:
                expected = "[pos: 0, rlim: 50, wlim: 2147483632, cap: 2147483632 ] ǁÄprim\\u0082⒑٠٠٠Y⒈⒈⒈⒈⒈\\u009F|\\u009F|Æscalar\\u0082⒙٠٠٠⒍Hello1⒌bye 1⒋hi 1‡٠٠٠٠٠٠٠٠٠٠٠٠٠٠";
                break;
        }
        assertEquals(expected, wire.bytes().toDebugString());

        PrimDto2 dto2 = new PrimDto2();
        ScalarDto2 sdto2 = new ScalarDto2();

        for (int i = 0; i < 2; i++) {
            wire.bytes().readPosition(0);
            wire.read("prim").marshallable(dto2);
            assertEquals(dto1, dto2);

            wire.read("scalar").marshallable(sdto2);
            assertEquals(sdto1, sdto2);
        }

        // Aliases make toString() print the short class name.
        ClassAliasPool.CLASS_ALIASES.addAlias(PrimDto2.class);
        ClassAliasPool.CLASS_ALIASES.addAlias(ScalarDto2.class);
        assertEquals("!PrimDto2 {\n" +
                " flag: true,\n" +
                " s8: 1,\n" +
                " ch: \"\\x01\",\n" +
                " s16: 1,\n" +
                " s32: 1,\n" +
                " s64: 1,\n" +
                " f32: 1.0,\n" +
                " f64: 1.0\n" +
                "}\n", dto2.toString());
        assertEquals("!ScalarDto2 {\n" +
                " text: Hello1,\n" +
                " buffer: bye 1,\n" +
                " bytes: hi 1\n" +
                "}\n", sdto2.toString());
    }

    /**
     * DTO of every primitive type, marshalled with the default
     * field-reflection mechanism.
     */
    static class PrimDto extends BytesInBinaryMarshallable {
        boolean flag;
        byte s8;
        char ch;
        short s16;
        int s32;
        long s64;
        float f32;
        double f64;

        static PrimDto init(int i) {
            return init(i, new PrimDto());
        }

        // Fills every field deterministically from i (s64 = i^3, floats mirror ints).
        static <T extends PrimDto> T init(int i, T d) {
            d.flag = i % 2 != 0;
            d.s8 = (byte) i;
            d.ch = (char) i;
            d.s16 = (short) i;
            d.s32 = i;
            d.s64 = i * i * i;
            d.f32 = d.s32;
            d.f64 = d.s64;
            return d;
        }
    }

    /**
     * Same fields as PrimDto but with hand-written stop-bit encoding for a
     * more compact binary form.
     */
    static class PrimDto2 extends PrimDto {
        static PrimDto2 init(int i) {
            return init(i, new PrimDto2());
        }

        // Must mirror writeMarshallable() field-for-field, in the same order.
        @Override
        public void readMarshallable(BytesIn bytes) throws IORuntimeException {
            flag = bytes.readBoolean();
            s8 = bytes.readByte();
            ch = (char) Maths.toUInt16(bytes.readStopBit());
            s16 = Maths.toInt16(bytes.readStopBit());
            s32 = Maths.toInt32(bytes.readStopBit());
            s64 = bytes.readStopBit();
            f32 = (float) bytes.readStopBitDouble();
            f64 = bytes.readStopBitDouble();
        }

        @Override
        public void writeMarshallable(BytesOut bytes) {
            bytes.writeBoolean(flag);
            bytes.writeByte(s8);
            bytes.writeStopBit(ch);
            bytes.writeStopBit(s16);
            bytes.writeStopBit(s32);
            bytes.writeStopBit(s64);
            bytes.writeStopBit(f32);
            bytes.writeStopBit(f64);
        }
    }

    /**
     * DTO of text-like scalar types (String, StringBuilder, Bytes), using
     * default marshalling.
     */
    static class ScalarDto extends BytesInBinaryMarshallable {
        String text;
        StringBuilder buffer;
        Bytes bytes;

        static ScalarDto init(int i) {
            return init(i, new ScalarDto());
        }

        static <D extends ScalarDto> D init(int i, D d) {
            d.text = "Hello" + i;
            d.buffer = new StringBuilder("bye " + i);
            d.bytes = Bytes.allocateElasticOnHeap(8).append("hi ").append(i);
            return d;
        }
    }

    /**
     * ScalarDto with hand-written 8-bit-length-prefixed encoding, reusing the
     * existing buffer/bytes instances on read when present.
     */
    static class ScalarDto2 extends ScalarDto {
        static ScalarDto2 init(int i) {
            return init(i, new ScalarDto2());
        }

        @Override
        public void readMarshallable(BytesIn in) throws IORuntimeException {
            text = in.read8bit();

            // Lazily create the targets so repeated reads reuse them.
            if (buffer == null) buffer = new StringBuilder();
            in.read8bit(buffer);

            if (bytes == null) bytes = Bytes.allocateElasticOnHeap(8);
            in.read8bit(bytes);
        }

        @Override
        public void writeMarshallable(BytesOut out) {
            out.write8bit(text);
            out.write8bit(buffer);
            if (bytes == null) {
                // -1 length marks a null Bytes field.
                out.writeStopBit(-1);
            } else {
                // Copy without consuming: read from readPosition but leave it untouched.
                // NOTE(review): the Math.min against writeRemaining() silently truncates
                // if the output is nearly full — presumably acceptable for elastic
                // buffers in these tests; confirm for other uses.
                long offset = bytes.readPosition();
                long readRemaining = Math.min(out.writeRemaining(), bytes.readLimit() - offset);
                out.writeStopBit(readRemaining);
                try {
                    out.write(bytes, offset, readRemaining);
                } catch (BufferUnderflowException | IllegalArgumentException e) {
                    throw new AssertionError(e);
                }
            }
        }
    }
}
apache-2.0
YAFNET/YAFNET
yafsrc/Lucene.Net/Lucene.Net/Util/Counter.cs
3691
using J2N.Threading.Atomic;
using System;
using System.Runtime.CompilerServices;

namespace YAF.Lucene.Net.Util
{
    /*
     * Licensed to the Apache Software Foundation (ASF) under one or more
     * contributor license agreements.  See the NOTICE file distributed with
     * this work for additional information regarding copyright ownership.
     * The ASF licenses this file to You under the Apache License, Version 2.0
     * (the "License"); you may not use this file except in compliance with
     * the License.  You may obtain a copy of the License at
     *
     *     http://www.apache.org/licenses/LICENSE-2.0
     *
     * Unless required by applicable law or agreed to in writing, software
     * distributed under the License is distributed on an "AS IS" BASIS,
     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     * See the License for the specific language governing permissions and
     * limitations under the License.
     */

    /// <summary>
    /// Simple counter class
    /// <para/>
    /// @lucene.internal
    /// @lucene.experimental
    /// </summary>
    public abstract class Counter
    {
        /// <summary>
        /// Adds the given delta to the counters current value.
        /// </summary>
        /// <param name="delta">
        /// The delta to add. </param>
        /// <returns> The counters updated value. </returns>
        public abstract long AddAndGet(long delta);

        /// <summary>
        /// Gets the counters current value.
        /// </summary>
        public abstract long Value { get; }

        /// <summary>
        /// Returns the counters current value.
        /// </summary>
        /// <returns> The counters current value. </returns>
        [Obsolete("Use Value instead. This method will be removed in 4.8.0 release candidate.")]
        public virtual long Get() => Value;

        /// <summary>
        /// Returns a new counter. The returned counter is not thread-safe.
        /// </summary>
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public static Counter NewCounter() => NewCounter(false);

        /// <summary>
        /// Returns a new counter.
        /// </summary>
        /// <param name="threadSafe">
        /// <c>true</c> if the returned counter can be used by multiple
        /// threads concurrently. </param>
        /// <returns> A new counter. </returns>
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public static Counter NewCounter(bool threadSafe)
        {
            if (threadSafe)
            {
                return new AtomicCounter();
            }
            return new SerialCounter();
        }

        /// <summary>
        /// Returns this counter's <see cref="Value"/> implicitly.
        /// </summary>
        /// <param name="counter"></param>
        public static implicit operator long(Counter counter) => counter.Value; // LUCENENET specific

        // Single-threaded implementation: a plain long with no synchronization.
        private sealed class SerialCounter : Counter
        {
            private long total = 0;

            [MethodImpl(MethodImplOptions.AggressiveInlining)]
            public override long AddAndGet(long delta) => total += delta;

            public override long Value => total;
        }

        // Thread-safe implementation backed by an atomic 64-bit integer.
        private sealed class AtomicCounter : Counter
        {
            private readonly AtomicInt64 total = new AtomicInt64();

            [MethodImpl(MethodImplOptions.AggressiveInlining)]
            public override long AddAndGet(long delta) => total.AddAndGet(delta);

            public override long Value => total;
        }
    }
}
apache-2.0
floodlight/loxigen-artifacts
openflowj/gen-src/main/java/org/projectfloodlight/openflow/protocol/bsntlv/OFBsnTlvVxlanEgressLag.java
1751
// Copyright (c) 2008 The Board of Trustees of The Leland Stanford Junior University // Copyright (c) 2011, 2012 Open Networking Foundation // Copyright (c) 2012, 2013 Big Switch Networks, Inc. // This library was generated by the LoxiGen Compiler. // See the file LICENSE.txt which should have been included in the source distribution // Automatically generated by LOXI from template of_interface.java // Do not modify package org.projectfloodlight.openflow.protocol.bsntlv; import org.projectfloodlight.openflow.protocol.*; import org.projectfloodlight.openflow.protocol.action.*; import org.projectfloodlight.openflow.protocol.actionid.*; import org.projectfloodlight.openflow.protocol.bsntlv.*; import org.projectfloodlight.openflow.protocol.errormsg.*; import org.projectfloodlight.openflow.protocol.meterband.*; import org.projectfloodlight.openflow.protocol.instruction.*; import org.projectfloodlight.openflow.protocol.instructionid.*; import org.projectfloodlight.openflow.protocol.match.*; import org.projectfloodlight.openflow.protocol.stat.*; import org.projectfloodlight.openflow.protocol.oxm.*; import org.projectfloodlight.openflow.protocol.oxs.*; import org.projectfloodlight.openflow.protocol.queueprop.*; import org.projectfloodlight.openflow.types.*; import org.projectfloodlight.openflow.util.*; import org.projectfloodlight.openflow.exceptions.*; import io.netty.buffer.ByteBuf; public interface OFBsnTlvVxlanEgressLag extends OFObject, OFBsnTlv { int getType(); OFVersion getVersion(); void writeTo(ByteBuf channelBuffer); Builder createBuilder(); public interface Builder extends OFBsnTlv.Builder { OFBsnTlvVxlanEgressLag build(); int getType(); OFVersion getVersion(); } }
apache-2.0
mifodiy4j/smikhailov
chapter_002/src/main/java/ru/job4j/Figure.java
645
package ru.job4j;

/**
 * Base class for a chess figure fixed at a board cell.
 */
public abstract class Figure {
    // The cell this figure currently occupies; immutable per instance.
    final Cell position;

    public Figure(Cell position) {
        this.position = position;
    }

    /*
     * (Translated from Russian.) The method should work as follows:
     * dist specifies the cell to move to. If the figure can move there,
     * return the array of cells the figure must pass through. If the figure
     * cannot move there, throw ImpossibleMoveException.
     */
    abstract Cell[] way(Cell dist) throws ImpossibleMoveException;

    // NOTE(review): this overloads (not overrides) Object.clone() with a
    // different signature and void return — presumably a factory-style
    // "place a copy at position" hook; consider renaming to avoid confusion.
    abstract void clone(Cell position);
}
apache-2.0
gongmingqm10/cmcc
mobile/src/main/java/net/gongmingqm10/cmcc/activity/CMCCApplication.java
325
package net.gongmingqm10.cmcc.activity;

import android.app.Application;
import cn.jpush.android.api.JPushInterface;

/**
 * Application entry point; initializes the JPush push-notification SDK
 * once per process.
 */
public class CMCCApplication extends Application {

    @Override
    public void onCreate() {
        super.onCreate();
        // NOTE(review): debug logging is hard-coded on — presumably intended
        // for development builds only; confirm before release.
        JPushInterface.setDebugMode(true);
        JPushInterface.init(this);
    }
}
apache-2.0