repo_name
stringlengths 6
101
| path
stringlengths 4
300
| text
stringlengths 7
1.31M
|
|---|---|---|
nanotech-empa/aiida-nanotech-empa
|
aiida_nanotech_empa/workflows/gaussian/__init__.py
|
from .scf_workchain import GaussianScfWorkChain
from .relax_workchain import GaussianRelaxWorkChain
from .delta_scf_workchain import GaussianDeltaScfWorkChain
from .natorb_workchain import GaussianNatOrbWorkChain
from .spin_workchain import GaussianSpinWorkChain
from .hf_mp2_workchain import GaussianHfMp2WorkChain
from .constr_opt_chain_workchain import GaussianConstrOptChainWorkChain
from .casscf_workchain import GaussianCasscfWorkChain
from .casscf_series_workchain import GaussianCasscfSeriesWorkChain
__all__ = [
'GaussianScfWorkChain',
'GaussianRelaxWorkChain',
'GaussianDeltaScfWorkChain',
'GaussianNatOrbWorkChain',
'GaussianSpinWorkChain',
'GaussianHfMp2WorkChain',
'GaussianConstrOptChainWorkChain',
'GaussianCasscfWorkChain',
'GaussianCasscfSeriesWorkChain',
]
|
SamGG/mev
|
source/org/tigr/microarray/mev/cluster/gui/impl/terrain/TerrainInitDialog.java
|
<filename>source/org/tigr/microarray/mev/cluster/gui/impl/terrain/TerrainInitDialog.java<gh_stars>1-10
/*******************************************************************************
* Copyright (c) 1999-2005 The Institute for Genomic Research (TIGR).
* Copyright (c) 2005-2008, the Dana-Farber Cancer Institute (DFCI),
* J. Craig Venter Institute (JCVI) and the University of Washington.
* All rights reserved.
*******************************************************************************/
/*
* $RCSfile: TerrainInitDialog.java,v $
* $Revision: 1.5 $
* $Date: 2006-04-20 18:53:39 $
* $Author: eleanorahowe $
* $State: Exp $
*/
package org.tigr.microarray.mev.cluster.gui.impl.terrain;
import java.awt.BorderLayout;
import java.awt.Color;
import java.awt.Dimension;
import java.awt.Frame;
import java.awt.GridBagConstraints;
import java.awt.GridBagLayout;
import java.awt.Toolkit;
import java.awt.event.ActionEvent;
import java.awt.event.WindowEvent;
import javax.swing.BorderFactory;
import javax.swing.ButtonGroup;
import javax.swing.JFrame;
import javax.swing.JLabel;
import javax.swing.JOptionPane;
import javax.swing.JPanel;
import javax.swing.JRadioButton;
import javax.swing.JTextField;
import javax.swing.UIManager;
import javax.swing.border.EmptyBorder;
import org.tigr.microarray.mev.cluster.gui.impl.GUIFactory;
import org.tigr.microarray.mev.cluster.gui.impl.dialogs.AlgorithmDialog;
import org.tigr.microarray.mev.cluster.gui.impl.dialogs.DialogListener;
import org.tigr.microarray.mev.cluster.gui.impl.dialogs.dialogHelpUtil.HelpWindow;
/**
 * Modal dialog collecting the initialization parameters for the terrain
 * clustering algorithm: whether to cluster genes or samples, and the number
 * of nearest neighbors (a positive integer, default 20).
 *
 * Changes: removed large blocks of commented-out dead code, replaced the
 * deprecated {@code Dialog.show()} with {@code setVisible(true)} (equivalent
 * blocking behavior for a modal dialog), and simplified
 * {@code String.valueOf(20)} to the literal {@code "20"}.
 */
public class TerrainInitDialog extends AlgorithmDialog {

    /** Dialog outcome: JOptionPane.OK_OPTION, CANCEL_OPTION or CLOSED_OPTION. */
    private int result;
    /** Selected when clustering genes (rows). */
    private JRadioButton genRadio;
    /** Selected when clustering samples (columns). */
    private JRadioButton expRadio;
    /** Neighbor count input; must parse to an int &gt;= 1 on OK. */
    private JTextField neighboursField;

    /**
     * Constructs a <code>TerrainInitDialog</code> with default
     * initial parameters (genes selected, 20 neighbors).
     *
     * @param parent the owning frame; the dialog is modal relative to it
     */
    public TerrainInitDialog(Frame parent) {
        super((JFrame) parent, "Terrain Initialization", true);
        Listener listener = new Listener();
        addWindowListener(listener);

        JPanel parameters = new JPanel(new GridBagLayout());
        parameters.setBorder(new EmptyBorder(20, 10, 20, 10));
        parameters.setBackground(Color.white);

        GridBagConstraints gbc = new GridBagConstraints();
        gbc.anchor = GridBagConstraints.WEST;

        genRadio = new JRadioButton("Genes");
        genRadio.setFocusPainted(false);
        genRadio.setBackground(Color.white);
        genRadio.setForeground(UIManager.getColor("Label.foreground"));
        gbc.gridx = 0; gbc.gridy = 0;
        parameters.add(genRadio, gbc);

        expRadio = new JRadioButton("Samples");
        expRadio.setForeground(UIManager.getColor("Label.foreground"));
        expRadio.setFocusPainted(false);
        expRadio.setBackground(Color.white);
        gbc.gridx = 1; gbc.gridy = 0; gbc.insets.left = 25;
        parameters.add(expRadio, gbc);

        gbc.insets.left = 0;
        gbc.gridx = 0; gbc.gridy = 1; gbc.insets.top = 20;
        parameters.add(new JLabel("Neighbors:"), gbc);
        gbc.gridx = 1; gbc.insets.left = 25;
        neighboursField = new JTextField("20", 5);
        parameters.add(neighboursField, gbc);

        // Genes/Samples are mutually exclusive; genes is the default.
        ButtonGroup bg = new ButtonGroup();
        bg.add(genRadio);
        bg.add(expRadio);
        genRadio.setSelected(true);

        JPanel panel3 = new JPanel(new BorderLayout());
        panel3.setForeground(Color.white);
        panel3.setBorder(BorderFactory.createLineBorder(Color.gray));
        panel3.setBackground(Color.white);
        panel3.add(parameters, BorderLayout.WEST);
        panel3.add(new JLabel(GUIFactory.getIcon("dialog_button_bar.gif")), BorderLayout.EAST);

        // AlgorithmDialog supplies the OK/Cancel/Reset/Info buttons; wire them
        // to our listener and install the parameter panel as the content.
        setActionListeners(listener);
        addContent(panel3);
        pack();
    }

    /**
     * Centers the dialog on screen and shows it, blocking until the user
     * closes it.
     *
     * @return JOptionPane.OK_OPTION, CANCEL_OPTION or CLOSED_OPTION
     */
    public int showModal() {
        Dimension screenSize = Toolkit.getDefaultToolkit().getScreenSize();
        setLocation((screenSize.width - getSize().width) / 2, (screenSize.height - getSize().height) / 2);
        setVisible(true); // replaces deprecated show(); blocks here because the dialog is modal
        return result;
    }

    /**
     * Returns true if the genes radio button was selected.
     */
    public boolean isGenes() {
        return genRadio.isSelected();
    }

    /**
     * Returns the neighbours value.
     *
     * @throws NumberFormatException if the field does not contain an int
     */
    public int getNeighbours() {
        return Integer.parseInt(neighboursField.getText());
    }

    /**
     * Listens to the dialog's button and window events and records the
     * outcome in {@link #result}.
     */
    private class Listener extends DialogListener {

        public void actionPerformed(ActionEvent e) {
            String command = e.getActionCommand();
            if (command.equals("ok-command")) {
                try {
                    int value = Integer.parseInt(neighboursField.getText());
                    if (value < 1)
                        throw new NumberFormatException("value must be more than 0.");
                    result = JOptionPane.OK_OPTION;
                    dispose();
                } catch (NumberFormatException nfe) {
                    // Keep the dialog open so the user can correct the input.
                    JOptionPane.showMessageDialog(TerrainInitDialog.this, "Error number: " + nfe.getMessage(), "Input Error!", JOptionPane.ERROR_MESSAGE);
                }
            } else if (command.equals("cancel-command")) {
                result = JOptionPane.CANCEL_OPTION;
                dispose();
            } else if (command.equals("reset-command")) {
                // Restore defaults without closing the dialog.
                genRadio.setSelected(true);
                neighboursField.setText("20");
                result = JOptionPane.CANCEL_OPTION;
            } else if (command.equals("info-command")) {
                HelpWindow.launchBrowser(TerrainInitDialog.this, "Terrain Map Initialization Dialog");
            }
        }

        public void windowClosing(WindowEvent e) {
            result = JOptionPane.CLOSED_OPTION;
            dispose();
        }
    }

    /** Manual smoke test: shows the dialog and prints "ok" if accepted. */
    public static void main(String[] args) {
        TerrainInitDialog dlg = new TerrainInitDialog(new javax.swing.JFrame());
        if (dlg.showModal() == JOptionPane.OK_OPTION) {
            System.out.println("ok");
        }
        System.exit(0);
    }
}
|
joe-chacko/yoko
|
yoko-core/src/test/java/test/types/DynAnyTypes/TestStructHelper.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package test.types.DynAnyTypes;
//
// IDL:test/types/DynAnyTypes/TestStruct:1.0
//
// Generated CORBA helper for the IDL struct
// IDL:test/types/DynAnyTypes/TestStruct:1.0. Provides the standard CORBA
// Helper operations: Any insertion/extraction, TypeCode construction,
// repository id, and CDR stream marshalling. This is mechanical generated
// code - regenerate from the IDL rather than editing by hand.
final public class TestStructHelper
{
    // Marshals val into the Any: writes it to a fresh output stream, then
    // loads that stream back into the Any together with the struct TypeCode.
    public static void
    insert(org.omg.CORBA.Any any, TestStruct val)
    {
        org.omg.CORBA.portable.OutputStream out = any.create_output_stream();
        write(out, val);
        any.read_value(out.create_input_stream(), type());
    }

    // Unmarshals a TestStruct from the Any. The Any's TypeCode must be
    // equivalent to type(); otherwise BAD_OPERATION is raised, per the
    // standard Helper contract.
    public static TestStruct
    extract(org.omg.CORBA.Any any)
    {
        if(any.type().equivalent(type()))
            return read(any.create_input_stream());
        else
            throw new org.omg.CORBA.BAD_OPERATION();
    }

    // Lazily-built struct TypeCode (17 members, one per field below).
    // NOTE(review): the lazy init is not synchronized; a race would just
    // build equivalent TypeCodes twice, but confirm single-threaded use.
    private static org.omg.CORBA.TypeCode typeCode_;

    // Returns (building on first call) the TypeCode describing TestStruct.
    // Member order here must match the read()/write() order below.
    public static org.omg.CORBA.TypeCode
    type()
    {
        if(typeCode_ == null)
        {
            org.omg.CORBA.ORB orb = org.omg.CORBA.ORB.init();
            org.omg.CORBA.StructMember[] members = new org.omg.CORBA.StructMember[17];

            members[0] = new org.omg.CORBA.StructMember();
            members[0].name = "shortVal";
            members[0].type = orb.get_primitive_tc(org.omg.CORBA.TCKind.tk_short);

            members[1] = new org.omg.CORBA.StructMember();
            members[1].name = "ushortVal";
            members[1].type = orb.get_primitive_tc(org.omg.CORBA.TCKind.tk_ushort);

            members[2] = new org.omg.CORBA.StructMember();
            members[2].name = "longVal";
            members[2].type = orb.get_primitive_tc(org.omg.CORBA.TCKind.tk_long);

            members[3] = new org.omg.CORBA.StructMember();
            members[3].name = "ulongVal";
            members[3].type = orb.get_primitive_tc(org.omg.CORBA.TCKind.tk_ulong);

            members[4] = new org.omg.CORBA.StructMember();
            members[4].name = "floatVal";
            members[4].type = orb.get_primitive_tc(org.omg.CORBA.TCKind.tk_float);

            members[5] = new org.omg.CORBA.StructMember();
            members[5].name = "doubleVal";
            members[5].type = orb.get_primitive_tc(org.omg.CORBA.TCKind.tk_double);

            members[6] = new org.omg.CORBA.StructMember();
            members[6].name = "boolVal";
            members[6].type = orb.get_primitive_tc(org.omg.CORBA.TCKind.tk_boolean);

            members[7] = new org.omg.CORBA.StructMember();
            members[7].name = "charVal";
            members[7].type = orb.get_primitive_tc(org.omg.CORBA.TCKind.tk_char);

            members[8] = new org.omg.CORBA.StructMember();
            members[8].name = "octetVal";
            members[8].type = orb.get_primitive_tc(org.omg.CORBA.TCKind.tk_octet);

            members[9] = new org.omg.CORBA.StructMember();
            members[9].name = "anyVal";
            members[9].type = orb.get_primitive_tc(org.omg.CORBA.TCKind.tk_any);

            members[10] = new org.omg.CORBA.StructMember();
            members[10].name = "tcVal";
            members[10].type = orb.get_primitive_tc(org.omg.CORBA.TCKind.tk_TypeCode);

            // objectVal is a CORBA object reference, not a primitive kind.
            members[11] = new org.omg.CORBA.StructMember();
            members[11].name = "objectVal";
            members[11].type = orb.create_interface_tc("IDL:omg.org/CORBA/Object:1.0", "Object");

            members[12] = new org.omg.CORBA.StructMember();
            members[12].name = "stringVal";
            members[12].type = orb.get_primitive_tc(org.omg.CORBA.TCKind.tk_string);

            members[13] = new org.omg.CORBA.StructMember();
            members[13].name = "longlongVal";
            members[13].type = orb.get_primitive_tc(org.omg.CORBA.TCKind.tk_longlong);

            members[14] = new org.omg.CORBA.StructMember();
            members[14].name = "ulonglongVal";
            members[14].type = orb.get_primitive_tc(org.omg.CORBA.TCKind.tk_ulonglong);

            members[15] = new org.omg.CORBA.StructMember();
            members[15].name = "wcharVal";
            members[15].type = orb.get_primitive_tc(org.omg.CORBA.TCKind.tk_wchar);

            members[16] = new org.omg.CORBA.StructMember();
            members[16].name = "wstringVal";
            members[16].type = orb.get_primitive_tc(org.omg.CORBA.TCKind.tk_wstring);

            typeCode_ = orb.create_struct_tc(id(), "TestStruct", members);
        }
        return typeCode_;
    }

    // Repository id of the struct.
    public static String
    id()
    {
        return "IDL:test/types/DynAnyTypes/TestStruct:1.0";
    }

    // Reads a TestStruct from a CDR input stream. Field order must match
    // the TypeCode member order and write() exactly.
    public static TestStruct
    read(org.omg.CORBA.portable.InputStream in)
    {
        TestStruct _ob_v = new TestStruct();
        _ob_v.shortVal = in.read_short();
        _ob_v.ushortVal = in.read_ushort();
        _ob_v.longVal = in.read_long();
        _ob_v.ulongVal = in.read_ulong();
        _ob_v.floatVal = in.read_float();
        _ob_v.doubleVal = in.read_double();
        _ob_v.boolVal = in.read_boolean();
        _ob_v.charVal = in.read_char();
        _ob_v.octetVal = in.read_octet();
        _ob_v.anyVal = in.read_any();
        _ob_v.tcVal = in.read_TypeCode();
        _ob_v.objectVal = in.read_Object();
        _ob_v.stringVal = in.read_string();
        _ob_v.longlongVal = in.read_longlong();
        _ob_v.ulonglongVal = in.read_ulonglong();
        _ob_v.wcharVal = in.read_wchar();
        _ob_v.wstringVal = in.read_wstring();
        return _ob_v;
    }

    // Writes a TestStruct to a CDR output stream, mirroring read().
    public static void
    write(org.omg.CORBA.portable.OutputStream out, TestStruct val)
    {
        out.write_short(val.shortVal);
        out.write_ushort(val.ushortVal);
        out.write_long(val.longVal);
        out.write_ulong(val.ulongVal);
        out.write_float(val.floatVal);
        out.write_double(val.doubleVal);
        out.write_boolean(val.boolVal);
        out.write_char(val.charVal);
        out.write_octet(val.octetVal);
        out.write_any(val.anyVal);
        out.write_TypeCode(val.tcVal);
        out.write_Object(val.objectVal);
        out.write_string(val.stringVal);
        out.write_longlong(val.longlongVal);
        out.write_ulonglong(val.ulonglongVal);
        out.write_wchar(val.wcharVal);
        out.write_wstring(val.wstringVal);
    }
}
|
jmapi/cdmi-core-monitor
|
src/main/java/pw/cdmi/om/protocol/cim/device/CIM_SCSIController.java
|
package pw.cdmi.om.protocol.cim.device;
import javax.cim.UnsignedInteger16;
import javax.cim.UnsignedInteger32;
import javax.cim.UnsignedInteger64;
/**
 * SCSI controller model class, named after the DMTF CIM schema class
 * CIM_SCSIController. Fields mirror CIM properties of that class; they have
 * no accessors here — presumably populated/read via reflection by a
 * CIM/WBEM mapping layer (TODO confirm against the framework that consumes
 * these classes).
 */
public class CIM_SCSIController extends CIM_Controller {
    // Per the CIM schema these correspond to: protection management mode,
    // maximum data width (bits), maximum transfer rate, and the count of
    // controller timeouts — verify exact units against the schema MOF.
    private UnsignedInteger16 ProtectionManagement;
    private UnsignedInteger32 MaxDataWidth;
    private UnsignedInteger64 MaxTransferRate;
    private UnsignedInteger32 ControllerTimeouts;
    // Array of supported signal capability values (enum codes in the schema).
    private UnsignedInteger16[] SignalCapabilities;
}
|
puyoai/puyoai
|
src/core/puyo_controller_performance_test.cc
|
<reponame>puyoai/puyoai<filename>src/core/puyo_controller_performance_test.cc
#include "core/puyo_controller.h"
#include <gtest/gtest.h>
#include "base/time_stamp_counter.h"
#include "core/core_field.h"
#include "core/decision.h"
using namespace std;
// Performance benchmark: PuyoController::findKeyStroke() on an empty field.
// Each of the 100 iterations is timed individually via a scoped TSC probe
// (ScopedTimeStampCounter presumably starts/stops the counter through its
// constructor/destructor — confirm in base/time_stamp_counter.h), and the
// aggregate statistics are printed at the end. No correctness assertions.
TEST(PuyoControllerPerformanceTest, empty)
{
    TimeStampCounterData tsc;
    CoreField f;
    for (int i = 0; i < 100; ++i) {
        Decision d(6, 3);
        ScopedTimeStampCounter stsc(&tsc); // times exactly the call below
        PuyoController::findKeyStroke(f, d);
    }
    tsc.showStatistics();
}
// Performance benchmark: findKeyStroke() when the target column is walled
// off by the 'O' columns, so the decision (column 6) should be unreachable.
// Measures how quickly the search gives up; statistics printed at the end.
TEST(PuyoControllerPerformanceTest, unreachable)
{
    TimeStampCounterData tsc;
    // Field layout: columns 2 and 4 are filled to the top, isolating the
    // right side of the field. (Comments mark rows 12, 8 and 4.)
    CoreField f(
        " O O  "
        " O O  " // 12
        " O O  "
        " O O  "
        " O O  "
        " O O  " // 8
        " O O  "
        " O O  "
        " O O  "
        " O O  " // 4
        " O O  "
        " O O  "
        " O O  ");
    for (int i = 0; i < 100; ++i) {
        Decision d(6, 3);
        ScopedTimeStampCounter stsc(&tsc); // times exactly the call below
        PuyoController::findKeyStroke(f, d);
    }
    tsc.showStatistics();
}
|
today2098/algorithm
|
src/dp/lcs.hpp
|
<reponame>today2098/algorithm
#ifndef ALGORITHM_LCS_HPP
#define ALGORITHM_LCS_HPP
#include <algorithm>
#include <string>
#include <vector>
namespace algorithm {
// 最長共通部分列 (LCS:Longest Common Subsequence). O(|A|*|B|).
// Longest Common Subsequence (LCS) of sequences a and b, in O(|a|*|b|).
// Class must be a random-access sequence constructible from a length
// (e.g. std::vector). Returns one LCS of a and b.
template <class Class>
Class lcs(const Class &a, const Class &b) {
    const int n = a.size(), m = b.size();
    // len[i][j] = length of the LCS of the prefixes a[:i] and b[:j].
    std::vector<std::vector<int> > len(n + 1, std::vector<int>(m + 1, 0));
    for(int i = 1; i <= n; ++i) {
        for(int j = 1; j <= m; ++j) {
            if(a[i - 1] == b[j - 1])
                len[i][j] = len[i - 1][j - 1] + 1;
            else
                len[i][j] = std::max(len[i - 1][j], len[i][j - 1]);
        }
    }
    // Walk the table back from (n, m) to reconstruct one LCS.
    Class result(len[n][m]);
    int x = n, y = m, pos = len[n][m];
    while(pos > 0) {
        if(a[x - 1] == b[y - 1]) {
            result[pos - 1] = a[x - 1];
            --x, --y, --pos;
        } else if(len[x][y] == len[x - 1][y]) {
            --x;
        } else {
            --y;
        }
    }
    return result;
}
} // namespace algorithm
#endif // ALGORITHM_LCS_HPP
|
elcarrion06/mcedit2
|
src/mceditlib/blocktypes/rotation.py
|
"""
rotation
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from collections import defaultdict
import numpy
from mceditlib.blocktypes import parseBlockstate, joinBlockstate, PCBlockTypeSet
log = logging.getLogger(__name__)
def blankRotationTable():
    """Return the identity rotation table.

    Shape is (32768, 16, 2) and table[blockID, meta] == [blockID, meta],
    i.e. every (ID, meta) pair maps to itself until a rotation overwrites it.
    """
    # numpy.indices gives shape (2, 32768, 16); move the coordinate axis
    # last so that table[x, y] yields the pair [x, y].
    return numpy.indices((32768, 16)).transpose(1, 2, 0)
class BlockRotations(object):
    """Builds (blockID, meta) -> (blockID, meta) lookup tables that rotate
    blocks 90 (or 180) degrees about the x, y or z axis, by rewriting each
    block's blockstate properties and finding the block whose state matches
    the rewritten properties.

    NOTE(review): uses dict.iteritems() and unicode(), so this class is
    Python 2 only.
    """

    # Property *values* renamed by a 90-degree rotation, per axis
    # (e.g. facing=north -> facing=west under a y rotation). The down_x/up_z
    # style entries cover lever/button orientations.
    mappings = {
        'y': {
            'north': 'west',
            'west': 'south',
            'south': 'east',
            'east': 'north',

            'down_x': 'down_z',
            'down_z': 'down_x',
            'up_x': 'up_z',
            'up_z': 'up_x',
        },
        'x': {
            'up': 'south',
            'south': 'down',
            'down': 'north',
            'north': 'up',
        },
        'z': {
            'up': 'west',
            'west': 'down',
            'down': 'east',
            'east': 'up',
        }
    }

    # How the 'axis' property (logs, pillars) changes under each rotation.
    axisMappings = {
        'y': {
            'x': 'z',
            'z': 'x',
        },
        'x': {
            'y': 'z',
            'z': 'y',
        },
        'z': {
            'x': 'y',
            'y': 'x',
        },
    }

    # 90-degree y-rotation of the 'shape' property of rails/powered rails.
    railShapes = {
        'ascending_north': 'ascending_west',
        'ascending_west': 'ascending_south',
        'ascending_south': 'ascending_east',
        'ascending_east': 'ascending_north',

        'east_west': 'north_south',

        'north_east': 'north_west',
        'north_west': 'south_west',
        'south_west': 'south_east',
        'south_east': 'north_east',
    }

    # Combined (half, facing) transitions for stairs under x/z rotations;
    # empty for y because a y rotation never flips top/bottom.
    halfFacingMappings = {
        'y': {},
        'x': {
            ('top', 'south'): ('bottom', 'south'),
            ('bottom', 'south'): ('bottom', 'north'),
            ('bottom', 'north'): ('top', 'north'),
            ('top', 'north'): ('top', 'south'),
        },
        'z': {
            ('top', 'west'): ('bottom', 'west'),
            ('bottom', 'west'): ('bottom', 'east'),
            ('bottom', 'east'): ('top', 'east'),
            ('top', 'east'): ('top', 'west'),
        }
    }

    def __init__(self, blocktypes):
        # Index blocks by internal name so matchingState() can scan only the
        # variants of one block instead of the whole set.
        self.blocktypes = blocktypes
        self.blocksByInternalName = defaultdict(list)
        for block in self.blocktypes:
            self.blocksByInternalName[block.internalName].append(block)

        # Precompute the five supported rotations.
        self.rotateY90 = self.buildTable(axis='y')
        self.rotateX90 = self.buildTable(axis='x')
        self.rotateZ90 = self.buildTable(axis='z')
        self.rotateX180 = self.buildTable(axis='x', aboutFace=True)
        self.rotateZ180 = self.buildTable(axis='z', aboutFace=True)

    def buildTable(self, axis, aboutFace=False):
        """Build one rotation table for `axis`; aboutFace=True composes the
        90-degree mapping with itself to get a 180-degree rotation.

        Returns the (32768, 16, 2) table; entries for states with no matching
        rotated block are left as the identity.
        """
        mapping = self.mappings[axis]
        axisMapping = self.axisMappings[axis]
        halfFacingMap = self.halfFacingMappings[axis]
        if aboutFace:
            # Apply the 90-degree map twice to obtain the 180-degree map.
            mapping90 = mapping
            mapping = {k: mapping90[v] for k, v in mapping90.iteritems()}
            if axis in 'xz':
                # 180 degrees about a horizontal axis swaps up and down.
                mapping['down_x'] = 'up_x'
                mapping['down_z'] = 'up_z'
                mapping['up_x'] = 'down_x'
                mapping['up_z'] = 'down_z'
            halfFacingMap90 = halfFacingMap
            halfFacingMap = {k: halfFacingMap90[v] for k, v in halfFacingMap90.iteritems()}

        table = blankRotationTable()
        rotIncrement = 8 if aboutFace else 4
        for block in self.blocktypes:
            oldState = state = block.stateDict
            if not len(state):
                continue  # stateless blocks are rotation-invariant

            # First pass: facing=north and similar
            newState = {}
            for k, v in state.items():
                n = mapping.get(v)
                if n:
                    newState[k] = n
                else:
                    newState[k] = v
            state = newState
            newState = dict(state)

            # Second pass: north=true and similar
            for k, v in mapping.items():
                if k in state:
                    if state[k] == 'true':
                        newState[k] = 'false'
                        newState[v] = 'true'

            state = newState

            if axis == 'y':
                # For signs and banners: rotation=10 and similar
                # (16 steps of 22.5 degrees; 90 degrees = 4 steps)
                if 'rotation' in state:
                    rotation = (int(state['rotation']) + rotIncrement) % 16
                    state['rotation'] = unicode(rotation)

                # For rails, powered rails, etc: shape=north_east
                if 'shape' in state:
                    shape = state['shape']
                    newShape = self.railShapes.get(shape)
                    if newShape:
                        state['shape'] = newShape

            # For logs and such: axis=x and similar
            if not aboutFace and 'axis' in state:
                axis = state['axis']
                axis = axisMapping.get(axis, axis)
                state['axis'] = axis

            # For slabs: if x or z axis and 180-degree rotation, flip "half"
            if axis in 'xz' and aboutFace:
                if 'half' in state:
                    if state['half'] == 'bottom':
                        state['half'] = 'top'
                    elif state['half'] == 'top':
                        state['half'] = 'bottom'

            # For stairs, x or z axis: flip "half" upward or flip east/west to roll
            if 'half' in oldState and 'facing' in oldState:
                newHalfFacing = halfFacingMap.get((oldState['half'], oldState['facing']))
                if newHalfFacing:
                    state['half'], state['facing'] = newHalfFacing
            #print("Changed %s \nto %s" % (stateString, newStateString))

            newBlock = self.matchingState(block.internalName, state)
            if newBlock is block:
                pass
            # elif newBlock is None:
            #     newStateString = joinBlockstate(state)
            #     print("no mapping for %s%s" % (block.internalName, newStateString))
            elif newBlock is not None:
                # print("Changed %s \nto %s" % (block, newBlock))
                table[block.ID, block.meta] = [newBlock.ID, newBlock.meta]

        return table

    def matchingState(self, internalName, stateDict):
        """
        Find the first block with the given name whose state matches all of the keys
        and values in stateDict.

        Parameters
        ----------
        internalName : unicode
            block's internal name
        stateDict : dict
            the keys and values that the returned state must match

        Returns
        -------
        block: BlockType
            the first matching block, or None if no variant matches
        """
        for b in self.blocksByInternalName[internalName]:
            bsd = b.stateDict
            # for/else: the else runs only when no key mismatched.
            for k, v in stateDict.iteritems():
                if bsd.get(k) != v:
                    break
            else:
                return b
        return None
def xxxtest_yAxisTable():
    # Disabled sanity test (drop the "xxx" prefix to enable): checks that the
    # y-axis rotation table is not the identity.
    # Fix: the original called yAxisTable(blocktypes), a function that does
    # not exist anywhere in this module (NameError if ever run); the y-axis
    # table is produced by BlockRotations. The redundant local
    # `from . import PCBlockTypeSet` is gone - it is already imported at the
    # top of this module.
    blocktypes = PCBlockTypeSet()
    table = BlockRotations(blocktypes).rotateY90
    assert (table != blankRotationTable()).any(), "Table is blank"
    changed = False
    changedNames = set()
    for i in range(32768):
        for j in range(16):
            e = table[i, j]
            if e[0] != i or e[1] != j:
                changed = True
                name = blocktypes[i, j].internalName
                if name not in changedNames:
                    # print("%s is changed" % name)
                    changedNames.add(name)
    assert changed, "Table is unchanged"
def main():
    # Micro-benchmark: constructing BlockRotations builds all five rotation
    # tables; assert that it completes in under 100 ms.
    from timeit import timeit
    blocktypes = PCBlockTypeSet()
    secs = timeit(lambda: BlockRotations(blocktypes), number=1)
    print("Time: %0.3f" % secs)
    assert secs < 0.1

if __name__ == '__main__':
    main()
|
qizhenghao/LeetCode
|
src/com/bruce/open/self/Sohu_FindMaxNum.java
|
package com.bruce.open.self;
import java.util.Arrays;
/**
* Created by Administrator on 2017/1/4 0004.
*/
/**
 * "Largest number after removing n digits" puzzle: given a number, delete n
 * digits so that the remaining digits (in their original order) form the
 * largest possible number. Implemented with a greedy window scan, in both an
 * int-based and a String-based variant.
 *
 * Bug fix: the original selection loop ran only while {@code removeCount < n}.
 * For inputs with non-increasing digits (e.g. 97531), no removal is ever
 * consumed, so the loop overran both the result and digit arrays
 * (ArrayIndexOutOfBoundsException / StringIndexOutOfBoundsException). The
 * loop now also stops once the result is full, and the scan window is
 * clamped to the number of digits.
 */
public class Sohu_FindMaxNum {

    public static void main(String[] args) {
        System.out.println(findMaxNum(37938265, 4));
        System.out.println(findMaxNum("37938265", 4));
        System.out.println(findMaxNum(437893826, 4));
        System.out.println(findMaxNum("437893826", 4));
    }

    /**
     * Returns the largest number obtainable by deleting n digits from origin
     * while preserving the order of the remaining digits.
     *
     * @param origin a non-negative integer
     * @param n      number of digits to remove; 0 &lt;= n &lt; digit count
     * @return the largest (digitCount - n)-digit result
     */
    public static int findMaxNum(int origin, int n) {
        int size = sizeOfInt(origin);
        int keep = size - n;
        int[] digits = new int[size];
        int tmp = origin;
        for (int i = size - 1; i >= 0; i--) { // split into digits, most significant first
            digits[i] = tmp % 10;
            tmp /= 10;
        }
        int[] resultArr = new int[keep];
        int removeCount = 0, resultIndex = 0, i = 0;
        // Greedy: for each output position, take the largest digit reachable
        // with the removals still available; skipped digits count as removed.
        for (; removeCount < n && resultIndex < keep; i++) {
            int maxIndex = i;
            int windowEnd = Math.min(i + n - removeCount + 1, size); // clamp to array
            for (int j = i + 1; j < windowEnd; j++) {
                if (digits[maxIndex] < digits[j])
                    maxIndex = j;
            }
            resultArr[resultIndex++] = digits[maxIndex];
            removeCount += maxIndex - i;
            i = maxIndex;
        }
        // All removals spent: the remaining digits are copied verbatim.
        for (int k = i; k < size && resultIndex < keep; k++) {
            resultArr[resultIndex++] = digits[k];
        }
        int result = 0;
        for (int d : resultArr) {
            result = result * 10 + d;
        }
        return result;
    }

    /**
     * String variant of {@link #findMaxNum(int, int)}: same greedy algorithm
     * over the characters of origin, with the same overrun fix.
     *
     * @param origin decimal digit string
     * @param n      number of digits to remove; 0 &lt;= n &lt; origin.length()
     * @return the largest (length - n)-character digit string
     */
    public static String findMaxNum(String origin, int n) {
        int size = origin.length();
        int keep = size - n;
        StringBuilder result = new StringBuilder(keep);
        int removeCount = 0, i = 0;
        for (; removeCount < n && result.length() < keep; i++) {
            int maxIndex = i;
            int windowEnd = Math.min(i + n - removeCount + 1, size); // clamp to string
            for (int j = i + 1; j < windowEnd; j++) {
                if (origin.charAt(maxIndex) < origin.charAt(j))
                    maxIndex = j;
            }
            result.append(origin.charAt(maxIndex));
            removeCount += maxIndex - i;
            i = maxIndex;
        }
        if (result.length() < keep) {
            result.append(origin, i, size); // tail digits, taken verbatim
        }
        result.setLength(keep); // trims any digits that should have been removed
        return result.toString();
    }

    // sizeTable[i] is the largest int with i+1 decimal digits.
    final static int[] sizeTable = { 9, 99, 999, 9999, 99999, 999999, 9999999,
            99999999, 999999999, Integer.MAX_VALUE };

    /** Number of decimal digits in x (x must be &gt;= 0). */
    static int sizeOfInt(int x) {
        for (int i = 0;; i++)
            if (x <= sizeTable[i])
                return i + 1;
    }
}
|
uk-gov-mirror/ministryofjustice.c100-application
|
app/services/c100_app/solicitor_decision_tree.rb
|
<gh_stars>1-10
module C100App
  # Decision tree for the solicitor journey: maps the step just completed to
  # the next step (or external path) to edit.
  class SolicitorDecisionTree < BaseDecisionTree
    def destination
      return next_step if next_step

      target =
        case step_name
        when :personal_details then :address_details
        when :address_details  then :contact_details
        when :contact_details  then '/steps/respondent/names'
        end

      raise InvalidStep, "Invalid step '#{as || step_params}'" unless target

      edit(target)
    end
  end
end
|
undp/transparencyportal
|
undp-transparency-portal-fe/src/shared/actions/downLoadCSV.js
|
<gh_stars>1-10
import Api from '../../lib/api';
// Redux action types for the CSV export flow.
export const EXPORT_CSV = {
    success: 'fetch_success/export_csv',
    reset: 'reset/export_csv'
};

// Action creator: a CSV blob object-URL is ready for download.
export const exportCSVSuccess = (url) => {
    return {
        type: EXPORT_CSV.success,
        url
    };
};

// Action creator: clear any previous export state.
export const exportCSVReset = () => {
    return { type: EXPORT_CSV.reset };
};
// Triggers a browser download of fileURL under fileName: creates a hidden
// <a download> element and clicks it (standards browsers), or opens the URL
// and invokes the 'SaveAs' document command (legacy IE). Also revokes the
// blob object-URL once the click has been dispatched.
const saveToDisk = (fileURL, fileName) =>{
    // for non-IE
    if (!window.ActiveXObject) {
        var save = document.createElement('a');
        save.href = fileURL;
        save.download = fileName || 'unknown';
        // NOTE(review): assigning a string directly to .style (instead of
        // .style.cssText) is ignored by some browsers - confirm the element
        // is actually hidden where it matters.
        save.style = 'display:none;opacity:0;color:transparent;';
        (document.body || document.documentElement).appendChild(save);
        if (typeof save.click === 'function') {
            save.click();
        } else {
            // Fallback for environments without a native click(): dispatch a
            // synthetic click event instead.
            save.target = '_blank';
            var event = document.createEvent('Event');
            event.initEvent('click', true, true);
            save.dispatchEvent(event);
        }
        // Release the object URL created by the caller.
        (window.URL || window.webkitURL).revokeObjectURL(save.href);
    }
    // for IE
    else if (!!window.ActiveXObject && document.execCommand) {
        var _window = window.open(fileURL, '_blank');
        _window.document.close();
        _window.document.execCommand('SaveAs', true, fileName || fileURL)
        _window.close();
    }
}
// Thunk: fetch the filtered project list as a CSV blob, save it to disk as
// project_list.csv, and record the blob URL in the store. All filter
// arguments are forwarded to the API endpoint builder.
// NOTE(review): errors are silently swallowed by the empty catch -
// presumably a deliberate best-effort download; confirm that no user-facing
// error feedback is wanted.
export const downLoadProjectListCsv = (year='', keyword='', source='', sectors='', units='', sdgs='', type='', signatureSolution='', target='', markerId='', markerSubType='', l2marker='', key=0) => (dispatch) => {
    dispatch(exportCSVReset());
    return Api.downLoadCSV(Api.API_DOWNLOAD_CSV_PROJECT_LISTS(year,keyword,source,sectors,units,sdgs,type,signatureSolution,target,markerId,markerSubType,l2marker, key)).then(resp => {
        const url = window.URL.createObjectURL(resp);
        saveToDisk(url,'project_list.csv');
        dispatch(exportCSVSuccess(url));
    }).catch((exception) => {
    });
};

// Thunk: fetch one project's detail CSV and save it under fileName.
// Same silent-error caveat as above.
export const downLoadProjectDetailsCsv = (projectId,item,search,category,fileName='data.csv') => (dispatch) => {
    dispatch(exportCSVReset());
    return Api.downLoadCSV(Api.API_DOWNLOAD_CSV_PROJECT_DETAILS(projectId,item,search,category)).then(resp => {
        const url = window.URL.createObjectURL(resp)
        saveToDisk(url,fileName)
        dispatch(exportCSVSuccess(url))
    }).catch((exception) => {
    });
};

// Thunk: fetch the donors CSV for the given year/fund filters and save it
// as donors.csv. Same silent-error caveat as above.
export const downLoadDonorsDetailsCsv = (year,fundType,fundStream,donorType) => (dispatch) => {
    dispatch(exportCSVReset())
    return Api.downLoadCSV(Api.API_DOWNLOAD_CSV_DONORS(year,fundType,fundStream,donorType)).then(resp => {
        const url = window.URL.createObjectURL(resp)
        saveToDisk(url,'donors.csv')
        dispatch(exportCSVSuccess(url))
    }).catch((exception) => {
    });
};
|
gburd/wave
|
test/org/waveprotocol/box/server/robots/agent/RobotAgentUtilTest.java
|
/**
* Copyright 2011 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.waveprotocol.box.server.robots.agent;
import junit.framework.TestCase;
/**
* Unit tests for the {@link RobotAgentUtil}.
*
* @author <EMAIL> (<NAME>)
*/
public class RobotAgentUtilTest extends TestCase {

    /**
     * Verifies RobotAgentUtil.lastEnteredLineOf(): it returns the final
     * newline-terminated line of the input (without the newline), the empty
     * string for a lone newline, and null when the input does not end with
     * a newline (test3 - the last line is still being typed).
     */
    public void testLastEnteredLineOf() throws Exception {
        String test1 = "Hello World!\n";
        String test2 = "\n\nHello World!\n";
        String test3 = "\n\nHello World!\nHello World without line end.";
        String test4 = "\n\nHello World!\nHello World with line end.\n";
        String test5 = "\n";
        assertEquals("Hello World!", RobotAgentUtil.lastEnteredLineOf(test1));
        assertEquals("Hello World!", RobotAgentUtil.lastEnteredLineOf(test2));
        assertNull(RobotAgentUtil.lastEnteredLineOf(test3));
        assertEquals("Hello World with line end.", RobotAgentUtil.lastEnteredLineOf(test4));
        assertEquals("", RobotAgentUtil.lastEnteredLineOf(test5));
    }
}
|
fysabelah/URI
|
BEGINNER/1096.cpp
|
#include <iostream>
using namespace std;

// Prints "I=i J=j" for i in {1, 3, 5, 7, 9} and, for each i, j counting
// down 7, 6, 5 - identical output to the original do/while formulation,
// expressed as two nested counted loops.
int main() {
    for (int i = 1; i != 11; i += 2) {
        for (int j = 7; j > 4; --j) {
            cout << "I=" << i << " J=" << j << endl;
        }
    }
    return 0;
}
|
jasonwebster/tumblargh
|
lib/tumblargh/renderer/base.rb
|
<gh_stars>10-100
module Tumblargh
  module Renderer
    # Base class for node renderers: wraps a parse-tree node plus the data
    # context it renders against, and delegates unknown tag lookups to that
    # context via method_missing.
    class Base
      class << self
        # Define a simple tag on the block.
        # Name being the tag name, and optionally the attribute/method to call
        # on the context. If the second argument is left off, it'll just use
        # the tag name.
        def contextual_tag(name, attribute=nil)
          class_eval do
            define_method name do
              context.send(attribute || name)
            end
          end
        end
      end

      attr_reader :node, :options
      attr_accessor :context

      alias_method :config, :options # Backwards compatibility with old Document renderer

      def initialize(node, context, options = {})
        @node = node
        @context = context
        # NOTE(review): with_indifferent_access requires ActiveSupport to be
        # loaded - confirm it is a declared dependency.
        @options = options.with_indifferent_access
      end

      # Walks up the context chain until an actual Post resource is found.
      # NOTE(review): loops forever if no Post is in the chain - presumably
      # renderers are only instantiated inside a post scope; confirm.
      def context_post
        real_post = context
        while not real_post.is_a?(::Tumblargh::Resource::Post)
          real_post = real_post.context
        end
        real_post
      end

      def escape_html(str)
        CGI.escapeHTML(str)
      end

      def escape_url(url)
        CGI.escape(url)
      end

      # Removes anything that looks like an HTML/XML tag from str.
      def strip_html(str)
        str.gsub(/<\/?[^>]*>/, '')
      end

      # Subclasses override; the base renders nothing (nil).
      def render
      end

      alias_method :to_s, :render

      # Delegates unknown tag methods to the wrapped context object.
      def method_missing(method, *arguments)
        raise "Can't find anything to do with '#{method}'" if context.nil?
        context.send(method, *arguments)
      end
    end
  end
end
|
julienchastang/thredds
|
cdm/src/main/java/ucar/nc2/util/net/EasySSLProtocolSocketFactory.java
|
/*
* Copyright 1998-2009 University Corporation for Atmospheric Research/Unidata
*
* Portions of this software were developed by the Unidata Program at the
* University Corporation for Atmospheric Research.
*
* Access and use of this software shall impose the following obligations
* and understandings on the user. The user is granted the right, without
* any fee or cost, to use, copy, modify, alter, enhance and distribute
* this software, and any derivative works thereof, and its supporting
* documentation for any purpose whatsoever, provided that this entire
* notice appears in all copies of the software, derivative works and
* supporting documentation. Further, UCAR requests that the user credit
* UCAR/Unidata in any publications that result from the use of this
* software or in any product that includes this software. The names UCAR
* and/or Unidata, however, may not be used in any advertising or publicity
* to endorse or promote any products or commercial entity unless specific
* written permission is obtained from UCAR/Unidata. The user also
* understands that UCAR/Unidata is not obligated to provide the user with
* any support, consulting, training or assistance of any kind with regard
* to the use, operation and performance of this software nor to provide
* the user with any updates, revisions, new versions or "bug fixes."
*
* THIS SOFTWARE IS PROVIDED BY UCAR/UNIDATA "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL UCAR/UNIDATA BE LIABLE FOR ANY SPECIAL,
* INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
* FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
* NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
* WITH THE ACCESS, USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*
* ====================================================================
*
* Copyright 2002-2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* ====================================================================
*
* This software consists of voluntary contributions made by many
* individuals on behalf of the Apache Software Foundation. For more
* information on the Apache Software Foundation, please see
* <http://www.apache.org/>.
*
*/
package ucar.nc2.util.net;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.InetAddress;
import java.net.Socket;
import java.net.UnknownHostException;
import java.security.*;
import org.apache.commons.httpclient.ConnectTimeoutException;
import org.apache.commons.httpclient.Credentials;
import org.apache.commons.httpclient.auth.CredentialsNotAvailableException;
import org.apache.commons.httpclient.params.HttpConnectionParams;
import org.apache.commons.httpclient.protocol.ControllerThreadSocketFactory;
import org.apache.commons.httpclient.protocol.SecureProtocolSocketFactory;
import org.apache.commons.httpclient.protocol.ProtocolSocketFactory;
import org.apache.commons.httpclient.auth.CredentialsProvider;
import javax.net.ssl.*;
/**
 * A {@link ProtocolSocketFactory} that creates SSL sockets which accept
 * self-signed certificates (via {@code EasyX509TrustManager}).
 *
 * <p>This socket factory SHOULD NOT be used for productive systems
 * due to security reasons, unless it is a conscious decision and
 * you are perfectly aware of the security implications of accepting
 * self-signed certificates.</p>
 *
 * <p>Key/trust material is taken from the {@code HTTPSSLProvider} credentials
 * registered with the request's {@code HTTPAuthProvider}; when any piece is
 * missing the factory falls back to a trust-everything context with no client
 * keys (see {@link #stdauthenticate()}).</p>
 *
 * <p>Example of using this custom protocol socket factory for a specific host:
 * <pre>
 * Protocol easyhttps = new Protocol("https", new EasySSLProtocolSocketFactory(), 443);
 * HttpClient client = new HttpClient();
 * client.getHostConfiguration().setHost("localhost", 443, easyhttps);
 * GetMethod httpget = new GetMethod("/");
 * client.executeMethod(httpget);
 * </pre></p>
 *
 * DISCLAIMER: HttpClient developers DO NOT actively support this component.
 * The component is provided as a reference material, which may be inappropriate
 * for use without additional customization.
 */
public class EasySSLProtocolSocketFactory implements ProtocolSocketFactory {

    //////////////////////////////////////////////////

    /**
     * Lazily built SSL context, cached for the lifetime of this factory.
     * NOTE(review): cached even though it is derived from the first call's
     * params — later calls with different credentials reuse the first context.
     */
    private SSLContext sslcontext = null;

    /**
     * Constructor for EasySSLProtocolSocketFactory.
     */
    public EasySSLProtocolSocketFactory() {
        super();
    }

    /**
     * @see SecureProtocolSocketFactory#createSocket(java.net.Socket, java.lang.String, int, boolean)
     */
    public Socket createSocket(
        Socket socket,
        String host,
        int port,
        boolean autoClose)
        throws IOException, UnknownHostException {
        // No HttpConnectionParams are available in this overload;
        // getSSLContext(null, ...) falls back to the default trust-all context.
        return getSSLContext(null, host, port).getSocketFactory().createSocket(
            socket,
            host,
            port,
            autoClose);
    }

    /**
     * @see SecureProtocolSocketFactory#createSocket(java.lang.String, int)
     */
    public Socket createSocket(String host, int port)
        throws IOException, UnknownHostException {
        return getSSLContext(null, host, port).getSocketFactory().createSocket(host, port);
    }

    /**
     * @see SecureProtocolSocketFactory#createSocket(java.lang.String, int, java.net.InetAddress, int)
     */
    public Socket createSocket(
        String host,
        int port,
        InetAddress clientHost,
        int clientPort)
        throws IOException, UnknownHostException {
        return createSocket(
            host,
            port,
            clientHost,
            clientPort,
            new HttpConnectionParams());
    }

    /**
     * Attempts to get a new socket connection to the given host.
     *
     * <p>NOTE: the connection timeout from {@code params} is currently NOT
     * honoured — the socket is always created directly. The original code
     * contained a timeout branch (via ControllerThreadSocketFactory) that was
     * permanently disabled behind an {@code if (true)}; that dead code has
     * been removed without changing behavior. {@code localAddress}/
     * {@code localPort} are likewise not used on this path.</p>
     *
     * @param host         the host name/IP
     * @param port         the port on the host
     * @param localAddress the local host name/IP to bind the socket to (currently unused)
     * @param localPort    the port on the local machine (currently unused)
     * @param params       {@link HttpConnectionParams Http connection parameters}
     * @return Socket a new socket
     * @throws IOException          if an I/O error occurs while creating the socket
     * @throws UnknownHostException if the IP address of the host cannot be determined
     */
    public Socket createSocket(
        final String host,
        final int port,
        final InetAddress localAddress,
        final int localPort,
        final HttpConnectionParams params)
        throws IOException, UnknownHostException, ConnectTimeoutException {
        if (params == null) {
            throw new IllegalArgumentException("Parameters may not be null");
        }
        return getSSLContext(params, host, port).getSocketFactory().createSocket(host, port);
    }

    /**
     * Returns the cached SSL context, creating it on first use.
     */
    private SSLContext getSSLContext(HttpConnectionParams params, String host, int port) throws HTTPException {
        if (this.sslcontext == null) {
            this.sslcontext = createSSLContext(params, host, port);
        }
        return this.sslcontext;
    }

    /**
     * Builds an SSL context from the SSL credentials registered with the
     * request's auth provider, falling back to {@link #stdauthenticate()}
     * (trust-all, no client key) whenever any piece is missing.
     */
    private SSLContext createSSLContext(HttpConnectionParams params, String host, int port) throws HTTPException {
        SSLContext sslcontext = null;
        KeyManager[] keymanagers = null;
        KeyStore keystore = null;
        KeyStore truststore = null;
        TrustManager[] trustmanagers = null;
        String keypassword = null;
        String keypath = null;
        String trustpassword = null;
        String trustpath = null;
        try {
            // BUGFIX: the (Socket,host,port,autoClose) and (host,port) overloads
            // call this with params == null; previously that caused an NPE on
            // params.getParameter(...) below. Treat it as "no credentials".
            if (params == null) return stdauthenticate();
            // Get the HTTPAuthProvider
            HTTPAuthProvider provider;
            provider = (HTTPAuthProvider) params.getParameter(CredentialsProvider.PROVIDER);
            if (provider == null) return stdauthenticate();
            // Abuse the getCredentials() api to obtain the SSL credentials
            Credentials creds = null;
            try {
                creds = provider.getCredentials(HTTPSSLScheme.Default, null, 0, false);
                if (creds == null) return stdauthenticate();
            } catch (CredentialsNotAvailableException e) {
                return stdauthenticate();
            }
            HTTPSSLProvider sslprovider = (creds == null ? null : (HTTPSSLProvider) creds);
            if (sslprovider == null)
                return stdauthenticate();
            keypath = (String) sslprovider.getKeystore();
            keypassword = (String) sslprovider.getKeypassword();
            trustpath = (String) sslprovider.getTruststore();
            trustpassword = (String) sslprovider.getTrustpassword();
            // Client key material (buildstore returns null unless BOTH the
            // path and the password were supplied).
            keystore = buildstore(keypath, keypassword, "key");
            if (keystore != null) {
                KeyManagerFactory kmfactory = KeyManagerFactory.getInstance("SunX509");
                kmfactory.init(keystore, keypassword.toCharArray());
                keymanagers = kmfactory.getKeyManagers();
            }
            // Trust material; EasyX509TrustManager(null) accepts self-signed certs.
            truststore = buildstore(trustpath, trustpassword, "trust");
            if (truststore != null) {
                trustmanagers = new TrustManager[]{new EasyX509TrustManager(truststore)};
            } else {
                trustmanagers = new TrustManager[]{new EasyX509TrustManager(null)};
            }
            sslcontext = SSLContext.getInstance("SSL");
            sslcontext.init(keymanagers, trustmanagers, null);
            return sslcontext;
        } catch (KeyManagementException e) {
            throw new HTTPException("Key Management exception: " + e.getMessage());
        } catch (NoSuchAlgorithmException e) {
            throw new HTTPException("Unsupported algorithm exception: " + e.getMessage());
        } catch (KeyStoreException e) {
            throw new HTTPException("Keystore exception: " + e.getMessage());
        } catch (GeneralSecurityException e) {
            throw new HTTPException("Key management exception: " + e.getMessage());
        } catch (IOException e) {
            throw new HTTPException("I/O error reading keystore/truststore file: " + e.getMessage());
        }
    }

    // Do no authentication: trust-everything context with no client keys.
    static private SSLContext
    stdauthenticate()
        throws KeyManagementException, NoSuchAlgorithmException, KeyStoreException
    {
        TrustManager[] trustmanagers = new TrustManager[]{new EasyX509TrustManager(null)};
        SSLContext sslcontext = SSLContext.getInstance("SSL");
        sslcontext.init(null, trustmanagers, null);
        return sslcontext;
    }

    /**
     * Loads a JKS keystore from {@code path} using {@code password}; returns
     * null when either is null. {@code prefix} ("key"/"trust") is used only
     * in error messages.
     */
    static KeyStore
    buildstore(String path, String password, String prefix) throws HTTPException {
        KeyStore store = null;
        try {
            if (path != null && password != null) {
                File storefile = new File(path);
                if (!storefile.canRead())
                    throw new HTTPException("Cannot read specified " + prefix + "store:" + storefile.getAbsolutePath());
                store = KeyStore.getInstance("JKS");
                InputStream is = null;
                try {
                    is = new FileInputStream(storefile);
                    store.load(is, password.toCharArray());
                } finally {
                    if (is != null) is.close();
                }
            }
        } catch (Exception e) {
            throw new HTTPException(e);
        }
        return store;
    }
}
|
sahilpaudel/AfterGlow
|
frontend/app/helpers/get-chart-icon.js
|
<filename>frontend/app/helpers/get-chart-icon.js
import Ember from 'ember';
import ResultViewMixin from 'frontend/mixins/result-view-mixin';
// Ember template helper: maps a chart/result-view type name (first positional
// param, e.g. "bar") to its icon via the resultViewIcons map declared on
// ResultViewMixin.
export function getChartIcon(params /*, hash*/ ) {
  if (params && params[0]) {
    // Lookup key is lower-cased here...
    return ResultViewMixin.mixins[0].properties.resultViewIcons[params[0].toLowerCase()];
  }
  // ...but the fallback uses the capitalized key 'Line'.
  // NOTE(review): this casing inconsistency looks suspicious — confirm which
  // casing resultViewIcons actually keys on before relying on the fallback.
  return ResultViewMixin.mixins[0].properties.resultViewIcons['Line'];
}
export default Ember.Helper.helper(getChartIcon);
|
WorldThirteen/cell-simulator
|
src/constants/world.js
|
// World/simulation tuning constants. Semantics are inferred from the names —
// confirm against usage sites before changing:
export const PARTS_NUM = 15;   // presumably number of parts per unit/cell
export const UNIT_RAD = 30;    // presumably unit radius (pixels?)
export const FOOD_RAD = 8;     // presumably food-item radius
export const LINE_LENGTH = 25; // presumably segment/line length between points
|
maximaximal/BomberPi
|
src/Client/BombPlaceSystem.cpp
|
<reponame>maximaximal/BomberPi
#include <Client/BombPlaceSystem.hpp>
#include <easylogging++.h>
#include <Client/PlayerInputComponent.hpp>
#include <Client/BombLayerComponent.hpp>
namespace Client
{
    // System that turns a player's ACTION input into a placed bomb.
    // The EntityFactory is owned elsewhere; only a raw pointer is kept.
    BombPlaceSystem::BombPlaceSystem(EntityFactory *entityFactory)
    {
        m_entityFactory = entityFactory;
    }
    BombPlaceSystem::~BombPlaceSystem()
    {
    }
    // Per-frame update: for every entity with input + bomb-layer components,
    // place a bomb when ACTION is active and the layer allows it.
    void BombPlaceSystem::update()
    {
        // Global lock (set via lockBombPlacing) suppresses all placement.
        if(m_noBombPlacing)
        {
            return;
        }
        for(auto &entity : getEntities())
        {
            auto &input = entity.getComponent<PlayerInputComponent>();
            auto &bombLayer = entity.getComponent<BombLayerComponent>();
            if(input.isActive(PlayerInputEnum::ACTION))
            {
                if(bombLayer.canPlace())
                {
                    m_entityFactory->createBomb(bombLayer.placePos, entity, bombLayer.getBombTiles(), bombLayer.getBombTurns());
                    // When the counter is already 0 this consumes one queued
                    // ADDITIONAL_BOMB powerup instead of decrementing.
                    if(bombLayer.bombsRemaining == 0)
                    {
                        bombLayer.powerupQueue.removeOneOfPredefined(Powerup::ADDITIONAL_BOMB);
                    }
                    else
                    {
                        bombLayer.bombsRemaining -= 1;
                    }
                    // Reset placement bookkeeping so the next press re-validates.
                    bombLayer.lastPlacedBomb = -1;
                    bombLayer.positionOkay = false;
                }
            }
        }
    }
    // Enable (state == true) or disable the global bomb-placement lock.
    void BombPlaceSystem::lockBombPlacing(bool state)
    {
        m_noBombPlacing = state;
    }
}
|
saintaxl/work
|
web/src/main/java/com/spring/demo/UserLogonController.java
|
<reponame>saintaxl/work<gh_stars>0
/**
*
*/
package com.spring.demo;
import java.util.Date;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Controller;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.servlet.ModelAndView;
/**
* @author Shawn
*
*/
@Controller
public class UserLogonController {

    // NOTE(review): conventionally this would be `private static final Logger`;
    // it is left as a public instance field to preserve the existing interface.
    public Logger logger = LoggerFactory.getLogger(UserLogonController.class);

    /**
     * Demo endpoint exercising the SLF4J logging setup (error/info,
     * parameterized messages); returns an empty ModelAndView.
     */
    @RequestMapping("/getUser")
    public ModelAndView getUser(){
        logger.error("GetUserError {}",new Date());
        logger.info("GetUserInfo {}","info");
        logger.info("throw exception errorcode:[{}] | errormessage:{}","ER001032","no password");
        ModelAndView mv = new ModelAndView();
        return mv;
    }
}
|
oldbk/bookmaker
|
common/extensions/yii-node-socket/lib/js/server/events/client/public.data.js
|
/**
 * Node-socket server event "public_data": looks a key up in the shared
 * 'publicData' component and hands the value to the supplied callback.
 * componentManager is injected by the framework before handler() runs.
 */
var public_data = {
    componentManager: null,

    name: 'public_data',

    init: function () {},

    handler: function (key, fn) {
        var store = public_data.componentManager.get('publicData');
        fn(store.get(key));
    }
};

module.exports = public_data;
|
atool/mybatis-fluent
|
fluent-mybatis-test/src/test/java/cn/org/atool/fluent/mybatis/test/issue/Issue_I3QVPB.java
|
<reponame>atool/mybatis-fluent<gh_stars>0
package cn.org.atool.fluent.mybatis.test.issue;
import cn.org.atool.fluent.mybatis.base.crud.IQuery;
import cn.org.atool.fluent.mybatis.base.crud.JoinBuilder;
import cn.org.atool.fluent.mybatis.base.free.FreeQuery;
import cn.org.atool.fluent.mybatis.base.model.SqlOp;
import cn.org.atool.fluent.mybatis.generator.shared3.mapper.MemberMapper;
import cn.org.atool.fluent.mybatis.test.BaseTest;
import org.junit.jupiter.api.Test;
import org.springframework.beans.factory.annotation.Autowired;
import org.test4j.hamcrest.matcher.string.StringMode;
/**
 * Regression test for issue I3QVPB: nested {@code FreeQuery} sub-selects
 * combined via {@code JoinBuilder} must render the expected SQL and bind
 * the parameters in order. Both tests build the same logical query two
 * different ways (pre-aliased queries vs. aliases given to JoinBuilder).
 */
public class Issue_I3QVPB extends BaseTest {
    @Autowired
    MemberMapper mapper;

    /**
     * <pre>
     * SELECT * FROM
     *  (SELECT crs.* FROM
     *      (SELECT repository_id, COALESCE(total_sec_issue_count, 0) as total_sec_issue_count
     *          FROM code_repository_statistics
     *          WHERE organization_id = ? AND gmt_create = ? AND organization_id = ?) crs
     *      JOIN
     *      (SELECT identifier
     *          FROM code_repository
     *          WHERE organization_id = ? AND project_report_value = ?) cr
     *      ON crs.repository_id = cr.identifier) ccsr
     *  ORDER BY total_sec_issue_count DESC LIMIT ? OFFSET ?
     * </pre>
     */
    @Test
    public void issue() {
        // Innermost sub-select over the statistics table.
        FreeQuery query11 = new FreeQuery("code_repository_statistics")
            .select("repository_id", "COALESCE(total_sec_issue_count, 0) as total_sec_issue_count")
            .where.apply("repository_id", SqlOp.EQ, "1")
            .and.apply("gmt_create", SqlOp.EQ, "2021-05-11 17:12:33.322")
            .and.apply("organization_id", SqlOp.EQ, "1")
            .end();
        // Wraps the sub-select under alias "crs".
        FreeQuery query1 = new FreeQuery(query11, "crs")
            .select("csr.*");
        FreeQuery query21 = new FreeQuery("code_repository")
            .select("identifier")
            .where.apply("organization_id", SqlOp.EQ, "1")
            .and.apply("project_report_value", SqlOp.EQ, "2")
            .end();
        FreeQuery query2 = new FreeQuery(query21, "cr");
        IQuery join = JoinBuilder.from(query1)
            .join(query2)
            .onEq("repository_id", "identifier").endJoin()
            .build();
        try {
            // Execution may fail (no real rows needed) and is ignored; only
            // the SQL captured by the test db wrapper is asserted below.
            mapper.listEntity(new FreeQuery(join, "ccsr")
                .select("*")
                .orderBy.desc("total_sec_issue_count").end()
                .limit(10));
        } catch (Exception ignored) {
        }
        db.sqlList().wantFirstSql().eq("" +
            "SELECT * FROM " +
            "(SELECT csr.* FROM " +
            "(SELECT `repository_id`, COALESCE(total_sec_issue_count, 0) as total_sec_issue_count " +
            "FROM `code_repository_statistics` " +
            "WHERE `repository_id` = ? " +
            "AND `gmt_create` = ? " +
            "AND `organization_id` = ?) crs " +
            "JOIN " +
            "(SELECT `identifier` FROM `code_repository` " +
            "WHERE `organization_id` = ? AND `project_report_value` = ?) cr " +
            "ON crs.`repository_id` = cr.`identifier`) ccsr " +
            "ORDER BY ccsr.`total_sec_issue_count` DESC " +
            "LIMIT ?, ?");
        db.sqlList().wantFirstPara().eq(new Object[]{"1", "2021-05-11 17:12:33.322", "1", "1", "2", 0, 10});
    }

    /** Same query, but aliases are supplied to JoinBuilder.from/join instead. */
    @Test
    public void issue_1() {
        FreeQuery query1 = new FreeQuery("code_repository_statistics")
            .select("repository_id", "COALESCE(total_sec_issue_count, 0) as total_sec_issue_count")
            .where.apply("repository_id", SqlOp.EQ, "1")
            .and.apply("gmt_create", SqlOp.EQ, "2021-05-11 17:12:33.322")
            .and.apply("organization_id", SqlOp.EQ, "1")
            .end();
        FreeQuery query2 = new FreeQuery("code_repository")
            .select("identifier")
            .where.apply("organization_id", SqlOp.EQ, "1")
            .and.apply("project_report_value", SqlOp.EQ, "2")
            .end();
        IQuery join = JoinBuilder.from(query1, "crs")
            .join(query2, "cr")
            .onEq("repository_id", "identifier").endJoin()
            .select("csr.*")
            .build();
        try {
            mapper.listEntity(new FreeQuery(join, "ccsr")
                .select("*")
                .orderBy.desc("total_sec_issue_count").end()
                .limit(10));
        } catch (Exception ignored) {
        }
        // SameAsSpace: whitespace-insensitive SQL comparison.
        db.sqlList().wantFirstSql().eq("" +
            "SELECT * FROM " +
            "(SELECT csr.* FROM " +
            "(SELECT `repository_id`, COALESCE(total_sec_issue_count, 0) as total_sec_issue_count " +
            "FROM `code_repository_statistics` " +
            "WHERE `repository_id` = ? " +
            "AND `gmt_create` = ? " +
            "AND `organization_id` = ?) crs " +
            "JOIN " +
            "(SELECT `identifier` FROM `code_repository` " +
            "WHERE `organization_id` = ? AND `project_report_value` = ?) cr " +
            "ON crs.`repository_id` = cr.`identifier`) ccsr " +
            "ORDER BY ccsr.`total_sec_issue_count` DESC " +
            "LIMIT ?, ?", StringMode.SameAsSpace);
        db.sqlList().wantFirstPara().eqList("1", "2021-05-11 17:12:33.322", "1", "1", "2", 0, 10);
    }
}
|
githubwua/cdap
|
cdap-ui/app/cdap/components/RulesEngineHome/ImportRulebookWizard/ImportRulebookStore.js
|
/*
* Copyright © 2017 <NAME>, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
import { createStore, combineReducers } from 'redux';
import { defaultAction } from 'services/helpers';
// Initial state for the single "upload" step of the import-rulebook wizard.
const DEFAULTUPLOADSTEP = {
  file: {
    name: '',
    contents: '',
  },
  // Marks the wizard step as complete once a file has been chosen.
  __complete: false,
};
const DEFAULTSTATE = {
  upload: DEFAULTUPLOADSTEP,
};
const IMPORTRULEBOOKACTIONS = {
  UPLOADFILE: 'UPLOADFILE',
};
// Reducer for the upload step: stores the chosen file and flags completion.
const upload = (state = DEFAULTUPLOADSTEP, action = defaultAction) => {
  switch (action.type) {
    case IMPORTRULEBOOKACTIONS.UPLOADFILE:
      return Object.assign({}, state, {
        file: action.payload.file,
        __complete: true,
      });
    default:
      return state;
  }
};
// Store is wired to the Redux DevTools browser extension when present.
const ImportRulebookStore = createStore(
  combineReducers({
    upload,
  }),
  DEFAULTSTATE,
  window.__REDUX_DEVTOOLS_EXTENSION__ && window.__REDUX_DEVTOOLS_EXTENSION__()
);
export { IMPORTRULEBOOKACTIONS };
export default ImportRulebookStore;
|
fossabot/me-1
|
packages/service/src/lib/authInfo.js
|
import {compositeKeySeparator} from "@randy.tarampi/js";
import {Record} from "immutable";
export const AUTH_INFO_TYPE = {
oAuth: "OAUTH_TOKEN",
oAuth2: "OAUTH2_TOKEN"
};
export class AuthInfo extends Record({
token: null,
tokenSecret: null,
id: null,
source: null,
type: AUTH_INFO_TYPE.oAuth2,
raw: null
}) {
get uid() {
return `${this.source}${compositeKeySeparator}${this.id}`;
}
}
export default AuthInfo;
|
micmc422/woo-next-photo-paris
|
src/components/checkout/CartItemsList.js
|
import { uniqueId } from "lodash";
import { useTranslation } from "next-i18next";
import Image from "next/image";
import Link from "next/link";
import { getUpdatedItems, updateCart } from "../../functions";
import { Cross } from "../icons";
const CartItemsList = ({ cart }) => {
const { t } = useTranslation("shop");
/*
* Handle remove product click.
*
* @param {Object} event event
* @param {Integer} Product Id.
*
* @return {void}
*/
const handleRemoveProductClick = (event, cartKey, products) => {
event.stopPropagation();
if (products.length) {
// By passing the newQty to 0 in updateCart Mutation, it will remove the item.
const newQty = 0;
const updatedItems = getUpdatedItems(products, newQty, cartKey);
updateCart({
variables: {
input: {
clientMutationId: v4(),
items: updatedItems,
},
},
});
}
};
// console.log(cart);
// console.log(!cart?.products.length);
if (!cart?.products.length) {
return <div></div>;
}
return (
<div className="flex flex-col md:hidden">
{cart.products.map((item) => (
<div
className={`relative text-xs shadow-lg rounded p-1 bg-gray-100`}
key={uniqueId(item.productId)}
>
<div className={`flex w-full mb-2 h-48 relative`}>
<Image
layout="fill"
objectFit="cover"
src={item.image.sourceUrl}
alt={item.image.title}
/>
<div
className="absolute top-0 right-0 flex transform scale-90"
onClick={(event) =>
handleRemoveProductClick(event, item.cartKey, cart.products)
}
>
<Cross />
</div>
</div>
<div className={`flex w-full mb-1`}>
<div className="flex-shrink-0 w-1/6 p-1 text-gray-400 bg-gray-200 rounded">
{t("photo")}
</div>
<div className="relative flex w-5/6 p-1 font-bold">
<span>{item.name}</span>
</div>
</div>
<div className={`items-center flex justify-between`}>
<div className="p-1 text-gray-400 bg-gray-200 rounded">
{t("quantite")}
</div>
<div className="p-1 font-bold"> {item.qty} </div>
<div className="p-1 text-gray-400 bg-gray-200 rounded">
{t("prix")}
</div>
<div className="p-1 font-bold">
{"string" !== typeof item.price
? item.price.toFixed(2)
: item.price}
</div>
<div className="p-1 text-gray-400 bg-gray-200 rounded">
{t("soustotal")}
</div>
<div className="p-1 font-bold">
{"string" !== typeof item.totalPrice
? item.totalPrice.toFixed(2)
: item.totalPrice}
</div>
</div>
</div>
))}
<span className="block pt-4 text-right">
<span className="text-gray-500">{t("total")} : </span>
<span className="font-extrabold">
{"string" !== typeof cart.totalProductsPrice
? cart.totalProductsPrice.toFixed(2)
: cart.totalProductsPrice}
</span>
</span>
<Link href="/commande">
<button
className="w-auto px-5 py-3 mt-5 text-white rounded-sm bg-brand-500 xl:w-full"
aria-label={t("finalisercommande")}
>
<span className="woo-next-cart-checkout-txt">
{t("finalisercommande")}
</span>
<i className="fas fa-long-arrow-alt-right" />
</button>
</Link>
</div>
);
};
export default CartItemsList;
|
drigols/Studies
|
modules/python-codes/modules/tips-and-tricks/debugger/src/example-02.py
|
<filename>modules/python-codes/modules/tips-and-tricks/debugger/src/example-02.py<gh_stars>0
# Demo of Python's built-in breakpoint() (PEP 553): each call drops execution
# into the debugger (pdb by default) so locals can be inspected interactively.

count = 1
while count <= 10:
    store = count  # keep the pre-increment value around for inspection
    count += 1
    breakpoint()  # pauses on every loop iteration

breakpoint()

name = "Rodrigo"
age = 32

breakpoint()

hello = "Hello World!"

breakpoint()

# Test
assert hello == "Hello World!"
|
Darkartt/SoftUni
|
SoftUni-Basic/pre_exam/test_3.py
|
<gh_stars>0
# SoftUni exercise: dance-troupe earnings. Reads dancer count, points per
# dancer, season and place, applies season/place-dependent costs, gives 75%
# of the remainder to charity and splits the rest between the dancers.

amount_of_dancers = int(input())
amount_of_points = float(input())
season = input()  # "summer" or "winter"
place = input()   # "Bulgaria" or "Abroad"

money_left = 0
charity = 0
dancers_point_won = amount_of_dancers * amount_of_points
# Performing abroad earns a 50% bonus on the base total.
dancers_point_won_aboard = dancers_point_won + (dancers_point_won * 0.50)
money_per_dancer = 0

if place == "Bulgaria":
    if season == "summer":
        # 5% costs in summer; 75% of the remainder goes to charity.
        dancers_point_won = dancers_point_won - (dancers_point_won * 0.05)
        charity = dancers_point_won * 0.75
        money_left = dancers_point_won - charity
    elif season == "winter":
        # 8% costs in winter.
        dancers_point_won = dancers_point_won - (dancers_point_won * 0.08)
        charity = dancers_point_won * 0.75
        money_left = dancers_point_won - charity
if place == "Abroad":
    if season == "summer":
        # 10% costs abroad in summer, applied to the bonus-adjusted total.
        dancers_point_won_aboard = dancers_point_won_aboard - (dancers_point_won_aboard * 0.10)
        charity = dancers_point_won_aboard * 0.75
        money_left = dancers_point_won_aboard - charity
    elif season == "winter":
        # 15% costs abroad in winter.
        dancers_point_won_aboard = dancers_point_won_aboard - (dancers_point_won_aboard * 0.15)
        charity = dancers_point_won_aboard * 0.75
        money_left = dancers_point_won_aboard - charity

money_per_dancer = money_left / amount_of_dancers
# NOTE: the trailing space after the charity amount presumably matches the
# judge's expected output — do not "fix" it.
print(f"Charity - {charity:.2f} ")
print(f"Money per dancer - {money_per_dancer:.2f}")
|
nattatorn-dev/react-filters
|
test/Count.test.js
|
<filename>test/Count.test.js<gh_stars>10-100
import React from 'react';
const { describe, it, __base } = global;
import { shallow } from 'enzyme';
import sinon from 'sinon';
import { expect } from 'chai';
const { Count } = require(`${__base}components`);
// Unit tests for the Count component (shallow-rendered with enzyme;
// onChange is observed via a sinon spy).
describe('Count Component', () => {
  // Upper/lower arrows must report value +1 / -1 with the matching action.
  it('should call onChange function with correct args on click', () => {
    const onChange = sinon.spy();
    const wrapper = shallow(
      <Count
        name='count'
        onChange={onChange}
        value={5}
      />
    );
    const args1 = {
      name: 'count',
      value: 6,
      action: 'increased'
    };
    const args2 = {
      name: 'count',
      value: 4,
      action: 'decreased'
    };
    wrapper.find('.cb-upper').simulate('click');
    expect(onChange.calledWith(args1)).to.equal(true);
    wrapper.find('.cb-lower').simulate('click');
    expect(onChange.calledWith(args2)).to.equal(true);
    expect(onChange.calledTwice).to.equal(true);
  });
  // A disabled counter must ignore clicks entirely.
  it('should be inactive when disabled', () => {
    const onChange = sinon.spy();
    const wrapper = shallow(
      <Count
        name='count'
        onChange={onChange}
        value={5}
        disabled
      />
    );
    wrapper.find('.cb-lower').simulate('click');
    expect(onChange.calledOnce).to.equal(false);
  });
  // Clicks that would leave the [min, max] range are ignored; in-range fire.
  it('should only be active in the provided range', () => {
    const onChange = sinon.spy();
    const wrapper = shallow(
      <Count
        name='count'
        onChange={onChange}
        value={5}
        max={5}
        min={0}
      />
    );
    wrapper.find('.cb-upper').simulate('click');
    expect(onChange.calledOnce).to.equal(false);
    wrapper.find('.cb-lower').simulate('click');
    expect(onChange.calledOnce).to.equal(true);
  });
  // A custom step changes the reported value by that amount per click.
  it('should change with the provided step', () => {
    const onChange = sinon.spy();
    const wrapper = shallow(
      <Count
        name='count'
        onChange={onChange}
        value={5}
        step={2}
      />
    );
    wrapper.find('.cb-upper').simulate('click');
    const args1 = {
      name: 'count',
      value: 7,
      action: 'increased'
    };
    expect(onChange.calledWith(args1)).to.equal(true);
    wrapper.find('.cb-lower').simulate('click');
    const args2 = {
      name: 'count',
      value: 3,
      action: 'decreased'
    };
    expect(onChange.calledWith(args2)).to.equal(true);
  });
});
|
mdoshi96/beacls
|
docs/0.0.1/search/variables_e.js
|
// Generated documentation search index (Doxygen-style HTML anchors) —
// presumably auto-generated; do not edit by hand.
var searchData=
[
  ['quiet',['quiet',['../dd/dfa/classhelperOC_1_1HJIPDE__extraArgs.html#abf0a76a1c7e38dbb797f7859fff9a791',1,'helperOC::HJIPDE_extraArgs']]]
];
|
WJLGH/jnoa
|
src/main/java/com/thinkgem/jeesite/modules/oa/web/OaTaskRecordController.java
|
/**
* Copyright © 2012-2016 <a href="https://github.com/thinkgem/jeesite">JeeSite</a> All rights reserved.
*/
package com.thinkgem.jeesite.modules.oa.web;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.thinkgem.jeesite.modules.infc.entity.DataStatusList;
import com.thinkgem.jeesite.modules.oa.entity.OaTask;
import com.thinkgem.jeesite.modules.oa.service.OaTaskService;
import com.thinkgem.jeesite.modules.sys.entity.User;
import com.thinkgem.jeesite.modules.sys.utils.UserUtils;
import org.apache.shiro.authz.annotation.RequiresPermissions;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Controller;
import org.springframework.ui.Model;
import org.springframework.web.bind.annotation.*;
import org.springframework.web.servlet.mvc.support.RedirectAttributes;
import com.thinkgem.jeesite.common.config.Global;
import com.thinkgem.jeesite.common.persistence.Page;
import com.thinkgem.jeesite.common.web.BaseController;
import com.thinkgem.jeesite.common.utils.StringUtils;
import com.thinkgem.jeesite.modules.oa.entity.OaTaskRecord;
import com.thinkgem.jeesite.modules.oa.service.OaTaskRecordService;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
/**
 * Controller for per-user task reply/record details.
 * @author ctt
 * @version 2019-01-10
 */
@Controller
@RequestMapping(value = "${adminPath}/oa/oaTaskRecord")
public class OaTaskRecordController extends BaseController {

    @Autowired
    private OaTaskRecordService oaTaskRecordService;

    @Autowired
    private OaTaskService oaTaskService;

    /** Resolves the request's OaTaskRecord: loads by id when given, otherwise a fresh instance. */
    @ModelAttribute
    public OaTaskRecord get(@RequestParam(required=false) String id) {
        OaTaskRecord entity = null;
        if (StringUtils.isNotBlank(id)){
            entity = oaTaskRecordService.get(id);
        }
        if (entity == null){
            entity = new OaTaskRecord();
        }
        return entity;
    }

    /** Paged list view of task reply records. */
    @RequiresPermissions("oa:oaTaskRecord:view")
    @RequestMapping(value = {"list", ""})
    public String list(OaTaskRecord oaTaskRecord, HttpServletRequest request, HttpServletResponse response, Model model) {
        Page<OaTaskRecord> page = oaTaskRecordService.findPage(new Page<OaTaskRecord>(request, response), oaTaskRecord);
        model.addAttribute("page", page);
        return "modules/oa/oaTaskRecordList";
    }

    /** Edit/view form for a single record. */
    @RequiresPermissions("oa:oaTaskRecord:view")
    @RequestMapping(value = "form")
    public String form(OaTaskRecord oaTaskRecord, Model model) {
        model.addAttribute("oaTaskRecord", oaTaskRecord);
        return "modules/oa/oaTaskRecordForm";
    }

    /** Validates and saves a record, then redirects back to the list. */
    @RequiresPermissions("oa:oaTaskRecord:edit")
    @RequestMapping(value = "save")
    public String save(OaTaskRecord oaTaskRecord, Model model, RedirectAttributes redirectAttributes) {
        if (!beanValidator(model, oaTaskRecord)){
            return form(oaTaskRecord, model);
        }
        oaTaskRecordService.save(oaTaskRecord);
        // User-facing flash message, intentionally kept in Chinese
        // ("reply information saved successfully").
        addMessage(redirectAttributes, "保存回复信息成功");
        return "redirect:"+Global.getAdminPath()+"/oa/oaTaskRecord/?repage";
    }

    /** Deletes a record, then redirects back to the list. */
    @RequiresPermissions("oa:oaTaskRecord:edit")
    @RequestMapping(value = "delete")
    public String delete(OaTaskRecord oaTaskRecord, RedirectAttributes redirectAttributes) {
        oaTaskRecordService.delete(oaTaskRecord);
        // User-facing flash message ("reply information deleted successfully").
        addMessage(redirectAttributes, "删除回复信息成功");
        return "redirect:"+Global.getAdminPath()+"/oa/oaTaskRecord/?repage";
    }
}
|
ewcchan/logging-log4j-audit
|
log4j-audit/log4j-audit-api/src/main/java/org/apache/logging/log4j/audit/generator/MethodDefinition.java
|
<gh_stars>0
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache license, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the license for the specific language governing permissions and
* limitations under the license.
*/
package org.apache.logging.log4j.audit.generator;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import org.apache.logging.log4j.audit.util.NamingUtils;
import org.apache.logging.log4j.audit.util.StringUtil;
import static org.apache.logging.log4j.audit.generator.Constants.PUBLIC;
public class MethodDefinition implements Comparable<MethodDefinition> {
    // Visibility modifier emitted for the generated method (default: public).
    private String visibility = PUBLIC;
    private String name;
    private String returnType;
    // Optional annotation line (e.g. "@Override"); null when absent.
    private String annotation = null;
    private boolean isStatic = false;
    private boolean isFinal = false;
    private boolean isAbstract = false;
    private boolean isInterface = false;
    private List<Parameter> parameters = new ArrayList<>();
    // Fully-qualified names of declared (thrown) exceptions.
    private List<String> exceptions = new ArrayList<>();
    // Generated method body text (without the surrounding braces).
    private String content;
    // NOTE(review): javadocComments is never read or written in the code
    // visible here — confirm it is used elsewhere before removing.
    private String javadocComments = null;
    /**
     * Generates a lazy-initialization singleton accessor of the shape:
     * {@code if (name == null) name = new ReturnType(args); return name;}
     * exposed as {@code getName()}.
     */
    public class StandardSingleton extends MethodDefinition {
        /**
         * this must be used with the local variable
         * (a matching instance field with the same name as {@code name})
         */
        public StandardSingleton(String returnType, String name, List<String> parameters) {
            super(returnType, name);
            setStatic(true);
            // Accessor is named get<Name> (first letter upper-cased).
            String prefix = "get";
            setName(prefix + name.substring(0, 1).toUpperCase()
                    + name.substring(1));
            StringBuilder sb = new StringBuilder();
            sb.append("\tif (").append(name).append(" == null) {\n");
            sb.append("\t\t").append(name).append(" = new ").append(returnType)
                    .append("(");
            boolean first = true;
            if (parameters != null) {
                // Emit constructor arguments as a comma-separated list.
                for (Iterator<String> iter = parameters.iterator(); iter.hasNext(); ) {
                    String element = iter.next();
                    if (!first) {
                        sb.append(", ");
                    }
                    sb.append(element);
                    first = false;
                }
            }
            sb.append(");\n\t}\n\treturn ").append(name).append(";");
            setContent(sb.toString());
        }
    }
    // Throwaway enclosing instance: StandardSingleton is a non-static inner
    // class, so instantiating it requires some MethodDefinition instance.
    private static MethodDefinition definition = new MethodDefinition("dumb",
            "dumb");

    /** Factory for a lazy-singleton accessor method definition. */
    public static MethodDefinition getStandardSingleton(String returnType,
            String name, List<String> parameters) {
        return definition.new StandardSingleton(returnType, name, parameters);
    }
    /**
     * Creates a method definition with the given body; a null body is
     * replaced by a type-appropriate stub (see createStubContent).
     */
    public MethodDefinition(String returnType, String name, String content) {
        this.returnType = returnType;
        this.name = name;
        if (content != null) {
            this.content = StringUtil.filterContent(content, name, returnType);
        } else {
            createStubContent();
        }
    }

    /** Creates a method definition with a generated stub body. */
    public MethodDefinition(String returnType, String name) {
        this(returnType, name, null);
    }
private void createStubContent() {
String content = "// default stub - please modify\n";
setContent(content);
if (!returnType.equals("void")) {
if (returnType.equals("int")) {
setContent(content + "return 0;");
} else if (returnType.equals("boolean")) {
setContent(content + "return false;");
} else if (returnType.equals("double")) {
setContent(content + "return 0.0;");
} else if (returnType.equals("long")) {
setContent(content + "return 0;");
} else if (returnType.equals("float")) {
setContent(content + "return 0.0;");
} else if (returnType.equals("float")) {
setContent(content + "return 0.0;");
} else if (returnType.equals("char")) {
setContent(content + "return ' ';");
} else if (returnType.equals("short")) {
setContent(content + "return 0;");
} else {
setContent(content + "return null;");
}
}
}
/** Creates a definition mirroring the given reflective method, with a default stub body. */
public MethodDefinition(Method method) {
    this(method, null);
}

/**
 * Creates a definition mirroring the given reflective method.
 * Parameters are named 'a', 'b', 'c', ... because reflection does not expose
 * source parameter names; declared exception types are copied over.
 * NOTE(review): unlike the (returnType, name, content) constructor, a non-null
 * content is stored verbatim here without StringUtil.filterContent — confirm intended.
 */
public MethodDefinition(Method method, String content) {
    this.returnType = method.getReturnType().getName();
    this.name = method.getName();
    if (content == null) {
        createStubContent();
    } else {
        this.content = content;
    }
    int pName = 'a'; // successive synthetic parameter names
    for (Class<?> param : method.getParameterTypes()) {
        addParameter(new Parameter(Character.toString((char) pName++),
                param.getName(), ""));
    }
    for (Class<?> param : method.getExceptionTypes()) {
        exceptions.add(param.getName());
    }
}
/** Appends a parameter to this method's parameter list. */
public void addParameter(Parameter parameter) {
    parameters.add(parameter);
}
/** @return the annotation line (e.g. "@Override") rendered above the method, or null for none. */
public String getAnnotation() {
    return annotation;
}

/** @return the method body source (without the surrounding braces). */
public String getContent() {
    return content;
}

/** @return the declared exception type names, in declaration order. */
public List<String> getExceptions() {
    return exceptions;
}

/** @return the method name. */
public String getName() {
    return name;
}

/** @return the parameter list, in declaration order. */
public List<Parameter> getParameters() {
    return parameters;
}

/** @return the return type name (primitive or fully qualified). */
public String getReturnType() {
    return returnType;
}

/**
 * @return the visibility modifier (e.g. "public"), or null for package-private.
 *         (Misspelled method name kept — it is part of the public API.)
 */
public String getVisability() {
    return visibility;
}

/** @return whether the method is rendered as abstract (no body). */
public boolean isAbstract() {
    return isAbstract;
}

/** @return whether the method is rendered with the final modifier. */
public boolean isFinal() {
    return isFinal;
}

/** @return whether the method belongs to an interface (body and modifiers suppressed). */
public boolean isInterface() {
    return isInterface;
}

/** @return whether the method is rendered with the static modifier. */
public boolean isStatic() {
    return isStatic;
}

public void setAbstract(boolean isAbstract) {
    this.isAbstract = isAbstract;
}

public void setAnnotation(String annotation) {
    this.annotation = annotation;
}

public void setContent(String content) {
    this.content = content;
}

public void setExceptions(List<String> exceptions) {
    this.exceptions = exceptions;
}

public void setFinal(boolean isFinal) {
    this.isFinal = isFinal;
}

public void setInterface(boolean isInterface) {
    this.isInterface = isInterface;
}

public void setName(String name) {
    this.name = name;
}

public void setReturnType(String returnType) {
    this.returnType = returnType;
}

public void setStatic(boolean isStatic) {
    this.isStatic = isStatic;
}

/** Sets the visibility modifier. (Misspelled name kept — public API.) */
public void setVisability(String visability) {
    this.visibility = visability;
}
/**
 * Renders this definition as Java source: a Javadoc header, optional
 * annotation, modifiers, signature, throws clause, and either a body or a
 * trailing ';' for abstract/interface methods.
 */
@Override
public String toString() {
    StringBuilder sb = new StringBuilder();
    // Javadoc header — NOTE(review): emitted even when there are no comments
    // or parameters, producing an empty /** */ block; confirm desired.
    sb.append(" /**\n");
    if (getJavadocComments() != null) {
        sb.append(" * ").append(getJavadocComments());
    }
    if (getParameters() != null) {
        for (Parameter param : getParameters()) {
            sb.append("\n * @param ").append(param.getName())
                    .append(" ").append(param.getDescription());
        }
    }
    sb.append("\n */\n");
    sb.append(" ");
    if (getAnnotation() != null) {
        sb.append(getAnnotation());
        sb.append("\n ");
    }
    if (getVisability() != null) {
        sb.append(getVisability()).append(" ");
    }
    // Interface methods suppress explicit modifiers.
    if (isFinal() && !isInterface()) {
        sb.append("final ");
    }
    if (isStatic() && !isInterface()) {
        sb.append("static ");
    }
    if (isAbstract() && !isInterface()) {
        sb.append("abstract ");
    }
    sb.append(returnType).append(" ");
    sb.append(getName()).append("(");
    if (getParameters() != null) {
        boolean first = true;
        for (Parameter element : getParameters()) {
            if (!first) {
                sb.append(", ");
            }
            sb.append(element);
            first = false;
        }
    }
    sb.append(")");
    if (getExceptions() != null && getExceptions().size() > 0) {
        sb.append(" throws ");
        boolean first = true;
        for (String element : getExceptions()) {
            if (!first) {
                sb.append(", ");
            }
            sb.append(element);
            first = false;
        }
    }
    // Abstract/interface methods have no body — just terminate the declaration.
    if (isAbstract() || isInterface()) {
        sb.append(";");
        return sb.toString();
    }
    sb.append(" {\n");
    sb.append(getContent());
    sb.append("\n}");
    return sb.toString();
}
/**
 * Orders definitions by their "short" method name (as computed by
 * NamingUtils), falling back to the full name so equal short names still
 * get a deterministic, stable ordering.
 */
@Override
public int compareTo(MethodDefinition arg0) {
    int res = NamingUtils.getMethodShortName(getName()).compareTo(
            NamingUtils.getMethodShortName(arg0.getName()));
    if (res == 0) {
        return getName().compareTo(arg0.getName());
    }
    return res;
}
/** @return the Javadoc summary text rendered in toString(), or null for none. */
public String getJavadocComments() {
    return javadocComments;
}

public void setJavadocComments(String javadocComments) {
    this.javadocComments = javadocComments;
}
}
|
grahamu/flyingcracker
|
flyingcracker/config/settings/secrets.py
|
import json
from unipath import Path
from django.core.exceptions import ImproperlyConfigured
SECRETS_DIR = Path(__file__).parent

# Load the JSON secrets file once at import time; all lookups go through
# get_secret() below.
with open(SECRETS_DIR.child("secrets.json")) as f:
    secrets = json.load(f)


def get_secret(setting, secrets=secrets):
    """
    Get the secret constant or return explicit exception.

    ``setting`` is the key to look up; ``secrets`` defaults to the
    module-level mapping loaded from secrets.json.

    Raises ImproperlyConfigured when the key is missing.
    NOTE(review): the message mentions an environment variable, but values
    actually come from secrets.json — confirm the wording is intentional.
    """
    try:
        return secrets[setting]
    except KeyError:
        error_msg = "Set the {} environment variable".format(setting)
        raise ImproperlyConfigured(error_msg)
|
Manuzor/ezEngine
|
Code/Engine/Foundation/Memory/Policies/StackAllocation.h
|
#pragma once
#include <Foundation/Basics.h>
#include <Foundation/Containers/HybridArray.h>
#include <Foundation/Memory/AllocatorBase.h>
#include <Foundation/Memory/MemoryUtils.h>
namespace ezMemoryPolicies
{
/// \brief This allocation policy works like a stack. You can only "push" and "pop" allocations
/// in the correct order.
///
/// \note It is also possible to free all allocations at once.
///
/// \see ezAllocator
class ezStackAllocation
{
private:
  /// A chunk of backing memory plus the high-water mark of the last
  /// allocation made while it was the active bucket.
  struct Bucket
  {
    ezArrayPtr<ezUInt8> memory;
    ezUInt8* pLastAllocation;
  };

  ezAllocatorBase* m_pParent;          ///< Allocator used to create/destroy buckets.
  ezUInt32 m_uiCurrentBucketIndex;     ///< Index of the active bucket in m_buckets.
  ezUInt32 m_uiCurrentBucketSize;      ///< Size for the next bucket allocation (doubles on growth).
  ezArrayPtr<ezUInt8> m_currentBucket; ///< Memory of the active bucket.
  ezUInt8* m_pNextAllocation;          ///< Bump pointer inside the active bucket.
  ezHybridArray<Bucket, 4> m_buckets;  ///< All buckets ever allocated (kept for reuse).

public:
  enum
  {
    Alignment = sizeof(void*)
  };

  EZ_FORCE_INLINE ezStackAllocation(ezAllocatorBase* pParent)
    : m_pParent(pParent),
      m_uiCurrentBucketIndex(0),
      m_uiCurrentBucketSize(4096),
      m_pNextAllocation(nullptr) {}

  EZ_FORCE_INLINE ~ezStackAllocation()
  {
    EZ_ASSERT_DEV(m_uiCurrentBucketIndex == 0 && m_pNextAllocation == m_currentBucket.GetPtr(),
      "There is still something allocated!");
    for (auto& bucket : m_buckets)
    {
      EZ_DELETE_ARRAY(m_pParent, bucket.memory);
    }
  }

  /// \brief Sets the size used for the next bucket that has to be allocated.
  EZ_FORCE_INLINE void SetNextBucketSize(ezUInt32 uiSize)
  {
    m_uiCurrentBucketSize = uiSize;
  }

  EZ_FORCE_INLINE void* Allocate(size_t uiSize, size_t uiAlign)
  {
    EZ_ASSERT_DEV(uiAlign <= Alignment && Alignment % uiAlign == 0, "Unsupported alignment %d", uiAlign);
    uiSize = ezMemoryUtils::AlignSize(uiSize, (size_t)Alignment);

    // Do we need a new bucket?
    if (m_pNextAllocation + uiSize > m_currentBucket.GetPtr() + m_currentBucket.GetCount())
    {
      if (m_currentBucket.GetPtr() != nullptr)
      {
        // Remember how far the old bucket was filled so Deallocate can walk back into it.
        m_buckets[m_uiCurrentBucketIndex].pLastAllocation = m_pNextAllocation;
        m_uiCurrentBucketIndex++;
      }

      while (uiSize > m_uiCurrentBucketSize)
        m_uiCurrentBucketSize *= 2;

      // Is there still a not yet freed bucket?
      if (m_uiCurrentBucketIndex < m_buckets.GetCount())
      {
        // is the allocation too big for the current bucket?
        while (m_uiCurrentBucketIndex < m_buckets.GetCount() &&
          uiSize > m_buckets[m_uiCurrentBucketIndex].memory.GetCount())
        {
          m_uiCurrentBucketIndex++;
        }

        if (m_uiCurrentBucketIndex >= m_buckets.GetCount())
          goto AllocNewBucket;
        else
          m_currentBucket = m_buckets[m_uiCurrentBucketIndex].memory;
      }
      else
      {
AllocNewBucket:
        m_currentBucket = EZ_NEW_ARRAY(m_pParent, ezUInt8, m_uiCurrentBucketSize);
        m_buckets.ExpandAndGetRef().memory = m_currentBucket;
        m_uiCurrentBucketSize *= 2;
      }
      m_pNextAllocation = m_currentBucket.GetPtr();
    }

    EZ_ASSERT_DEBUG(m_pNextAllocation + uiSize <= m_currentBucket.GetPtr() + m_currentBucket.GetCount(), "");

    auto pResult = m_pNextAllocation;
    m_pNextAllocation += uiSize;
    return pResult;
  }

  EZ_FORCE_INLINE void Deallocate(void* ptr)
  {
    EZ_ASSERT_DEV(ptr >= m_currentBucket.GetPtr() && ptr <= m_pNextAllocation, "Invalid free");

    // does this empty the current bucket?
    if (ptr == m_currentBucket.GetPtr())
    {
      m_pNextAllocation = (ezUInt8*)ptr;
      m_buckets[m_uiCurrentBucketIndex].pLastAllocation = (ezUInt8*)ptr;

      // Walk back to the most recent bucket that still holds live allocations.
      while (m_pNextAllocation == m_currentBucket.GetPtr() && m_uiCurrentBucketIndex > 0)
      {
        m_uiCurrentBucketIndex--;
        Bucket& currentBucket = m_buckets[m_uiCurrentBucketIndex];
        m_currentBucket = currentBucket.memory;
        m_pNextAllocation = currentBucket.pLastAllocation;
      }
    }
    else
    {
      m_pNextAllocation = (ezUInt8*)ptr;
    }
  }

  /// \brief Frees all allocations at once, but keeps the buckets for reuse.
  EZ_FORCE_INLINE void Reset()
  {
    for (auto& bucket : m_buckets)
    {
      bucket.pLastAllocation = bucket.memory.GetPtr();
    }

    m_uiCurrentBucketIndex = 0;
    // Fix: Reset() before any allocation used to index m_buckets[0] on an
    // empty array (out-of-bounds). Guard and fall back to the pristine state.
    if (m_buckets.GetCount() > 0)
    {
      m_currentBucket = m_buckets[0].memory;
    }
    m_pNextAllocation = m_currentBucket.GetPtr();
  }

  EZ_FORCE_INLINE ezAllocatorBase* GetParent() const { return m_pParent; }
};
}
|
hknochi/silk
|
silk-core/src/main/scala/org/silkframework/entity/metadata/FailureClassSerializer.scala
|
package org.silkframework.entity.metadata
import org.silkframework.entity.paths.UntypedPath
import org.silkframework.failures.{AccumulatedFailureClass, FailureClass}
import org.silkframework.runtime.serialization.{ReadContext, WriteContext}
import org.silkframework.failures.FailureClass._
import scala.xml.Node
/**
 * XML (de)serializer for [[FailureClass]] metadata objects.
 */
case class FailureClassSerializer() extends XmlMetadataSerializer[FailureClass] {

  /** Reads a FailureClass (or AccumulatedFailureClass) back from its XML representation. */
  override def read(node: Node)(implicit readContext: ReadContext): FailureClass = {
    val taskId = (node \ TASK_ID_TAG).text.trim
    val message = (node \ MESSAGE_TAG).text.trim
    // Root cause is a nested exception element; a missing element yields null here.
    val rootCause = ExceptionSerializer().readException((node \ ROOT_CAUSE_TAG).headOption.flatMap(_.child.headOption).orNull)
    // NOTE(review): write() embeds fc.property (an Option) directly into the XML,
    // while this read expects the element text to be a bare path — confirm the
    // Option renders as its content and the round trip is symmetric.
    val property = (node \ PROPERTY_TAG).headOption.map(p => UntypedPath(p.text))
    val accumulated = (node \ ACCUMULATED_TAG).text.trim.toBoolean
    val fc = FailureClass(rootCause, message, taskId, property)
    if(accumulated) {
      new AccumulatedFailureClass(fc)
    }
    else {
      fc
    }
  }

  /** Writes the FailureClass as XML; element names mirror the tags consumed by read(). */
  override def write(fc: FailureClass)(implicit writeContext: WriteContext[Node]): Node = {
    <FailureClass>
      <RootCause>{ExceptionSerializer().write(fc.rootCause)}</RootCause>
      <Message>{fc.originalMessage}</Message>
      <TaskId>{fc.taskId}</TaskId>
      <Property>{fc.property}</Property>
      <Accumulated>{fc.accumulated()}</Accumulated>
    </FailureClass>
  }

  /**
    * The identifier used to define metadata objects in the map of [[org.silkframework.entity.metadata.EntityMetadata]]
    * NOTE: This method has to be implemented as def and not as val, else the serialization format registration will fail !!!!!!!!!
    */
  override def metadataId: String = FailureClassSerializer.METADATA_ID

  /**
    * An indicator whether the LazyMetadata object produced with this serializer will be replaceable (overridable in the Metadata map)
    */
  override def replaceableMetadata: Boolean = false
}

object FailureClassSerializer{
  // Key under which this metadata type is registered in the EntityMetadata map.
  val METADATA_ID: String = "failure_class"
}
|
pmisteliac/spoofax-pie
|
example/tiger/manual/tiger.spoofax/src/main/java/mb/tiger/spoofax/task/TigerShowPrettyPrintedText.java
|
package mb.tiger.spoofax.task;
import mb.common.region.Region;
import mb.common.util.ListView;
import mb.jsglr.common.TermTracer;
import mb.jsglr1.common.JSGLR1ParseResult;
import mb.pie.api.ExecContext;
import mb.pie.api.ResourceStringSupplier;
import mb.pie.api.Task;
import mb.pie.api.TaskDef;
import mb.resource.ResourceKey;
import mb.spoofax.core.language.command.CommandFeedback;
import mb.spoofax.core.language.command.CommandOutput;
import mb.stratego.common.StrategoRuntime;
import mb.stratego.common.StrategoRuntimeBuilder;
import mb.stratego.common.StrategoUtil;
import mb.tiger.spoofax.task.reusable.TigerParse;
import org.checkerframework.checker.nullness.qual.Nullable;
import org.spoofax.interpreter.terms.IStrategoTerm;
import javax.inject.Inject;
/**
 * Task that pretty-prints (a region of) a Tiger program and returns the result
 * as textual command feedback.
 */
public class TigerShowPrettyPrintedText implements TaskDef<TigerShowArgs, CommandOutput> {
    private final TigerParse parse;
    private final StrategoRuntimeBuilder strategoRuntimeBuilder;
    private final StrategoRuntime prototypeStrategoRuntime;

    @Inject public TigerShowPrettyPrintedText(
        TigerParse parse,
        StrategoRuntimeBuilder strategoRuntimeBuilder,
        StrategoRuntime prototypeStrategoRuntime
    ) {
        this.parse = parse;
        this.strategoRuntimeBuilder = strategoRuntimeBuilder;
        this.prototypeStrategoRuntime = prototypeStrategoRuntime;
    }

    @Override public String getId() {
        return getClass().getName();
    }

    @Override public CommandOutput exec(ExecContext context, TigerShowArgs input) throws Exception {
        final ResourceKey key = input.key;
        final @Nullable Region region = input.region;

        // Parse the resource and fail loudly when no AST could be recovered.
        @SuppressWarnings("ConstantConditions") final JSGLR1ParseResult parsed =
            context.require(parse, new ResourceStringSupplier(key));
        @SuppressWarnings("ConstantConditions") final IStrategoTerm ast = parsed.getAst()
            .orElseThrow(() -> new RuntimeException("Cannot show pretty-printed text, parsed AST for '" + key + "' is null"));

        // When a selection was given, narrow to the smallest subterm covering it.
        final IStrategoTerm subject = region == null
            ? ast
            : TermTracer.getSmallestTermEncompassingRegion(ast, region);

        final StrategoRuntime runtime = strategoRuntimeBuilder.buildFromPrototype(prototypeStrategoRuntime);
        final String strategyId = "pp-Tiger-string";
        final @Nullable IStrategoTerm prettyPrinted = runtime.invoke(strategyId, subject);
        if(prettyPrinted == null) {
            throw new RuntimeException("Cannot show pretty-printed text, executing Stratego strategy '" + strategyId + "' failed");
        }

        final String text = StrategoUtil.toString(prettyPrinted);
        return new CommandOutput(ListView.of(CommandFeedback.showText(text, "Pretty-printed text for '" + key + "'")));
    }

    @Override public Task<CommandOutput> createTask(TigerShowArgs input) {
        return TaskDef.super.createTask(input);
    }
}
|
nowkoai/test
|
spec/frontend/pipelines/notification/deprecated_type_keyword_notification_spec.js
|
import VueApollo from 'vue-apollo';
import { createLocalVue, shallowMount } from '@vue/test-utils';
import { GlAlert, GlSprintf } from '@gitlab/ui';
import createMockApollo from 'helpers/mock_apollo_helper';
import waitForPromises from 'helpers/wait_for_promises';
import DeprecatedTypeKeywordNotification from '~/pipelines/components/notification/deprecated_type_keyword_notification.vue';
import getPipelineWarnings from '~/pipelines/graphql/queries/get_pipeline_warnings.query.graphql';
import {
mockWarningsWithoutDeprecation,
mockWarningsRootType,
mockWarningsType,
mockWarningsTypesAll,
} from './mock_data';
// Injected provide values shared by every mount in this spec.
const defaultProvide = {
  deprecatedKeywordsDocPath: '/help/ci/yaml/index.md#deprecated-keywords',
  fullPath: '/namespace/my-project',
  pipelineIid: 4,
};

let wrapper;
const mockWarnings = jest.fn();

// Mounts the component with a stubbed $apollo object (no query actually runs).
const createComponent = ({ isLoading = false, options = {} } = {}) => {
  return shallowMount(DeprecatedTypeKeywordNotification, {
    stubs: {
      GlSprintf,
    },
    provide: {
      ...defaultProvide,
    },
    mocks: {
      $apollo: {
        queries: {
          warnings: {
            loading: isLoading,
          },
        },
      },
    },
    ...options,
  });
};

// Mounts with a mock Apollo provider so the warnings query resolves for real;
// the spread `options` replaces the stubbed `mocks` from createComponent.
const createComponentWithApollo = () => {
  const localVue = createLocalVue();
  localVue.use(VueApollo);

  const handlers = [[getPipelineWarnings, mockWarnings]];
  const mockApollo = createMockApollo(handlers);

  return createComponent({
    options: {
      localVue,
      apolloProvider: mockApollo,
      mocks: {},
    },
  });
};

// Finder helpers.
const findAlert = () => wrapper.findComponent(GlAlert);
const findAlertItems = () => findAlert().findAll('li');

afterEach(() => {
  wrapper.destroy();
});

describe('Deprecated keyword notification', () => {
  describe('while loading the pipeline warnings', () => {
    beforeEach(() => {
      wrapper = createComponent({ isLoading: true });
    });

    it('does not display the notification', () => {
      expect(findAlert().exists()).toBe(false);
    });
  });

  describe('if there is an error in the query', () => {
    beforeEach(async () => {
      mockWarnings.mockResolvedValue({ errors: ['It didnt work'] });
      wrapper = createComponentWithApollo();
      await waitForPromises();
    });

    it('does not display the notification', () => {
      expect(findAlert().exists()).toBe(false);
    });
  });

  describe('with a valid query result', () => {
    describe('if there are no deprecation warnings', () => {
      beforeEach(async () => {
        mockWarnings.mockResolvedValue(mockWarningsWithoutDeprecation);
        wrapper = createComponentWithApollo();
        await waitForPromises();
      });

      it('does not show the notification', () => {
        expect(findAlert().exists()).toBe(false);
      });
    });

    describe('with a root type deprecation message', () => {
      beforeEach(async () => {
        mockWarnings.mockResolvedValue(mockWarningsRootType);
        wrapper = createComponentWithApollo();
        await waitForPromises();
      });

      it('shows the notification with one item', () => {
        expect(findAlert().exists()).toBe(true);
        expect(findAlertItems()).toHaveLength(1);
        expect(findAlertItems().at(0).text()).toContain('types');
      });
    });

    describe('with a job type deprecation message', () => {
      beforeEach(async () => {
        mockWarnings.mockResolvedValue(mockWarningsType);
        wrapper = createComponentWithApollo();
        await waitForPromises();
      });

      it('shows the notification with one item', () => {
        expect(findAlert().exists()).toBe(true);
        expect(findAlertItems()).toHaveLength(1);
        // 'type' but not 'types' — distinguishes the job-level keyword message.
        expect(findAlertItems().at(0).text()).toContain('type');
        expect(findAlertItems().at(0).text()).not.toContain('types');
      });
    });

    describe('with both the root types and job type deprecation message', () => {
      beforeEach(async () => {
        mockWarnings.mockResolvedValue(mockWarningsTypesAll);
        wrapper = createComponentWithApollo();
        await waitForPromises();
      });

      it('shows the notification with two items', () => {
        expect(findAlert().exists()).toBe(true);
        expect(findAlertItems()).toHaveLength(2);
        expect(findAlertItems().at(0).text()).toContain('types');
        expect(findAlertItems().at(1).text()).toContain('type');
        expect(findAlertItems().at(1).text()).not.toContain('types');
      });
    });
  });
});
|
jenergy/jzIntvImGui
|
app/src/main/cpp/jzintv/event/event_null.c
|
/*
* ============================================================================
* Title: Null Event Handling Subsystem
* Author: <NAME>
* ============================================================================
* This backend generates no events, and has the event core short-circuit.
* ============================================================================
*/
#include "config.h"
#include "event/event_tbl.h"
#include "event/event_plat.h"
/* ======================================================================== */
/* EVENT_PLAT_INIT -- Initializes the platform-specific code. */
/* ======================================================================== */
int event_plat_init
(
    const bool enable_mouse,    /* Enable mouse events?             */
    evt_pvt_t *const evt_pvt,   /* Pass back to event core.         */
    void **const ptr_plat_pvt   /* For plat-specific private struct. */
)
{
    UNUSED(enable_mouse);
    UNUSED(evt_pvt);
    *ptr_plat_pvt = NULL;   /* Null backend keeps no private state. */
    return 0;               /* Always succeeds.                     */
}
/* ======================================================================== */
/* EVENT_PLAT_DTOR -- Shuts down the platform-specific code. */
/* ======================================================================== */
void event_plat_dtor(void *const plat_pvt) { UNUSED(plat_pvt); return; }
/* ======================================================================== */
/* EVENT_PLAT_PUMP -- Pump events, even if we're not going to consume. */
/* Called at the start of a tick, but before we decide to "cork." */
/* ======================================================================== */
void event_plat_pump(evt_pvt_t *const pvt, void *const plat_pvt)
{
    /* Null backend: there is no event source to pump. */
    UNUSED(pvt);
    UNUSED(plat_pvt);
}
/* ======================================================================== */
/* EVENT_PLAT_TICK -- Performs the bulk of the tick cycle. */
/* */
/* Returns true if we should make an early exit before dequeuing. This */
/* typically happens when we need to "cork" for a combo. */
/* ======================================================================== */
bool event_plat_tick(evt_pvt_t *const pvt, void *const plat_pvt)
{
    UNUSED(pvt);
    UNUSED(plat_pvt);
    return true; /* always short-circuit the main event core. */
}
/* ======================================================================== */
/* EVENT_PLAT_TICK_LATE -- Performs deferred tick cycle tasks, after we */
/* have drained our internal event queue. */
/* */
/* Currently this is only used by SDL's experimental mouse processing. */
/* Not sure if this is really necessary. */
/* ======================================================================== */
void event_plat_tick_late(evt_pvt_t *const pvt, void *const plat_pvt)
{
    /* Null backend: no deferred work. */
    UNUSED(pvt);
    UNUSED(plat_pvt);
}
/* ======================================================================== */
/* This program is free software; you can redistribute it and/or modify */
/* it under the terms of the GNU General Public License as published by */
/* the Free Software Foundation; either version 2 of the License, or */
/* (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU */
/* General Public License for more details. */
/* */
/* You should have received a copy of the GNU General Public License along */
/* with this program; if not, write to the Free Software Foundation, Inc., */
/* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */
/* ======================================================================== */
/* Copyright (c) 1998-2020, <NAME> */
/* ======================================================================== */
|
liweitianux/DeltaPorts
|
ports/sysutils/nomad/dragonfly/patch-vendor_github.com_hashicorp_consul-template_signals_signals__unix.go
|
<reponame>liweitianux/DeltaPorts<filename>ports/sysutils/nomad/dragonfly/patch-vendor_github.com_hashicorp_consul-template_signals_signals__unix.go
--- vendor/github.com/hashicorp/consul-template/signals/signals_unix.go.orig 2021-02-23 22:29:46 UTC
+++ vendor/github.com/hashicorp/consul-template/signals/signals_unix.go
@@ -1,4 +1,4 @@
-// +build linux darwin freebsd openbsd solaris netbsd
+// +build linux darwin freebsd openbsd solaris netbsd dragonfly
package signals
|
tcmoore32/sheer-madness
|
gosu-core/src/main/java/gw/internal/gosu/parser/statements/ThrowStatement.java
|
/*
* Copyright 2014 <NAME>, Inc.
*/
package gw.internal.gosu.parser.statements;
import gw.internal.gosu.parser.Expression;
import gw.internal.gosu.parser.CannotExecuteGosuException;
import gw.lang.parser.statements.IThrowStatement;
import gw.lang.parser.statements.ITerminalStatement;
import gw.lang.parser.statements.TerminalType;
/**
* Represents a throw-statement as specified in the Gosu grammar:
* <pre>
* <i>throw-statement</i>
* <b>throw</b> <expression>
* </pre>
* <p/>
*
* @see gw.lang.parser.IGosuParser
*/
public final class ThrowStatement extends TerminalStatement implements IThrowStatement
{
  // The expression whose evaluated value is thrown.
  private Expression _expression;

  /** @return the expression whose value is thrown. */
  public Expression getExpression()
  {
    return _expression;
  }

  /** Sets the expression whose value is thrown. */
  public void setExpression( Expression expression )
  {
    _expression = expression;
  }

  /**
   * Executes the statement. A throw cannot be evaluated in a
   * compile-time-constant context, so that case raises
   * CannotExecuteGosuException instead of delegating to the superclass.
   */
  public Object execute()
  {
    if( !isCompileTimeConstant() )
    {
      return super.execute();
    }
    throw new CannotExecuteGosuException();
  }

  /** A throw terminates control flow the same way a return does. */
  @Override
  public TerminalType getTerminalType() {
    return TerminalType.ReturnOrThrow;
  }

  @Override
  protected ITerminalStatement getLeastSignificantTerminalStatement_internal( boolean[] bAbsolute )
  {
    // A throw statement is itself the terminal statement, unconditionally.
    bAbsolute[0] = true;
    return this;
  }

  @Override
  public String toString()
  {
    return "throw " + getExpression().toString();
  }
}
|
smorenburg/python
|
src/old/hello.py
|
<filename>src/old/hello.py
#!/usr/bin/env python3
# Minimal example demonstrating Python comment styles.
# This is a full line comment
# Comment each line
print('Hello, World!')  # This is a trailing comment
# The triple-quoted literal below is a no-op expression statement, not a
# comment — Python has no true block-comment syntax.
"""
This is not a block comment.
This is a multiline string.
"""
|
zasey/market
|
src/main/java/market/service/DistilleryService.java
|
<filename>src/main/java/market/service/DistilleryService.java
package market.service;
import market.domain.Distillery;
import market.domain.Region;
import java.util.List;
/** Service-layer operations for managing {@link Distillery} entities. */
public interface DistilleryService {

	/**
	 * @return all the distilleries sorted by title
	 */
	List<Distillery> findAll();

	/**
	 * @param region region to filter by
	 * @return all the distilleries of the specified region sorted by title
	 */
	List<Distillery> findByRegion(Region region);

	/**
	 * @param distilleryId id of the distillery to load
	 * @return distillery with the specified id
	 */
	Distillery findById(long distilleryId);

	/**
	 * @param title title of the distillery to load
	 * @return distillery with the specified title
	 */
	Distillery findByTitle(String title);

	/**
	 * Creates new distillery.
	 *
	 * @param newDistillery distillery to persist
	 * @param regionName    name of the region the distillery belongs to
	 */
	void create(Distillery newDistillery, String regionName);

	/**
	 * Updates existing distillery.
	 *
	 * @param distilleryId      id of the distillery to update
	 * @param changedDistillery new state of the distillery
	 * @param regionTitle       title of the region the distillery belongs to
	 */
	void update(long distilleryId, Distillery changedDistillery, String regionTitle);

	/**
	 * Removes distillery.
	 *
	 * @param distilleryId id of the distillery to remove
	 */
	void delete(long distilleryId);
}
|
sffc/fuchsia-clone
|
sdk/lib/sys/component/cpp/tests/utils.h
|
// Copyright 2022 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef LIB_SYS_COMPONENT_CPP_TESTS_UTILS_H_
#define LIB_SYS_COMPONENT_CPP_TESTS_UTILS_H_

#include <fuchsia/component/decl/cpp/fidl.h>
#include <fuchsia/component/test/cpp/fidl.h>

#include <memory>  // Fix: std::shared_ptr is used below; do not rely on transitive includes.
#include <string>

namespace component {
namespace tests {

namespace fctest = fuchsia::component::test;
namespace fcdecl = fuchsia::component::decl;
namespace fio2 = fuchsia::io2;

// Helpers for constructing FIDL realm-builder objects in unit tests.
// Each Capability helper has a fully-specified overload and a name-only
// overload.

std::shared_ptr<fctest::ChildOptions> CreateFidlChildOptions(fcdecl::StartupMode startup_mode,
                                                             std::string environment);

std::shared_ptr<fcdecl::Ref> CreateFidlChildRef(std::string name);

std::shared_ptr<fctest::Capability2> CreateFidlProtocolCapability(std::string name, std::string as,
                                                                  fcdecl::DependencyType type,
                                                                  std::string path);

std::shared_ptr<fctest::Capability2> CreateFidlProtocolCapability(std::string name);

std::shared_ptr<fctest::Capability2> CreateFidlServiceCapability(std::string name, std::string as,
                                                                 std::string path);

std::shared_ptr<fctest::Capability2> CreateFidlServiceCapability(std::string name);

std::shared_ptr<fctest::Capability2> CreateFidlDirectoryCapability(std::string name, std::string as,
                                                                   fcdecl::DependencyType type,
                                                                   std::string subdir,
                                                                   fio2::Operations rights,
                                                                   std::string path);

std::shared_ptr<fctest::Capability2> CreateFidlDirectoryCapability(std::string name);

}  // namespace tests
}  // namespace component

#endif  // LIB_SYS_COMPONENT_CPP_TESTS_UTILS_H_
|
alonmm/VCSamples
|
VC2010Samples/MFC/general/dlgcbr32/Dlgbars.h
|
// DlgBars.h : header file
//
// This is a part of the Microsoft Foundation Classes C++ library.
// Copyright (c) Microsoft Corporation. All rights reserved.
//
// This source code is only intended as a supplement to the
// Microsoft Foundation Classes Reference and related
// electronic documentation provided with the library.
// See these sources for detailed information regarding the
// Microsoft Foundation Classes product.
/*****************************************************************************
Purpose:
Interface for CDlgToolBar, a special type of CToolBar which does not
expect a parent frame window to be available, and CDlgStatusBar, which
does the same for CStatusBars. This allows the control bars
to be used in applications where the main window is a dialog bar.
Functions:
CDlgToolBar::CDlgToolBar() -- constructor
CDlgToolBar::~CDlgToolBar() -- destructor
CDlgToolBar::OnIdleUpdateCmdUI() -- WM_IDLEUPDATECMDUI handler
CDlgStatusBar::CDlgStatusBar() -- constructor
CDlgStatusBar::~CDlgStatusBar() -- destructor
CDlgStatusBar::OnIdleUpdateCmdUI() -- WM_IDLEUPDATECMDUI handler
Development Team:
<NAME>
Ported to 32-bit by:
<NAME>
Created by Microsoft Product Support Services, Premier ISV Support
Copyright (c) Microsoft Corporation. All rights reserved.
\****************************************************************************/
#ifndef __DLGBARS_H__
#define __DLGBARS_H__
// Toolbar variant that works without a parent CFrameWnd (e.g. in a
// dialog-based application).
class CDlgToolBar : public CToolBar
{
// Construction
public:
	CDlgToolBar();

// Implementation
public:
	virtual ~CDlgToolBar();

protected:
	// Generated message map functions
	// Handles idle-time command UI updates itself, since no frame window
	// is available to route WM_IDLEUPDATECMDUI.
	//{{AFX_MSG(CDlgToolBar)
	afx_msg LRESULT OnIdleUpdateCmdUI(WPARAM wParam, LPARAM);
	//}}AFX_MSG
	DECLARE_MESSAGE_MAP()
};

// Status bar variant that works without a parent CFrameWnd.
class CDlgStatusBar : public CStatusBar
{
// Construction
public:
	CDlgStatusBar();

// Implementation
public:
	virtual ~CDlgStatusBar();

protected:
	// Generated message map functions
	// Same idle-update handling as CDlgToolBar above.
	//{{AFX_MSG(CDlgStatusBar)
	afx_msg LRESULT OnIdleUpdateCmdUI(WPARAM wParam, LPARAM);
	//}}AFX_MSG
	DECLARE_MESSAGE_MAP()
};
|
AlessandroBorges/Bor_Vulkan
|
Vulkan/jni/bor.vulkan.structs.VkDrawIndexedIndirectCommand.h
|
<filename>Vulkan/jni/bor.vulkan.structs.VkDrawIndexedIndirectCommand.h
/* DO NOT EDIT THIS FILE - it is machine generated */
#include <jni.h>
/* Header for class bor_vulkan_structs_VkDrawIndexedIndirectCommand */
#ifndef _Included_bor_vulkan_structs_VkDrawIndexedIndirectCommand
#define _Included_bor_vulkan_structs_VkDrawIndexedIndirectCommand
#ifdef __cplusplus
extern "C" {
#endif
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKAPPLICATIONINFO_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKAPPLICATIONINFO_ID 1L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKINSTANCECREATEINFO_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKINSTANCECREATEINFO_ID 2L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKALLOCATIONCALLBACKS_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKALLOCATIONCALLBACKS_ID 3L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKPHYSICALDEVICEFEATURES_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKPHYSICALDEVICEFEATURES_ID 4L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKFORMATPROPERTIES_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKFORMATPROPERTIES_ID 5L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKEXTENT3D_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKEXTENT3D_ID 6L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKIMAGEFORMATPROPERTIES_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKIMAGEFORMATPROPERTIES_ID 7L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKPHYSICALDEVICELIMITS_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKPHYSICALDEVICELIMITS_ID 8L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKPHYSICALDEVICESPARSEPROPERTIES_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKPHYSICALDEVICESPARSEPROPERTIES_ID 9L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKPHYSICALDEVICEPROPERTIES_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKPHYSICALDEVICEPROPERTIES_ID 10L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKQUEUEFAMILYPROPERTIES_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKQUEUEFAMILYPROPERTIES_ID 11L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKMEMORYTYPE_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKMEMORYTYPE_ID 12L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKMEMORYHEAP_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKMEMORYHEAP_ID 13L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKPHYSICALDEVICEMEMORYPROPERTIES_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKPHYSICALDEVICEMEMORYPROPERTIES_ID 14L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDEVICEQUEUECREATEINFO_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDEVICEQUEUECREATEINFO_ID 15L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDEVICECREATEINFO_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDEVICECREATEINFO_ID 16L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKEXTENSIONPROPERTIES_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKEXTENSIONPROPERTIES_ID 17L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKLAYERPROPERTIES_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKLAYERPROPERTIES_ID 18L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKSUBMITINFO_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKSUBMITINFO_ID 19L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKMEMORYALLOCATEINFO_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKMEMORYALLOCATEINFO_ID 20L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKMAPPEDMEMORYRANGE_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKMAPPEDMEMORYRANGE_ID 21L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKMEMORYREQUIREMENTS_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKMEMORYREQUIREMENTS_ID 22L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKSPARSEIMAGEFORMATPROPERTIES_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKSPARSEIMAGEFORMATPROPERTIES_ID 23L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKSPARSEIMAGEMEMORYREQUIREMENTS_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKSPARSEIMAGEMEMORYREQUIREMENTS_ID 24L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKSPARSEMEMORYBIND_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKSPARSEMEMORYBIND_ID 25L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKSPARSEBUFFERMEMORYBINDINFO_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKSPARSEBUFFERMEMORYBINDINFO_ID 26L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKSPARSEIMAGEOPAQUEMEMORYBINDINFO_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKSPARSEIMAGEOPAQUEMEMORYBINDINFO_ID 27L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKIMAGESUBRESOURCE_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKIMAGESUBRESOURCE_ID 28L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKOFFSET3D_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKOFFSET3D_ID 29L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKSPARSEIMAGEMEMORYBIND_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKSPARSEIMAGEMEMORYBIND_ID 30L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKSPARSEIMAGEMEMORYBINDINFO_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKSPARSEIMAGEMEMORYBINDINFO_ID 31L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKBINDSPARSEINFO_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKBINDSPARSEINFO_ID 32L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKFENCECREATEINFO_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKFENCECREATEINFO_ID 33L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKSEMAPHORECREATEINFO_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKSEMAPHORECREATEINFO_ID 34L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKEVENTCREATEINFO_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKEVENTCREATEINFO_ID 35L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKQUERYPOOLCREATEINFO_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKQUERYPOOLCREATEINFO_ID 36L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKBUFFERCREATEINFO_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKBUFFERCREATEINFO_ID 37L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKBUFFERVIEWCREATEINFO_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKBUFFERVIEWCREATEINFO_ID 38L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKIMAGECREATEINFO_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKIMAGECREATEINFO_ID 39L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKSUBRESOURCELAYOUT_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKSUBRESOURCELAYOUT_ID 40L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKCOMPONENTMAPPING_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKCOMPONENTMAPPING_ID 41L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKIMAGESUBRESOURCERANGE_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKIMAGESUBRESOURCERANGE_ID 42L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKIMAGEVIEWCREATEINFO_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKIMAGEVIEWCREATEINFO_ID 43L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKSHADERMODULECREATEINFO_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKSHADERMODULECREATEINFO_ID 44L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKPIPELINECACHECREATEINFO_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKPIPELINECACHECREATEINFO_ID 45L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKSPECIALIZATIONMAPENTRY_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKSPECIALIZATIONMAPENTRY_ID 46L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKSPECIALIZATIONINFO_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKSPECIALIZATIONINFO_ID 47L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKPIPELINESHADERSTAGECREATEINFO_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKPIPELINESHADERSTAGECREATEINFO_ID 48L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKVERTEXINPUTBINDINGDESCRIPTION_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKVERTEXINPUTBINDINGDESCRIPTION_ID 49L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKVERTEXINPUTATTRIBUTEDESCRIPTION_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKVERTEXINPUTATTRIBUTEDESCRIPTION_ID 50L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKPIPELINEVERTEXINPUTSTATECREATEINFO_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKPIPELINEVERTEXINPUTSTATECREATEINFO_ID 51L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKPIPELINEINPUTASSEMBLYSTATECREATEINFO_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKPIPELINEINPUTASSEMBLYSTATECREATEINFO_ID 52L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKPIPELINETESSELLATIONSTATECREATEINFO_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKPIPELINETESSELLATIONSTATECREATEINFO_ID 53L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKVIEWPORT_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKVIEWPORT_ID 54L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKOFFSET2D_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKOFFSET2D_ID 55L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKEXTENT2D_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKEXTENT2D_ID 56L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKRECT2D_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKRECT2D_ID 57L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKPIPELINEVIEWPORTSTATECREATEINFO_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKPIPELINEVIEWPORTSTATECREATEINFO_ID 58L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKPIPELINERASTERIZATIONSTATECREATEINFO_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKPIPELINERASTERIZATIONSTATECREATEINFO_ID 59L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKPIPELINEMULTISAMPLESTATECREATEINFO_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKPIPELINEMULTISAMPLESTATECREATEINFO_ID 60L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKSTENCILOPSTATE_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKSTENCILOPSTATE_ID 61L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKPIPELINEDEPTHSTENCILSTATECREATEINFO_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKPIPELINEDEPTHSTENCILSTATECREATEINFO_ID 62L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKPIPELINECOLORBLENDATTACHMENTSTATE_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKPIPELINECOLORBLENDATTACHMENTSTATE_ID 63L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKPIPELINECOLORBLENDSTATECREATEINFO_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKPIPELINECOLORBLENDSTATECREATEINFO_ID 64L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKPIPELINEDYNAMICSTATECREATEINFO_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKPIPELINEDYNAMICSTATECREATEINFO_ID 65L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKGRAPHICSPIPELINECREATEINFO_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKGRAPHICSPIPELINECREATEINFO_ID 66L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKCOMPUTEPIPELINECREATEINFO_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKCOMPUTEPIPELINECREATEINFO_ID 67L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKPUSHCONSTANTRANGE_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKPUSHCONSTANTRANGE_ID 68L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKPIPELINELAYOUTCREATEINFO_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKPIPELINELAYOUTCREATEINFO_ID 69L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKSAMPLERCREATEINFO_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKSAMPLERCREATEINFO_ID 70L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDESCRIPTORSETLAYOUTBINDING_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDESCRIPTORSETLAYOUTBINDING_ID 71L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDESCRIPTORSETLAYOUTCREATEINFO_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDESCRIPTORSETLAYOUTCREATEINFO_ID 72L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDESCRIPTORPOOLSIZE_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDESCRIPTORPOOLSIZE_ID 73L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDESCRIPTORPOOLCREATEINFO_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDESCRIPTORPOOLCREATEINFO_ID 74L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDESCRIPTORSETALLOCATEINFO_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDESCRIPTORSETALLOCATEINFO_ID 75L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDESCRIPTORIMAGEINFO_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDESCRIPTORIMAGEINFO_ID 76L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDESCRIPTORBUFFERINFO_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDESCRIPTORBUFFERINFO_ID 77L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKWRITEDESCRIPTORSET_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKWRITEDESCRIPTORSET_ID 78L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKCOPYDESCRIPTORSET_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKCOPYDESCRIPTORSET_ID 79L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKFRAMEBUFFERCREATEINFO_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKFRAMEBUFFERCREATEINFO_ID 80L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKATTACHMENTDESCRIPTION_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKATTACHMENTDESCRIPTION_ID 81L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKATTACHMENTREFERENCE_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKATTACHMENTREFERENCE_ID 82L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKSUBPASSDESCRIPTION_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKSUBPASSDESCRIPTION_ID 83L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKSUBPASSDEPENDENCY_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKSUBPASSDEPENDENCY_ID 84L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKRENDERPASSCREATEINFO_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKRENDERPASSCREATEINFO_ID 85L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKCOMMANDPOOLCREATEINFO_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKCOMMANDPOOLCREATEINFO_ID 86L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKCOMMANDBUFFERALLOCATEINFO_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKCOMMANDBUFFERALLOCATEINFO_ID 87L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKCOMMANDBUFFERINHERITANCEINFO_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKCOMMANDBUFFERINHERITANCEINFO_ID 88L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKCOMMANDBUFFERBEGININFO_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKCOMMANDBUFFERBEGININFO_ID 89L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKBUFFERCOPY_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKBUFFERCOPY_ID 90L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKIMAGESUBRESOURCELAYERS_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKIMAGESUBRESOURCELAYERS_ID 91L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKIMAGECOPY_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKIMAGECOPY_ID 92L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKIMAGEBLIT_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKIMAGEBLIT_ID 93L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKBUFFERIMAGECOPY_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKBUFFERIMAGECOPY_ID 94L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKCLEARDEPTHSTENCILVALUE_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKCLEARDEPTHSTENCILVALUE_ID 95L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKCLEARATTACHMENT_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKCLEARATTACHMENT_ID 96L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKCLEARRECT_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKCLEARRECT_ID 97L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKIMAGERESOLVE_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKIMAGERESOLVE_ID 98L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKMEMORYBARRIER_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKMEMORYBARRIER_ID 99L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKBUFFERMEMORYBARRIER_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKBUFFERMEMORYBARRIER_ID 100L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKIMAGEMEMORYBARRIER_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKIMAGEMEMORYBARRIER_ID 101L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKRENDERPASSBEGININFO_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKRENDERPASSBEGININFO_ID 102L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDISPATCHINDIRECTCOMMAND_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDISPATCHINDIRECTCOMMAND_ID 103L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDRAWINDEXEDINDIRECTCOMMAND_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDRAWINDEXEDINDIRECTCOMMAND_ID 104L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDRAWINDIRECTCOMMAND_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDRAWINDIRECTCOMMAND_ID 105L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKSURFACECAPABILITIESKHR_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKSURFACECAPABILITIESKHR_ID 106L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKSURFACEFORMATKHR_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKSURFACEFORMATKHR_ID 107L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKSWAPCHAINCREATEINFOKHR_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKSWAPCHAINCREATEINFOKHR_ID 108L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKPRESENTINFOKHR_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKPRESENTINFOKHR_ID 109L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDISPLAYPROPERTIESKHR_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDISPLAYPROPERTIESKHR_ID 110L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDISPLAYMODEPARAMETERSKHR_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDISPLAYMODEPARAMETERSKHR_ID 111L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDISPLAYMODEPROPERTIESKHR_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDISPLAYMODEPROPERTIESKHR_ID 112L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDISPLAYMODECREATEINFOKHR_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDISPLAYMODECREATEINFOKHR_ID 113L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDISPLAYPLANECAPABILITIESKHR_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDISPLAYPLANECAPABILITIESKHR_ID 114L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDISPLAYPLANEPROPERTIESKHR_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDISPLAYPLANEPROPERTIESKHR_ID 115L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDISPLAYSURFACECREATEINFOKHR_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDISPLAYSURFACECREATEINFOKHR_ID 116L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDISPLAYPRESENTINFOKHR_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDISPLAYPRESENTINFOKHR_ID 117L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKXLIBSURFACECREATEINFOKHR_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKXLIBSURFACECREATEINFOKHR_ID 118L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKXCBSURFACECREATEINFOKHR_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKXCBSURFACECREATEINFOKHR_ID 119L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKWAYLANDSURFACECREATEINFOKHR_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKWAYLANDSURFACECREATEINFOKHR_ID 120L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKMIRSURFACECREATEINFOKHR_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKMIRSURFACECREATEINFOKHR_ID 121L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKANDROIDSURFACECREATEINFOKHR_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKANDROIDSURFACECREATEINFOKHR_ID 122L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKWIN32SURFACECREATEINFOKHR_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKWIN32SURFACECREATEINFOKHR_ID 123L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDEBUGREPORTCALLBACKCREATEINFOEXT_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDEBUGREPORTCALLBACKCREATEINFOEXT_ID 124L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKPIPELINERASTERIZATIONSTATERASTERIZATIONORDERAMD_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKPIPELINERASTERIZATIONSTATERASTERIZATIONORDERAMD_ID 125L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDEBUGMARKEROBJECTNAMEINFOEXT_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDEBUGMARKEROBJECTNAMEINFOEXT_ID 126L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDEBUGMARKEROBJECTTAGINFOEXT_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDEBUGMARKEROBJECTTAGINFOEXT_ID 127L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDEBUGMARKERMARKERINFOEXT_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDEBUGMARKERMARKERINFOEXT_ID 128L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDEDICATEDALLOCATIONIMAGECREATEINFONV_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDEDICATEDALLOCATIONIMAGECREATEINFONV_ID 129L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDEDICATEDALLOCATIONBUFFERCREATEINFONV_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDEDICATEDALLOCATIONBUFFERCREATEINFONV_ID 130L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDEDICATEDALLOCATIONMEMORYALLOCATEINFONV_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKDEDICATEDALLOCATIONMEMORYALLOCATEINFONV_ID 131L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKCLEARVALUE_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKCLEARVALUE_ID 200L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKCLEARCOLORVALUE_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_VKCLEARCOLORVALUE_ID 201L
#undef bor_vulkan_structs_VkDrawIndexedIndirectCommand_TAG_ID
#define bor_vulkan_structs_VkDrawIndexedIndirectCommand_TAG_ID 104L
/*
* Class: bor_vulkan_structs_VkDrawIndexedIndirectCommand
* Method: setIndexCount0
* Signature: (Ljava/nio/Buffer;I)V
*/
JNIEXPORT void JNICALL Java_bor_vulkan_structs_VkDrawIndexedIndirectCommand_setIndexCount0
(JNIEnv *, jclass, jobject, jint);
/*
* Class: bor_vulkan_structs_VkDrawIndexedIndirectCommand
* Method: getIndexCount0
* Signature: (Ljava/nio/Buffer;)I
*/
JNIEXPORT jint JNICALL Java_bor_vulkan_structs_VkDrawIndexedIndirectCommand_getIndexCount0
(JNIEnv *, jclass, jobject);
/*
* Class: bor_vulkan_structs_VkDrawIndexedIndirectCommand
* Method: setInstanceCount0
* Signature: (Ljava/nio/Buffer;I)V
*/
JNIEXPORT void JNICALL Java_bor_vulkan_structs_VkDrawIndexedIndirectCommand_setInstanceCount0
(JNIEnv *, jclass, jobject, jint);
/*
* Class: bor_vulkan_structs_VkDrawIndexedIndirectCommand
* Method: getInstanceCount0
* Signature: (Ljava/nio/Buffer;)I
*/
JNIEXPORT jint JNICALL Java_bor_vulkan_structs_VkDrawIndexedIndirectCommand_getInstanceCount0
(JNIEnv *, jclass, jobject);
/*
* Class: bor_vulkan_structs_VkDrawIndexedIndirectCommand
* Method: setFirstIndex0
* Signature: (Ljava/nio/Buffer;I)V
*/
JNIEXPORT void JNICALL Java_bor_vulkan_structs_VkDrawIndexedIndirectCommand_setFirstIndex0
(JNIEnv *, jclass, jobject, jint);
/*
* Class: bor_vulkan_structs_VkDrawIndexedIndirectCommand
* Method: getFirstIndex0
* Signature: (Ljava/nio/Buffer;)I
*/
JNIEXPORT jint JNICALL Java_bor_vulkan_structs_VkDrawIndexedIndirectCommand_getFirstIndex0
(JNIEnv *, jclass, jobject);
/*
* Class: bor_vulkan_structs_VkDrawIndexedIndirectCommand
* Method: setVertexOffset0
* Signature: (Ljava/nio/Buffer;I)V
*/
JNIEXPORT void JNICALL Java_bor_vulkan_structs_VkDrawIndexedIndirectCommand_setVertexOffset0
(JNIEnv *, jclass, jobject, jint);
/*
* Class: bor_vulkan_structs_VkDrawIndexedIndirectCommand
* Method: getVertexOffset0
* Signature: (Ljava/nio/Buffer;)I
*/
JNIEXPORT jint JNICALL Java_bor_vulkan_structs_VkDrawIndexedIndirectCommand_getVertexOffset0
(JNIEnv *, jclass, jobject);
/*
* Class: bor_vulkan_structs_VkDrawIndexedIndirectCommand
* Method: setFirstInstance0
* Signature: (Ljava/nio/Buffer;I)V
*/
JNIEXPORT void JNICALL Java_bor_vulkan_structs_VkDrawIndexedIndirectCommand_setFirstInstance0
(JNIEnv *, jclass, jobject, jint);
/*
* Class: bor_vulkan_structs_VkDrawIndexedIndirectCommand
* Method: getFirstInstance0
* Signature: (Ljava/nio/Buffer;)I
*/
JNIEXPORT jint JNICALL Java_bor_vulkan_structs_VkDrawIndexedIndirectCommand_getFirstInstance0
(JNIEnv *, jclass, jobject);
#ifdef __cplusplus
}
#endif
#endif
|
BYU-Open-Analytics/BYUOpenAssessments
|
config/routes.rb
|
# Routing constraint that matches requests arriving on a "real" custom
# subdomain -- i.e. anything other than an empty subdomain or the bare
# "www" prefix.
class CustomDomain
  # @param request [ActionDispatch::Request] incoming request
  # @return [Boolean] true when the request carries a usable subdomain
  def matches?(request)
    sub = request.subdomain
    !(sub.empty? || sub == 'www')
  end
end
Rails.application.routes.draw do
  # Landing page.
  root to: "default#index"

  # LTI tool installations (CRUD).
  resources :lti_installs

  # Devise authentication with project-specific controllers.
  devise_for :users, controllers: {
    sessions: "sessions",
    registrations: "registrations",
    omniauth_callbacks: "omniauth_callbacks"
  }

  # Per-user assessment management (update/edit/show handled elsewhere).
  resources :users do
    resources :assessments, except: [:update, :edit, :show], controller: "assessments"
  end

  # Friendly auth URLs layered over Devise.
  as :user do
    get '/auth/failure' => 'sessions#new'
    get 'users/auth/:provider' => 'users/omniauth_callbacks#passthru'
    get 'sign_in' => 'sessions#new'
    post 'sign_in' => 'sessions#create'
    get 'sign_up' => 'devise/registrations#new'
    delete 'sign_out' => 'sessions#destroy'
  end

  resources :canvas_authentications
  resources :admin, only: [:index]

  # Account-scoped user administration API.
  namespace :api do
    resources :accounts do
      resources :users
    end
  end

  # LTI launch endpoint (POSTed by the LMS).
  post 'assessments/lti', to: 'assessments#lti', as: 'lti'

  # Assessments also accept POST on show (e.g. LTI-style launches).
  resources :assessments do
    member do
      post :show
    end
  end

  resources :assessment_loaders
  resources :assessment_results
  resources :item_results

  # SAML single sign-on endpoints.
  get 'saml', to: 'saml#index'
  get 'saml/metadata', to: 'saml#metadata'
  post 'saml/consume', to: 'saml#consume'

  # oembed
  match 'oembed' => 'oembed#endpoint', :via => [:get, :post]

  # xAPI statement ingestion.
  post 'api/xapi', to: 'api/xapi#index'

  # Public JSON API consumed by the assessment player.
  namespace :api do
    resources :user_assessments
    resources :assessments
    resources :assessment_results
    resources :item_results
    resources :grades
    resources :check_question
    resources :assessment_settings
    resources :question_text
  end

  # Misc utility/static routes.
  match '/proxy' => 'default#proxy', via: [:get, :post]
  match '/contact' => 'default#contact', via: [:get, :post]
  match '/about' => 'default#about', via: [:get]
  match '/take' => 'default#take', via: [:get]

  # Email previews, development only.
  mount MailPreview => 'mail_view' if Rails.env.development?
end
|
cybervisiontech/coopr
|
coopr-ui/app/features/crud/controllers/subnav-ctrl.js
|
/**
 * SubnavCtrl
 *
 * Fills the sub-navigation dropdown with every instance of the model
 * backing the current top-level state, linking each entry to that
 * state's detail (or, failing that, edit) view.
 */
angular.module(PKG.name + '.feature.crud').controller('SubnavCtrl',
function ($scope, $state, myApi) {

  var stateRoot = $state.current.name.split('.')[0];
  var modelName = $state.current.data.modelName;

  $scope.fetchSubnavList = fetchSubnavList;
  fetchSubnavList();

  /* ----------------------------------------------------------------------- */

  // Queries the API resource for this model and rebuilds the dropdown
  // once the list resolves.
  function fetchSubnavList () {
    $scope.subnavList = myApi[modelName].query(function (list) {
      $scope.dropdown = list.map(toMenuItem);
    });
  }

  // Maps one model instance to a dropdown menu entry.
  function toMenuItem (item) {
    var targetState = $state.get(stateRoot + '.detail') || $state.get(stateRoot + '.edit');
    return {
      text: item.name,
      href: $state.href(targetState, item)
    };
  }
});
|
h2ssh/Vulcan
|
src/graph/edge_ordered_graph.h
|
<reponame>h2ssh/Vulcan
/* Copyright (C) 2010-2019, The Regents of The University of Michigan.
All rights reserved.
This software was developed as part of the The Vulcan project in the Intelligent Robotics Lab
under the direction of <NAME>, <EMAIL>. Use of this code is governed by an
MIT-style License that can be found at "https://github.com/h2ssh/Vulcan".
*/
/**
* \file edge_ordered_graph.h
* \author <NAME>
*
* Definition of EdgeOrderedGraph, a subclass of Graph that allows for easy creation of
* edge-ordered graphs for fast checking of map planarity.
*/
#ifndef GRAPH_EDGE_ORDERED_GRAPH_H
#define GRAPH_EDGE_ORDERED_GRAPH_H
#include <graph/graph.h>
namespace vulcan
{
namespace graph
{
/**
 * EdgeOrderedGraph
 *
 * A Graph subclass whose mutation entry points (addVertex/addEdge) are
 * virtual, so that edges can be kept in a fixed order around each vertex
 * -- the representation needed for fast planarity checking of a map.
 *
 * NOTE(review): in this revision both overrides simply delegate to
 * Graph<Vertex, Edge>; no edge ordering is actually maintained yet --
 * confirm whether ordering logic is still to be added here.
 */
template <class Vertex, class Edge>
class EdgeOrderedGraph : public Graph<Vertex, Edge>
{
public:

    // Graph interface

    /// Adds a vertex. Currently a plain pass-through to Graph::addVertex.
    virtual void addVertex(const Vertex& vertex)
    {
        Graph<Vertex, Edge>::addVertex(vertex);
    }

    /// Adds an edge. Currently a plain pass-through to Graph::addEdge.
    virtual void addEdge(const Edge& edge)
    {
        Graph<Vertex, Edge>::addEdge(edge);
    }
};
}
}
#endif // GRAPH_EDGE_ORDERED_GRAPH_H
|
franticsoftware/starports
|
Game/src/game/main.cpp
|
<filename>Game/src/game/main.cpp
/*
<NAME>
Created: February 21th 2010
*/
#include "Game.h"
#include "GameEngine.h"
#include "GameApp.h"
#include "APIHook.h"
#include <Dbghelp.h>
#include <iostream>
#include <tchar.h>
#include <strsafe.h>
#include <shlobj.h>
#pragma comment(lib, "DbgHelp")
#define MAX_BUFF_SIZE 1024
//! Entity registration function
extern void RegisterObjects();
LONG WINAPI RedirectedSetUnhandledExceptionFilter(EXCEPTION_POINTERS* ExceptionInfo)
{
// When the CRT calls SetUnhandledExceptionFilter with NULL parameter
// our handler will not get removed.
return 0;
}
/**
 * Writes a minimal crash dump ("minidump.dmp" in the working directory)
 * for post-mortem debugging.
 *
 * Best-effort: we are already inside crash handling, so every failure is
 * silently tolerated -- there is nothing sensible left to do.
 *
 * @param e exception context from the unhandled-exception filter; may be
 *          NULL, in which case a dump without exception info is written.
 */
void make_minidump(EXCEPTION_POINTERS* e)
{
    HANDLE hFile = CreateFile("minidump.dmp", GENERIC_WRITE, FILE_SHARE_READ, 0, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, 0);
    if (hFile == INVALID_HANDLE_VALUE)
        return; // cannot create the dump file; give up quietly

    MINIDUMP_EXCEPTION_INFORMATION exceptionInfo;
    exceptionInfo.ThreadId = GetCurrentThreadId();
    exceptionInfo.ExceptionPointers = e;
    exceptionInfo.ClientPointers = FALSE;

    // Result intentionally ignored: a FALSE return while crashing is not
    // recoverable, and we still must close the handle below.
    MiniDumpWriteDump(
        GetCurrentProcess(),
        GetCurrentProcessId(),
        hFile,
        MINIDUMP_TYPE(MiniDumpNormal),
        e ? &exceptionInfo : NULL,
        NULL,
        NULL);

    // hFile is known valid here (early return above), so the original
    // redundant NULL check and trailing return were removed.
    CloseHandle(hFile);
}
/**
 * Top-level unhandled-exception filter installed in main().
 *
 * Logs that a crash occurred, writes a minidump for post-mortem analysis,
 * and then lets the default handling continue (so the process still dies
 * and e.g. WER can run).
 */
LONG WINAPI OurSetUnhandledExceptionFilter(EXCEPTION_POINTERS* ExceptionInfo)
{
    // Fix: replaced the uninformative "GOTCHA!" log line with a message
    // that tells the reader what actually happened and what to look for.
    shoot::Log.Print("Unhandled exception caught; writing minidump.dmp\n");
    make_minidump(ExceptionInfo);
    return EXCEPTION_CONTINUE_SEARCH;
}
// Entry point: installs crash handling, then runs the game loop.
int main(int argc, char** argv)
{
    // Install our crash handler first...
    ::SetUnhandledExceptionFilter(OurSetUnhandledExceptionFilter);
    // ...then hook kernel32!SetUnhandledExceptionFilter so later calls
    // (e.g. by the CRT) cannot replace it. The hook lives for all of main().
    CAPIHook apiHook("kernel32.dll", "SetUnhandledExceptionFilter", (PROC)RedirectedSetUnhandledExceptionFilter);

    {
        // Scoped so the app is destroyed before main() returns.
        shoot::GameEngine* pEngine = new shoot::GameEngine(&RegisterObjects);
        shoot::GameApp app(shoot::Size(1024, 768), true, true);
        pEngine->Init();
        app.Run(); // blocks until the game exits
        // NOTE(review): pEngine is never deleted here -- presumably owned
        // and torn down by the engine/app framework; confirm, otherwise
        // this leaks at process exit.
    }
    return 0;
}
|
cclauss/discovery-artifact-manager
|
src/snippetgen/mergesnippets/main.go
|
<reponame>cclauss/discovery-artifact-manager<filename>src/snippetgen/mergesnippets/main.go
// Package main contains the main driver for the mergesnippets tool,
// which merges manual and automatic snippets into public snippets
// that can be published to GCS.
//
// The simplest invocation is:
//
// main --primary=PATH1 --secondary=PATH2 --merged=PATH3 [APISUBPATHS...]
//
// where APISUBPATHS are specific subpaths under both PATH1 and PATH2
// that identify the content to merge.
//
// Note that since this is a general tool, it does not delete the
// contents of PATH3 before writing to it.
package main
import (
"flag"
"log"
"discovery-artifact-manager/snippetgen/mergesnippets/snippet"
)
// Command-line flags selecting the snippet sources to merge, the output
// location, and merge behavior. Locations prefixed with gcs:// are read
// from / written to Google Cloud Storage; anything else is a local path.
var (
	primaryLocation   = flag.String("primary", "manual/", "Location of the manual snippets to merge. If the prefix is gcs://, the location is taken to be in GCS; otherwise, the location is interpreted as a local path.")
	secondaryLocation = flag.String("secondary", "automatic/", "Location of the automatic snippets to merge. If the prefix is gcs://, the location is taken to be in GCS; otherwise, the location is interpreted as a local path.")
	mergedLocation    = flag.String("merged", "public/", "Location of the public, merged snippets. If the prefix is gcs://, the location is taken to be in GCS; otherwise, the location is interpreted as a local path.")
	gsutilPath        = flag.String("gsutil", "gsutil", "Path to the gsutil command (if not in your $PATH)")
	tmpDir            = flag.String("tmp", "", "Path under which to create temporary directories")
	simpleMetadata    = flag.Bool("simple_metadata", false, "Whether to have simple metadata in the merged artifact rather than metadata that traces both merge sources")
	currentOnly       = flag.Bool("current_only", false, "Whether to only merge API versions at the current revision")
	renameLanguages   = flag.Bool("rename_languages", false, "Whether to rename languages to their canonical display names that will be displayed to users.")
)
// main drives the three merge phases (fetch, merge, publish), aborting
// with the accumulated errors after any phase that failed.
func main() {
	flag.Parse()

	// Configure the merger from the command-line flags; any bad setup is
	// reported before doing real work.
	merger := &snippet.Merger{}
	merger.Init(*gsutilPath, *primaryLocation, *secondaryLocation, *mergedLocation,
		*tmpDir, *simpleMetadata, *currentOnly, *renameLanguages, flag.Args())
	if err := merger.Error(); err != nil {
		log.Fatalf("initialization errors:\n%s", err)
	}

	// Fetch both snippet sets and merge them; errors accumulate on the
	// merger and are checked once after both steps.
	merger.GetFragments()
	merger.MergeFragments()
	if err := merger.Error(); err != nil {
		log.Fatalf("merging errors:\n%s", err)
	}

	// Publish the merged fragments to the public location.
	merger.PublishMergedFragments()
	if err := merger.Error(); err != nil {
		log.Fatalf("publishing errors:\n%s", err)
	}
}
|
Jocker08/metadata
|
metadata-web/src/main/java/com/ryo/metadata/web/support/jdbc/impl/OracleJdbcImpl.java
|
/*
* Copyright (c) 2018. houbinbin Inc.
* metadata All rights reserved.
*/
package com.ryo.metadata.web.support.jdbc.impl;
import com.ryo.metadata.core.constant.DriverNameConstant;
import com.ryo.metadata.core.service.DBService;
import com.ryo.metadata.core.service.impl.OracleDBService;
import com.ryo.metadata.core.util.vo.JdbcVo;
import com.ryo.metadata.web.support.jdbc.IJdbc;
import com.ryo.metadata.web.support.jdbc.JdbcContainer;
import org.apache.commons.lang3.StringUtils;
import org.apache.logging.log4j.LogManager;
import org.springframework.stereotype.Service;
import static com.ryo.metadata.web.support.jdbc.impl.OracleJdbcImpl.DATABASE;
/**
* <p> </p>
*
* <pre> Created: 2018/6/20 下午4:09 </pre>
* <pre> Project: metadata </pre>
*
* @author houbinbin
* @version 1.0
* @since JDK 1.7
*/
@Service(DATABASE + JdbcContainer.SERVICE_NAME)
public class OracleJdbcImpl implements IJdbc {
/**
* OracleJdbcImpl's Logger
*/
private static final org.apache.logging.log4j.Logger LOGGER = LogManager.getLogger(OracleJdbcImpl.class);
static final String DATABASE = "oracle";
@Override
public void execute(String host, String port, String database, String username, String password)
throws Exception {
JdbcVo jdbcVo = buildOracleJdbcVo(host, port, database, username, password);
DBService dbService = OracleDBService.getInstance(jdbcVo);
dbService.execute();
}
/**
* Oracle 的链接信息
* PS: oracle 是没有用户的概念的
* @param port 端口号
* @param database 数据库信息 类似于 service 此处填写 XE/orcl 等
* @param username 用户名称
* @param password 密码
* @return 链接信息
*/
private JdbcVo buildOracleJdbcVo(final String host, final String port,
final String database,
final String username, final String password) {
final String hostActual = StringUtils.defaultIfEmpty(host, "127.0.0.1");
final String portActual = StringUtils.defaultIfEmpty(port, "49161");
final String serviceActual = StringUtils.defaultIfEmpty(database, "XE");
final String usernameActual = StringUtils.defaultIfEmpty(username, "system");
final String passwordActual = StringUtils.defaultIfEmpty(password, "<PASSWORD>");
JdbcVo jdbcVo = new JdbcVo();
jdbcVo.setDriverClassName(DriverNameConstant.ORACLE);
jdbcVo.setUrl(String.format("jdbc:oracle:thin:@%s:%s:%s",
hostActual, portActual, serviceActual));
jdbcVo.setUsername(usernameActual);
jdbcVo.setPassword(password<PASSWORD>);
LOGGER.info("JDBC:{}", jdbcVo);
return jdbcVo;
}
// jdbc.driverClassName=oracle.jdbc.OracleDriver
// jdbc.url=jdbc:oracle:thin:@127.0.0.1:49161:XE
// jdbc.username=system
// jdbc.password=<PASSWORD>
}
|
katemihalikova/test262
|
test/harness/verifyProperty-arguments.js
|
// Copyright (C) 2017 <NAME>. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.
/*---
description: >
  verifyProperty should receive at least 3 arguments: obj, name, and descriptor
includes: [propertyHelper.js]
---*/

// Calling verifyProperty with fewer than 3 arguments must throw a Test262Error.
var insufficientArgs = [
  { args: [], message: "0 arguments" },
  { args: [Object], message: "1 argument" },
  { args: [Object, 'foo'], message: "2 arguments" }
];

insufficientArgs.forEach(function(testCase) {
  assert.throws(Test262Error, function() {
    verifyProperty.apply(null, testCase.args);
  }, testCase.message);
});
|
amyjko/citrus-barista
|
edu/cmu/hcii/citrus/views/widgets/Checkbox.java
|
/*
*
* Citrus - A object-oriented, interpreted language that is designed to simplify
* the creation of dynamic, immediate feedback graphical desktop applications.
*
* Copyright (c) 2005 <NAME>
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
*/
package edu.cmu.hcii.citrus.views.widgets;
import edu.cmu.hcii.citrus.*;
import edu.cmu.hcii.citrus.views.*;
import edu.cmu.hcii.citrus.views.layouts.*;
/**
 * A two-state checkbox widget for the Citrus view system.
 *
 * The boolean state lives in the {@code property} declaration (a
 * {@code Property<Bool>} supplied by the surrounding model); clicking the
 * widget or typing while its box is focused toggles that property with the
 * style's quick transition. The visible checkmark is hidden whenever the
 * property is null or false.
 */
public class Checkbox extends ElementView {

	// The Property<Bool> this checkbox reflects and toggles (bound externally).
	public static final Dec<Property<Bool>> property = new Dec<Property<Bool>>((Element)null, true);
	public static final Dec<Bool> model = new Dec<Bool>();
	// Box and (optional) trailing content are laid out left-to-right with 5px spacing.
	public static final Dec<Layout> layout = new Dec<Layout>(new HorizontalLayout(0, 5));
	// Width/height track the children (expressions evaluated by the Citrus runtime).
	public static final Dec<Real> width = new Dec<Real>(true, "(this lastChildsRight)");
	public static final Dec<Real> height = new Dec<Real>(true, "(this tallestChildsHeight)");
	public static final Dec<Bool> clipsChildren = new Dec<Bool>(new Bool(false));
	// Mouse interaction: press picks the widget; release inside it toggles the property.
	public static final Dec<List<Behavior>> behaviors = new Dec<List<Behavior>>(new List<Behavior>(
		new Behavior(App.mouse.leftButton.pressed, App.mouse.pointer.pick),
		new Behavior(App.mouse.leftButton.released, new Action() {
			public boolean evaluate(View t) {
				Property<Bool> flag = t.get(property);
				if(flag != null && App.mouse.pointer.isPicked(t)) {
					// Only toggle when the release happens over the widget itself.
					if(t.contains(App.mouse.pointer.getPosition()).value) flag.set(flag.get().not(), t.getStyle().getQuickTransition());
					App.mouse.pointer.release();
					return true;
				} else return false;
			}})
	));
	// Single child: the 13x13 box drawn below.
	public static final Dec<List<View>> children = new Dec<List<View>>(View.<List<View>>parseExpression("[(a Box)]"));

	public Checkbox(ArgumentList arguments) { super(arguments); }
	public Checkbox(Namespace type, ArgumentList arguments) { super(type, arguments); }
	// No structural update needed when the observed property changes; paint handles it.
	public ElementView updatedViewFor(Property p, Element newValue, Transition t) { return this; }

	/** The square outline of the checkbox; keyboard focusable, toggles on any key. */
	public static class Box extends View {
		public static final Dec<Bool> clipsChildren = new Dec<Bool>(new Bool(false));
		public static final Dec<Real> width = new Dec<Real>(new Real(13));
		public static final Dec<Real> height = new Dec<Real>(new Real(13));
		public static final Dec<List<Paint>> content = new Dec<List<Paint>>(View.<List<Paint>>parseExpression("[ (this getStyle).checkboxPaint]"));
		// Keyboard interaction: show focus paint while focused; any key toggles.
		public static final Dec<List<Behavior>> behaviors = new Dec<List<Behavior>>(new List<Behavior>(
			new Behavior(App.focusReceived, App.keyboard.addFocusPaint),
			new Behavior(App.focusLost, App.keyboard.removeFocusPaint),
			new Behavior(App.keyboard.typed, new Action() { public boolean evaluate(View t) {
				Property<Bool> flag = t.get(property);
				if(flag != null) {
					flag.set(flag.get().not(), t.getStyle().getQuickTransition());
					return true;
				} else return false;
			}})
		));
		public static final Dec<List<View>> children = new Dec<List<View>>(View.<List<View>>parseExpression("[(a Checkmark)]"));
		public Box() { super(); }
		public Box(ArgumentList args) { super(args); }
	}

	/** The checkmark glyph; fills its parent Box and hides when the property is null/false. */
	public static class Checkmark extends View {
		public static final Dec<Bool> clipsChildren = new Dec<Bool>(new Bool(false));
		public static final Dec<Real> width = new Dec<Real>(true, View.<Real>parseExpression("(this parentsWidth)"));
		public static final Dec<Real> height = new Dec<Real>(true, View.<Real>parseExpression("(this parentsHeight)"));
		public static final Dec<List<Paint>> content = new Dec<List<Paint>>(View.<List<Paint>>parseExpression("[(this getStyle).checkmarkPaint]"));
		// Hidden unless the bound property exists and is true.
		public static final Dec<Bool> hidden = new Dec<Bool>(true, new Expression<Bool>() { public Bool evaluate(Element<?> env) {
			if(env.get(property) == null) return new Bool(true);
			else return env.get(property).get().not(); }});
		public Checkmark() { super(); }
		public Checkmark(ArgumentList args) { super(args); }
	}
}
|
jnthn/intellij-community
|
plugins/InspectionGadgets/test/com/siyeh/igfixes/style/replace_with_string/Comment1.java
|
// Inspection quick-fix test fixture ("replace StringBuffer chain with String"):
// <caret> marks the editor caret position and the /*a*/../*f*/ comments verify
// that comments survive the transformation, so this file is intentionally not
// compilable Java and its exact token layout is significant.
class Comment1 {
    String s = new /*a*//*b*/ <caret>StringBuffer("asdf1"/*c*/)/*d*/./*e*/append(/*f*/"asdf2").toString(); /* one */ /* two */
}
|
ljsvn/noveltysearch
|
WebContent/extjs/ux/CalendarPanel.js
|
/**
 * A month-oriented calendar container based on Ext.Panel with a card layout.
 * Hosts a month view plus a navigation toolbar (prev / current-month / next
 * and a month-picker field) and relays view events to the application.
 */
Ext.calendar.CalendarPanel = Ext.extend(Ext.Panel, {
    showDayView: false,
    showWeekView: false,
    showMonthView: true,
    showNavBar: true,
    todayText: '今天',
    showTodayText: true,
    showTime: true,
    dayText: '天',
    monthText: '本月',

    // private
    layoutConfig: {
        layoutOnCardChange: true,
        deferredRender: true
    },

    // private property: the date the active view currently starts at
    startDate: new Date(),

    // Update the month field to display the current start date's year/month.
    setMonthField: function() {
        Ext.getCmp(this.id + '-month-field').setValue(this.startDate.getFullYear() + ' 年 ' + (this.startDate.getMonth() + 1) + ' 月');
    },

    // private
    initComponent: function() {
        this.tbar = {
            cls: 'ext-cal-toolbar',
            border: true,
            buttonAlign: 'center',
            items: [
                new Ext.ux.MonthField({
                    id: this.id + '-month-field',
                    onSelect: function(menu, date) {
                        this.findParentByType('ods.calendarpanel').setStartDate(new Date(menu.selYear, menu.selMonth - 1));
                        // After a month has been picked in the field:
                        this.setValue(date);
                        this.fireEvent('select', this, date);
                        this.menu.hide();
                    }
                })
            ]
        };
        // Initialize the field with the current start date.
        this.setMonthField();

        this.viewCount = 0;
        if (this.showMonthView || this.viewCount == 0) {
            this.tbar.items.push({
                id: this.id + '-tb-prev',
                scope: this, iconCls: 'x-tbar-page-prev',
                handler: function() {
                    this.onPrevClick();
                    this.setMonthField();
                }
            });
            this.tbar.items.push({
                id: this.id + '-tb-month',
                scope: this, text: this.monthText,
                handler: function() {
                    // Jump back to the current month.
                    this.setStartDate(new Date());
                    Ext.getCmp(this.id + '-month-field').setValue(new Date().getFullYear() + ' 年 ' + (new Date().getMonth() + 1) + ' 月');
                }
            });
            this.tbar.items.push({
                id: this.id + '-tb-next',
                scope: this, iconCls: 'x-tbar-page-next',
                handler: function() {
                    this.onNextClick();
                    this.setMonthField();
                }
            });
            this.tbar.items.push('->');
            // this.tbar.items.push({
            //     text: '导入',
            //     scope: this,
            //     iconCls: 'x-icon-oper-end'
            // });
            this.viewCount++;
            this.showMonthView = true;
        }

        var idx = this.viewCount - 1;
        this.activeItem = this.activeItem === undefined ? idx : (this.activeItem > idx ? idx : this.activeItem);

        Ext.calendar.CalendarPanel.superclass.initComponent.call(this);

        this.addEvents({
            eventadd: true,
            eventupdate: true,
            eventdelete: true,
            eventcancel: true,
            viewchange: true
        });

        this.layout = 'card';
        // do not allow override
        if (this.showMonthView) {
            var month = Ext.applyIf({
                xtype: 'monthview',
                title: this.dayText,
                showToday: this.showToday,
                showTodayText: this.showTodayText,
                showTime: this.showTime,
                listeners: {
                    'weekclick': {
                        fn: function(vw, dt) {
                            //this.showWeek(dt);
                        },
                        scope: this
                    }
                }
            },
            this.monthViewCfg);
            month.id = this.id + '-month';
            month.store = month.store || this.eventStore;
            this.initEventRelay(month);
            this.add(month);
        }
        /*
        this.add(Ext.applyIf({
            xtype: 'eventeditform',
            id: this.id + '-edit',
            calendarStore: this.calendarStore,
            listeners: {
                'eventadd': {
                    scope: this,
                    fn: this.onEventAdd
                },
                'eventupdate': {
                    scope: this,
                    fn: this.onEventUpdate
                },
                'eventdelete': {
                    scope: this,
                    fn: this.onEventDelete
                },
                'eventcancel': {
                    scope: this,
                    fn: this.onEventCancel
                }
            }
        },
        this.editViewCfg));
        * */
    },

    // private
    initEventRelay: function(cfg) {
        cfg.listeners = cfg.listeners || {};
        cfg.listeners.afterrender = {
            fn: function(c) {
                // relay the view events so that app code only has to handle them in one place
                this.relayEvents(c, ['eventsrendered', 'eventclick', 'eventover', 'eventout', 'dayclick',
                'eventmove', 'datechange', 'rangeselect', 'eventdelete', 'eventresize', 'initdrag']);
            },
            scope: this,
            single: true
        };
    },

    // private
    afterRender: function() {
        Ext.calendar.CalendarPanel.superclass.afterRender.call(this);
        this.fireViewChange();
    },

    // private
    onLayout: function() {
        Ext.calendar.CalendarPanel.superclass.onLayout.call(this);
        if (!this.navInitComplete) {
            this.updateNavState();
            this.navInitComplete = true;
        }
    },

    // private
    onEventAdd: function(form, rec) {
        rec.data[Ext.calendar.EventMappings.IsNew.name] = false;
        this.eventStore.add(rec);
        this.hideEditForm();
        this.fireEvent('eventadd', this, rec);
    },

    // private
    onEventUpdate: function(form, rec) {
        rec.commit();
        this.hideEditForm();
        this.fireEvent('eventupdate', this, rec);
    },

    // private
    onEventDelete: function(form, rec) {
        this.eventStore.remove(rec);
        this.hideEditForm();
        this.fireEvent('eventdelete', this, rec);
    },

    // private
    onEventCancel: function(form, rec) {
        this.hideEditForm();
        this.fireEvent('eventcancel', this, rec);
    },

    /**
     * Shows the built-in event edit form for the passed in event record. This method automatically
     * hides the calendar views and navigation toolbar. To return to the calendar, call {@link #hideEditForm}.
     * @param {Ext.calendar.EventRecord} record The event record to edit
     * @return {Ext.calendar.CalendarPanel} this
     */
    showEditForm: function(rec) {
        this.preEditView = this.layout.activeItem.id;
        this.setActiveView(this.id + '-edit');
        this.layout.activeItem.loadRecord(rec);
        return this;
    },

    /**
     * Hides the built-in event edit form and returns to the previous calendar view. If the edit form is
     * not currently visible this method has no effect.
     * @return {Ext.calendar.CalendarPanel} this
     */
    hideEditForm: function() {
        if (this.preEditView) {
            this.setActiveView(this.preEditView);
            delete this.preEditView;
        }
        return this;
    },

    // private
    setActiveView: function(id) {
        var l = this.layout;
        l.setActiveItem(id);

        if (id == this.id + '-edit') {
            // The edit card replaces the calendar entirely; hide navigation.
            this.getTopToolbar().hide();
            this.doLayout();
        }
        else {
            l.activeItem.refresh();
            this.getTopToolbar().show();
            this.updateNavState();
        }
        this.activeView = l.activeItem;
        this.fireViewChange();
    },

    // private
    fireViewChange: function() {
        var info = null,
            view = this.layout.activeItem;

        if (view.getViewBounds) {
            // Fix: declare vb locally (it previously leaked as an implicit global).
            var vb = view.getViewBounds();
            info = {
                activeDate: view.getStartDate(),
                viewStart: vb.start,
                viewEnd: vb.end
            };
        }
        this.fireEvent('viewchange', this, view, info);
    },

    // private
    updateNavState: function() {
        if (this.showNavBar !== false) {
            var item = this.layout.activeItem,
                suffix = item.id.split(this.id + '-')[1];
            var btn = Ext.getCmp(this.id + '-tb-' + suffix);
            // Fix: guard against views without a matching toolbar button,
            // which previously caused a TypeError on btn.toggle.
            if (btn) {
                btn.toggle(true);
            }
        }
    },

    /**
     * Sets the start date for the currently-active calendar view.
     * @param {Date} dt
     */
    setStartDate: function(dt) {
        this.layout.activeItem.setStartDate(dt, true);
        this.updateNavState();
        this.fireViewChange();
    },

    // private
    showWeek: function(dt) {
        this.setActiveView(this.id + '-week');
        this.setStartDate(dt);
    },

    // private
    onPrevClick: function() {
        this.startDate = this.layout.activeItem.movePrev();
        this.updateNavState();
        this.fireViewChange();
    },

    // private
    onNextClick: function() {
        this.startDate = this.layout.activeItem.moveNext();
        this.updateNavState();
        this.fireViewChange();
    },

    // private
    onDayClick: function() {
        this.setActiveView(this.id + '-day');
    },

    // private
    onWeekClick: function() {
        this.setActiveView(this.id + '-week');
    },

    // private
    onMonthClick: function() {
        this.setActiveView(this.id + '-month');
    },

    /**
     * Return the calendar view that is currently active, which will be a subclass of
     * {@link Ext.calendar.CalendarView CalendarView}.
     * @return {Ext.calendar.CalendarView} The active view
     */
    getActiveView: function() {
        return this.layout.activeItem;
    }
});

Ext.reg('ods.calendarpanel', Ext.calendar.CalendarPanel);
|
V-Fib/FlurryClone
|
MMOCoreORB/src/templates/tangible/SharedShipObjectTemplate.h
|
<reponame>V-Fib/FlurryClone
/*
* SharedShipObjectTemplate.h
*
* Created on: 06/05/2010
* Author: victor
*/
#ifndef SHAREDSHIPOBJECTTEMPLATE_H_
#define SHAREDSHIPOBJECTTEMPLATE_H_
#include "templates/SharedTangibleObjectTemplate.h"
// Template for shared ship objects: extends the tangible-object template with
// ship-specific parameters (interior layout, cockpit, wings, player control)
// parsed from Lua template data and/or IFF 'SSHP' forms.
class SharedShipObjectTemplate : public SharedTangibleObjectTemplate {
	StringParam interiorLayoutFileName;  // file describing the ship interior layout
	StringParam cockpitFilename;         // cockpit model/layout file
	BoolParam hasWings;                  // whether the ship has (openable) wings
	BoolParam playerControlled;          // whether this ship is player controlled

public:
	SharedShipObjectTemplate() {
	}

	~SharedShipObjectTemplate() {
	}

	// Lua path: ship templates add no Lua-side variables of their own,
	// so just delegate to the tangible-object template.
	void readObject(LuaObject* templateData) {
		SharedTangibleObjectTemplate::readObject(templateData);
	}

	// Dispatches a single named variable chunk to the matching parameter.
	// Unknown names are silently ignored (handled by parent forms, if at all).
	void parseVariableData(const String& varName, Chunk* data) {
		if (varName == "interiorLayoutFileName") {
			interiorLayoutFileName.parse(data);
		} else if (varName == "cockpitFilename") {
			cockpitFilename.parse(data);
		} else if (varName == "hasWings") {
			hasWings.parse(data);
		} else if (varName == "playerControlled") {
			playerControlled.parse(data);
		}
	}

	// Reads the variable-count chunk ('PCNT' -- presumably "parameter count";
	// confirm against the IFF format docs), then each 'XXXX' chunk as a
	// name + payload pair.
	void parseFileData(IffStream* iffStream) {
		iffStream->openChunk('PCNT');

		int variableCount = iffStream->getInt();

		iffStream->closeChunk('PCNT');

		for (int i = 0; i < variableCount; ++i) {
			//while (iffStream->getRemainingSubChunksNumber() > 0) {
			Chunk* chunk = iffStream->openChunk('XXXX');

			if (chunk == nullptr)
				continue;

			String varName;
			iffStream->getString(varName);

			//std::cout << "parsing wtf shit:[" << varName.toStdString() << "]\n";

			parseVariableData(varName, chunk);

			iffStream->closeChunk();
		}
	}

	// Reads an 'SSHP' form (derivation info + versioned data form), recursing
	// while further sub-forms remain; non-'SSHP' forms are forwarded to the
	// parent class reader.
	void readObject(IffStream* iffStream) {
		uint32 nextType = iffStream->getNextFormType();

		if (nextType != 'SSHP') {
			//Logger::console.error("expecting SHOT got " + String::hexvalueOf((int)nextType));

			SharedTangibleObjectTemplate::readObject(iffStream);
			return;
		}

		iffStream->openForm('SSHP');

		uint32 derv = iffStream->getNextFormType();

		if (derv == 'DERV') {
			loadDerv(iffStream);
			derv = iffStream->getNextFormType();
		}

		iffStream->openForm(derv);

		try {
			parseFileData(iffStream);
		} catch (Exception& e) {
			String msg;
			msg += "exception caught parsing file data ->";
			msg += e.getMessage();
			Logger::console.error(msg);
		}

		iffStream->closeForm(derv);

		if (iffStream->getRemainingSubChunksNumber() > 0) {
			readObject(iffStream);
		}

		iffStream->closeForm('SSHP');
	}
};
#endif /* SHAREDSHIPOBJECTTEMPLATE_H_ */
|
envisioning/tdb-storybook
|
src/components/OrganizationEditForm/stories.js
|
import React from 'react';
import { storiesOf, action } from '@kadira/storybook';
import OrganizationEditForm from './';
import { mockA } from '../CardGrid/mock.js'
import { getCardUrl, getCloudinaryPath } from '../../utils/cloudinary_url'
const technologiesId = mockA.slice(0, 10).map(res => ({...res, image: getCardUrl(getCloudinaryPath(res.image))}));;
const projectsId = mockA.slice(11, 20).map(res => ({...res, image: getCardUrl(getCloudinaryPath(res.image))}));;
const attachmentsId = mockA.slice(21, 30).map(res => ({...res, image: getCardUrl(getCloudinaryPath(res.image))}));;
storiesOf('OrganizationEditForm', module)
.add('default', () => (
<OrganizationEditForm
technologiesId={technologiesId}
projectsId={projectsId}
attachmentsId={attachmentsId} />
))
|
briancostabile/tiva-bsp
|
source/bsp/bsp_Reset.h
|
/**
* Copyright 2021 <NAME>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
/*============================================================================*/
/**
* @file bsp_Reset.h
* @brief Contains Macros and defines for the reset reason.
*/
#pragma once
#include "bsp_Types.h"
/*==============================================================================
* Types
*============================================================================*/
/*===========================================================================*/
// A type to hold the concatenated HW+SW reset reason
typedef uint32_t bsp_Reset_Reason_t;

/*===========================================================================*/
// Hardware reset causes as bit-mask values; more than one bit may be set
// after a reset.
enum
{
    BSP_RESET_HWREASON_EXT  = 0x0001,
    BSP_RESET_HWREASON_POR  = 0x0002,
    BSP_RESET_HWREASON_BOR  = 0x0004,
    BSP_RESET_HWREASON_WDT0 = 0x0008,
    BSP_RESET_HWREASON_SW   = 0x0010,
    BSP_RESET_HWREASON_WDT1 = 0x0020,
    BSP_RESET_HWREASON_HIB  = 0x0040,
    BSP_RESET_HWREASON_HREQ = 0x1000
};
// NOTE(review): BSP_RESET_HWREASON_HREQ (0x1000) does not fit in uint8_t and
// would be truncated to 0 when stored in a bsp_Reset_HwReason_t — confirm the
// intended width (uint16_t?) or the intended HREQ value.
typedef uint8_t bsp_Reset_HwReason_t;

/*===========================================================================*/
// Software-declared reset causes, recorded before a deliberate reset so the
// reason can be read back after reboot.
enum
{
    BSP_RESET_SWREASON_UNKNOWN       = 0x0000,
    BSP_RESET_SWREASON_ASSERT        = 0x0001,
    BSP_RESET_SWREASON_UNHANDLED_INT = 0x0002,
    BSP_RESET_SWREASON_FAULT         = 0x0003,
    BSP_RESET_SWREASON_EXIT          = 0x0004,
    BSP_RESET_SWREASON_OS_MALLOC     = 0x0005,
    BSP_RESET_SWREASON_OS_STACK      = 0x0006,
};
typedef uint8_t bsp_Reset_SwReason_t;
/*==============================================================================
* Prototypes
*============================================================================*/
/*===========================================================================*/
/* Initialize the reset module (capture/latch the cause of the last reset). */
void
bsp_Reset_init( void );

/*===========================================================================*/
/* Combined HW+SW reason for the last reset (see bsp_Reset_Reason_t). */
bsp_Reset_Reason_t
bsp_Reset_getReason( void );

/*===========================================================================*/
/* Software-declared portion of the last reset reason. */
bsp_Reset_SwReason_t
bsp_Reset_getSwReason( void );

/*===========================================================================*/
/* Hardware-latched portion of the last reset reason. */
bsp_Reset_HwReason_t
bsp_Reset_getHwReason( void );

/*===========================================================================*/
/* Function name recorded by the last failed assert (see setAssertInfo). */
const char*
bsp_Reset_getAssertFunction( void );

/*===========================================================================*/
/* Line number recorded by the last failed assert (see setAssertInfo). */
uint16_t
bsp_Reset_getAssertLine( void );

/*===========================================================================*/
/* Number of resets — presumably persisted across reboots; confirm storage. */
uint32_t
bsp_Reset_getResetCount( void );

/*===========================================================================*/
/* Record the assert location so it can be reported after the reset. */
void
bsp_Reset_setAssertInfo( const char* function_str, uint16_t line );

/*===========================================================================*/
/* Record the SW reason and trigger a system reset. */
void
bsp_Reset_systemReset( bsp_Reset_SwReason_t reason );

/*===========================================================================*/
/* Fault handler entry: capture the stacked registers, then reset. */
void
bsp_Reset_systemFault( uint32_t* stackRegs );

/*===========================================================================*/
/* Registers captured by the last fault (see systemFault). */
uint32_t *
bsp_Reset_getFaultRegs( void );
|
kirino17/ecef
|
src/aquarius2/proxy/ProxyFileDialogCallback.h
|
#pragma once
#include "../def/def.h"
// Reference-counted proxy around a CEF file-dialog callback: forwards
// Continue/Cancel to the wrapped raw CEF object held in |_rawptr|.
class AQUADLL ProxyFileDialogCallback : public refcounted {
public:
	// Wraps the raw CEF callback pointer; ownership semantics depend on the
	// proxy framework (see AQUA_DECL_PUBLIC_ORIGIN).
	ProxyFileDialogCallback(void* ptr);
	~ProxyFileDialogCallback();

public:
	///
	// Continue the file selection. |selected_accept_filter| should be the 0-based
	// index of the value selected from the accept filters array passed to
	// CefDialogHandler::OnFileDialog. |file_paths| should be a single value or a
	// list of values depending on the dialog mode. An empty |file_paths| value is
	// treated the same as calling Cancel.
	///
	/*--cef(capi_name=cont,index_param=selected_accept_filter,
	optional_param=file_paths)--*/
	void Continue(int selected_accept_filter, const char* file_paths);

	///
	// Cancel the file selection.
	///
	/*--cef()--*/
	void Cancel();

public:
	PRIME_IMPLEMENT_REFCOUNTING(ProxyFileDialogCallback);
	AQUA_DECL_PUBLIC_ORIGIN;

private:
	// Raw pointer to the underlying CEF callback object being proxied.
	void* _rawptr;
};
|
fergy/aplit_linux-5
|
include/linux/crc-ccitt.h
|
<reponame>fergy/aplit_linux-5
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CRC_CCITT_H
#define _LINUX_CRC_CCITT_H
#include <linux/types.h>
/* 256-entry lookup tables for the byte-at-a-time CRC updates below
 * (standard CCITT and "false" CCITT variants). */
extern u16 const crc_ccitt_table[256];
extern u16 const crc_ccitt_false_table[256];

/* Update `crc` over `len` bytes of `buffer` and return the new CRC. */
extern u16 crc_ccitt(u16 crc, const u8 *buffer, size_t len);
extern u16 crc_ccitt_false(u16 crc, const u8 *buffer, size_t len);
/* Fold one byte `c` into `crc` (LSB-first CRC-CCITT, table-driven). */
static inline u16 crc_ccitt_byte(u16 crc, const u8 c)
{
	u16 index = (crc ^ c) & 0xff;

	return (crc >> 8) ^ crc_ccitt_table[index];
}
/* Fold one byte `c` into `crc` (MSB-first "false" CCITT variant, table-driven). */
static inline u16 crc_ccitt_false_byte(u16 crc, const u8 c)
{
	u16 index = (crc >> 8) ^ c;

	return (crc << 8) ^ crc_ccitt_false_table[index];
}
#endif /* _LINUX_CRC_CCITT_H */
|
gthreepwood/yats
|
modules/yats/templatetags/search.py
|
# -*- coding: utf-8 -*-
from django import template
register = template.Library()
@register.filter
def cut_text_for_xapian(text):
    """Prepare free text for Xapian indexing.

    Separators (';', ',') become spaces, double quotes are stripped, and any
    space-delimited word longer than 245 characters is dropped (Xapian rejects
    over-long terms).
    """
    cleaned = text.replace(';', ' ').replace(',', ' ').replace('"', '')
    return ' '.join(word for word in cleaned.split(' ') if len(word) <= 245)
|
noecl1/multihammer
|
src/Model/ClientesModel.java
|
package Model;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
/**
 * Data-access object for the {@code clientes} table.
 *
 * <p>Each public method opens its own {@link dbConnection}, runs a single
 * statement and disconnects before returning. SQL errors are logged to stdout
 * and reported through the returned message (matching the original behavior).</p>
 */
public class ClientesModel {

    private dbConnection con;

    /**
     * Loads every client row.
     *
     * @return all rows mapped to {@link Cliente}; empty on error or empty table
     */
    public ArrayList<Cliente> getClientes() {
        ArrayList<Cliente> clientes = new ArrayList<>();
        con = new dbConnection();
        Connection reg = con.getConnection();
        // try-with-resources closes the statement and result set even on error
        // (the original leaked both).
        try (PreparedStatement ps = reg.prepareStatement("SELECT * FROM clientes");
             ResultSet rs = ps.executeQuery()) {
            while (rs.next()) {
                clientes.add(new Cliente(rs.getString("id_cliente"), rs.getString("domicilio"),
                        rs.getString("rfc"), rs.getString("nombre"),
                        rs.getString("apellido"), rs.getString("telefono")));
            }
        } catch (SQLException ex) {
            System.out.println(ex);
        }
        con.disconnect();
        return clientes;
    }

    /**
     * Inserts a new client.
     *
     * @return a success message, or a duplicate-RFC message when the insert fails
     */
    public String agregarCliente(String nombre, String apellido, String domicilio, String telefono, String rfc) {
        String message = "Cliente agregado exitosamente";
        con = new dbConnection();
        Connection reg = con.getConnection();
        try (PreparedStatement ps = reg.prepareStatement(
                "INSERT INTO clientes (nombre, apellido, domicilio, telefono, rfc) VALUES (?,?,?,?,?)")) {
            bindClienteParams(ps, nombre, apellido, domicilio, telefono, rfc);
            ps.executeUpdate();
        } catch (SQLException ex) {
            System.out.println(ex);
            message = "El RFC de cliente no se puede repetir";
        }
        con.disconnect();
        return message;
    }

    /**
     * Updates an existing client identified by {@code id_cliente}.
     *
     * @return a success message, or a duplicate-RFC message when the update fails
     */
    public String modificarCliente(String nombre, String apellido, String domicilio, String telefono, String rfc, String id_cliente) {
        String message = "Cliente modificado exitosamente";
        con = new dbConnection();
        Connection reg = con.getConnection();
        try (PreparedStatement ps = reg.prepareStatement(
                "UPDATE clientes SET nombre = ?, apellido = ?, domicilio = ?, telefono = ?, rfc = ? WHERE id_cliente = ?")) {
            bindClienteParams(ps, nombre, apellido, domicilio, telefono, rfc);
            ps.setString(6, id_cliente);
            ps.executeUpdate();
        } catch (SQLException ex) {
            System.out.println(ex);
            message = "El RFC de cliente no se puede repetir";
        }
        con.disconnect();
        return message;
    }

    /** Binds the five client columns shared by INSERT and UPDATE (params 1-5). */
    private static void bindClienteParams(PreparedStatement ps, String nombre, String apellido,
            String domicilio, String telefono, String rfc) throws SQLException {
        ps.setString(1, nombre);
        ps.setString(2, apellido);
        ps.setString(3, domicilio);
        ps.setString(4, telefono);
        ps.setString(5, rfc);
    }
}
|
testWorkbench-daily/h5ds
|
src/h5ds-core/components/layer-menu/PluginItem.js
|
import React, { Component } from 'react';
import { bindSelf } from '../../utils';
/**
 * A single plugin entry in the layer menu: icon, name/version and a link to
 * the plugin detail page. Clicking the icon or the name forwards the click
 * and the plugin's data to the parent via `props.onClick`.
 */
export default class PluginItem extends Component {
  // bindSelf decorator binds `this` so the handler can be passed directly as a prop.
  @bindSelf
  onClick(e) {
    // Forward both the event and this item's plugin data (if a handler exists).
    this.props.onClick && this.props.onClick(e, this.data);
  }

  render() {
    // Cache the current data so onClick reports what was last rendered.
    this.data = this.props.data;
    const { pid, name = '未命名', version = '1.0.0', icon } = this.props.data;
    return (
      <div className="h5ds-plugin-item">
        <div onClick={this.onClick} className="h5ds-plugin-span h5ds-plugin-pic">
          {icon || <i className="h5ds-ico h5ds-ico-domnode" />}
        </div>
        <div className="h5ds-plugin-span h5ds-plugin-info">
          <h1 onClick={this.onClick}>{name}</h1>
          <p>
            <em>version:</em> {version}
          </p>
          <p>
            {/* NOTE(review): pid is used directly as the href — presumably it is a URL; confirm. */}
            <a href={pid} target="_blank" rel="noopener noreferrer">
              插件详情
            </a>
          </p>
        </div>
      </div>
    );
  }
}
|
shankar1729/qimpy
|
src/qimpy/electrons/_basis.py
|
<filename>src/qimpy/electrons/_basis.py
from __future__ import annotations
import qimpy as qp
import numpy as np
import torch
from ._basis_ops import _apply_ke, _apply_potential, _collect_density
from ._basis_real import BasisReal
from typing import Optional, Tuple, Union
class Basis(qp.TreeNode):
"""Plane-wave basis for electronic wavefunctions. The underlying
:class:`qimpy.utils.TaskDivision` splits plane waves over `rc.comm_b`"""
__slots__ = (
"comm",
"comm_kb",
"lattice",
"ions",
"kpoints",
"n_spins",
"n_spinor",
"k",
"wk",
"w_sk",
"real_wavefunctions",
"ke_cutoff",
"grid",
"iG",
"n",
"n_min",
"n_max",
"n_avg",
"n_tot",
"n_ideal",
"fft_index",
"fft_block_size",
"mpi_block_size",
"pad_index",
"pad_index_mine",
"division",
"mine",
"real",
)
comm: qp.MPI.Comm #: Basis/bands communicator
comm_kb: qp.MPI.Comm #: Overall k-points and basis/bands communicator
lattice: qp.lattice.Lattice #: Lattice vectors of unit cell
ions: qp.ions.Ions #: Ionic system: implicit in basis for ultrasoft / PAW
kpoints: qp.electrons.Kpoints #: Corresponding k-point set
n_spins: int #: Default number of spin channels
n_spinor: int #: Default number of spinorial components
k: torch.Tensor #: Subset of k handled by this basis (due to MPI division)
wk: torch.Tensor #: Subset of weights corresponding to `k`
w_sk: torch.Tensor #: Combined spin and k-point weights
real_wavefunctions: bool #: Whether wavefunctions are real
ke_cutoff: float #: Kinetic energy cutoff
grid: qp.grid.Grid #: Wavefunction grid (always process-local)
iG: torch.Tensor #: Plane waves in reciprocal lattice coordinates
n: torch.Tensor #: Number of plane waves for each `k`
n_min: int #: Minimum of `n` across all `k` (including on other processes)
n_max: int #: Maximum of `n` across all `k` (including on other processes)
n_avg: float #: Average `n` across all `k` (weighted by `wk`)
n_tot: int #: Actual common `n` stored for each `k` including padding
n_ideal: float #: Ideal `n_avg` based on `ke_cutoff` G-sphere volume
fft_index: torch.Tensor #: Index of each plane wave in reciprocal grid
fft_block_size: int #: Number of bands to FFT together
mpi_block_size: int #: Number of bands to MPI transfer together
PadIndex = Tuple[
slice, torch.Tensor, slice, slice, torch.Tensor
] #: Indexing datatype for `pad_index` and `pad_index_mine`
pad_index: PadIndex #: Which basis entries are padding (beyond `n`)
pad_index_mine: PadIndex #: Subset of `pad_index` on this process
division: qp.utils.TaskDivision #: Division of basis across `rc.comm_b`
mine: slice #: Slice of basis entries local to this process
real: BasisReal #: Extra indices for real wavefunctions
apply_ke = _apply_ke
apply_potential = _apply_potential
collect_density = _collect_density
    def __init__(
        self,
        *,
        process_grid: qp.utils.ProcessGrid,
        lattice: qp.lattice.Lattice,
        ions: qp.ions.Ions,
        symmetries: qp.symmetries.Symmetries,
        kpoints: qp.electrons.Kpoints,
        n_spins: int,
        n_spinor: int,
        checkpoint_in: qp.utils.CpPath = qp.utils.CpPath(),
        ke_cutoff: float = 20.0,
        real_wavefunctions: bool = False,
        grid: Optional[Union[qp.grid.Grid, dict]] = None,
        fft_block_size: int = 0,
        mpi_block_size: int = 0,
    ) -> None:
        """Initialize plane-wave basis with `ke_cutoff`.
        Parameters
        ----------
        lattice
            Lattice whose reciprocal lattice vectors define plane-wave basis.
        ions
            Ions that specify the pseudopotential portion of the basis;
            the basis implicitly depends on the ion positions for ultrasoft
            or PAW due to the augmentation of all operators at each ion.
        symmetries
            Symmetries with which the wavefunction grid should be commensurate.
        kpoints
            Set of k-points to initialize basis for. Note that the basis is
            only initialized for k-points to be operated on by current process
            i.e. for k = kpoints.k[kpoints.i_start : kpoints.i_stop].
        n_spins
            Default number of spin channels for wavefunctions in this basis.
        n_spinor
            Default number of spinor components for wavefunctions in this
            basis. Also used only to check support for real_wavefunctions.
        ke_cutoff
            :yaml:`Wavefunction kinetic energy cutoff in Hartrees.`
        real_wavefunctions
            :yaml:`Whether to use real wavefunctions (instead of complex).`
            This is only supported for non-spinorial, Gamma-point-only
            calculations, where conjugate symmetry allows real wavefunctions.
        grid
            :yaml:`Override parameters of grid for wavefunction operations.`
        fft_block_size
            :yaml:`Number of wavefunction bands to FFT simultaneously.`
            Higher numbers require more memory, but can achieve
            better occupancy of GPUs or high-core-count CPUs.
            The default of 0 auto-selects the block size based on the number
            of bands and k-points being processed by each process.
        mpi_block_size
            :yaml:`Number of wavefunction bands to MPI transfer simultaneously.`
            Lower numbers may allow better overlap between computation and transfers,
            which is beneficial if MPI implementation supports asynchronous progress
            and/or CUDA streams are used to compute asynchronously.
            Higher numbers mitigate MPI latency, but may require more memory.
            This number is automatically rounded up to nearest multiple of
            `fft_block_size * comm.size`. The default of 0 selects the block size
            based on the number of bands and k-points being processed by each process.
        """
        super().__init__()
        self.comm = process_grid.get_comm("b")
        self.comm_kb = process_grid.get_comm("kb")
        self.lattice = lattice
        self.ions = ions
        self.kpoints = kpoints
        self.n_spins = n_spins
        self.n_spinor = n_spinor
        self.fft_block_size = int(fft_block_size)
        self.mpi_block_size = int(mpi_block_size)
        # Select subset of k-points relevant on this process:
        k_mine = slice(kpoints.division.i_start, kpoints.division.i_stop)
        self.k = kpoints.k[k_mine]
        self.wk = kpoints.wk[k_mine]
        # Occupation weight per state: 2 for unpolarized non-spinorial, else 1.
        w_spin = 2 // (self.n_spins * self.n_spinor)  # spin weight
        self.w_sk = w_spin * self.wk.view(1, -1, 1)  # combined k and spin
        # Check real wavefunction support:
        self.real_wavefunctions = real_wavefunctions
        if self.real_wavefunctions:
            if n_spinor == 2:
                raise ValueError(
                    "real-wavefunctions not compatible" " with spinorial calculations"
                )
            if kpoints.k.norm().item():  # i.e. not all k = 0 (Gamma)
                raise ValueError(
                    "real-wavefunctions only compatible with"
                    " Gamma-point-only calculations"
                )
        # Initialize grid to match cutoff:
        self.ke_cutoff = float(ke_cutoff)
        qp.log.info("\nInitializing wavefunction grid:")
        self.add_child(
            "grid",
            qp.grid.Grid,
            grid,
            checkpoint_in,
            lattice=lattice,
            symmetries=symmetries,
            comm=None,  # Never parallel
            ke_cutoff_wavefunction=self.ke_cutoff,
        )
        # Initialize basis: full reciprocal-grid mesh ("H" = half grid when
        # real wavefunctions exploit conjugate symmetry), flattened to (n_fft, 3).
        self.iG = self.grid.get_mesh("H" if self.real_wavefunctions else "G").view(
            -1, 3
        )
        within_cutoff = self.get_ke() < ke_cutoff  # mask of which iG to keep
        # --- determine statistics of basis count across all k:
        self.n = within_cutoff.count_nonzero(dim=1)
        self.n_min = qp.utils.globalreduce.min(self.n, kpoints.comm)
        self.n_max = qp.utils.globalreduce.max(self.n, kpoints.comm)
        n_procs_b = self.comm.size
        # Pad the common count up to a multiple of the basis-process count so
        # every process gets an equal share:
        self.n_tot = qp.utils.ceildiv(self.n_max, n_procs_b) * n_procs_b
        self.n_avg = qp.utils.globalreduce.sum(self.n * self.wk, kpoints.comm)
        self.n_ideal = ((2.0 * ke_cutoff) ** 1.5) * lattice.volume / (6 * np.pi ** 2)
        qp.log.info(
            f"n_basis: min: {self.n_min} max: {self.n_max}"
            f" avg: {self.n_avg:.3f} ideal: {self.n_ideal:.3f}"
        )
        # --- create indices from basis set to FFT grid:
        n_fft = self.iG.shape[0]  # number of points on FFT grid
        assert self.n_tot <= n_fft  # make sure padding doesn't exceed grid
        fft_range = torch.arange(n_fft, device=qp.rc.device)
        # Stable sort trick: offset out-of-cutoff entries by n_fft so argsort
        # moves in-cutoff plane waves to the front, then keep n_tot per k.
        self.fft_index = (
            torch.where(within_cutoff, 0, n_fft) + fft_range[None, :]
        ).argsort(  # ke<cutoff to front
            dim=1
        )[
            :, : self.n_tot
        ]  # same count all k
        self.iG = self.iG[self.fft_index]  # basis plane waves for each k
        pad_index = torch.where(
            fft_range[None, : self.n_tot] >= self.n[:, None]
        )  # padded entries
        self.pad_index = (
            slice(None),
            pad_index[0],
            slice(None),
            slice(None),
            pad_index[1],
        )  # add spin, band and spinor dims
        # Divide basis on comm_b:
        div = qp.utils.TaskDivision(
            n_tot=self.n_tot,
            n_procs=n_procs_b,
            i_proc=self.comm.rank,
            name="padded basis",
        )
        self.division = div
        self.mine = slice(div.i_start, div.i_stop)
        # --- initialize local pad index separately (not trivially sliceable):
        pad_index = torch.where(
            fft_range[None, div.i_start : div.i_stop] >= self.n[:, None]
        )  # local padded entries
        self.pad_index_mine = (
            slice(None),
            pad_index[0],
            slice(None),
            slice(None),
            pad_index[1],
        )  # add other dims
        if self.real_wavefunctions and kpoints.division.n_mine:
            self.real = BasisReal(self)
def get_ke(self, basis_slice: slice = slice(None)) -> torch.Tensor:
"""Kinetic energy (KE) of each plane wave in basis in :math:`E_h`
Parameters
----------
basis_slice
Selection of basis functions to get KE for (default: full basis)
Returns
-------
torch.Tensor
KE for each plane-wave, dimensions: `nk_mine` x len(`basis_slice`)
"""
return 0.5 * (
((self.iG[:, basis_slice] + self.k[:, None, :]) @ self.lattice.Gbasis.T)
** 2
).sum(dim=-1)
    def get_fft_block_size(self, n_batch: int, n_bands: int) -> int:
        """Number of FFTs to perform together. Equals `fft_block_size`, if that is
        non-zero, and uses a heuristic based on batch dimension and number of bands."""
        if self.fft_block_size:
            block_size = self.fft_block_size  # explicit override from input
        else:
            if not (n_batch and n_bands):
                return 1  # Irrelevant since no FFTs to perform anyway
            # TODO: better heuristics on how much data to FFT at once
            # Lower bound on block from a target amount of FFT data per call
            # (larger target on GPU, where per-launch overhead dominates):
            min_data = 16_000_000 if qp.rc.use_cuda else 100_000
            min_block = qp.utils.ceildiv(min_data, n_batch * np.prod(self.grid.shape))
            # Upper bound: at most ~1/16 of the bands at once:
            max_block = qp.utils.ceildiv(n_bands, 16)  # based on memory limit
            block_size = min(min_block, max_block)
        # Report selected block-size once (class-level flag, all instances):
        if not Basis._fft_block_size_reported:
            qp.log.info(f"Selected block-size for band FFT: {block_size}")
            Basis._fft_block_size_reported = True
        return block_size
    def get_mpi_block_size(
        self, n_batch: int, n_bands: int, fft_block_size: int
    ) -> int:
        """Number of bands to MPI transfer together for `collect_density` and
        `apply_potential`. Uses `mpi_block_size`, if that is non-zero, and uses a
        heuristic based on batch dimension and number of bands. The final number is
        coerced to a multiple of `fft_block_size * comm.size` or rounded up to
        `n_bands`, if it is already close to that limit."""
        if self.mpi_block_size:
            mpi_block_size = self.mpi_block_size  # explicit override from input
        else:
            if not (n_batch and n_bands):
                return 1  # Irrelevant since nothing to transfer anyway
            # TODO: better heuristics on how much data to MPI-transfer at once
            min_data = 2_000_000  # TODO: incorporate MPI latency info somehow
            mpi_block_size = qp.utils.ceildiv(min_data, n_batch * self.n_tot)
        # Enforce multiple of fft_block_size * comm.size:
        divisor = fft_block_size * self.division.n_procs
        mpi_block_size = qp.utils.ceildiv(mpi_block_size, divisor) * divisor
        # Round up to n_bands if not enough blocks:
        if mpi_block_size * 2 > n_bands:
            mpi_block_size = n_bands  # no gain in working with <= 2 blocks
        # Report selected block-size once for each mode (class-level flag):
        if not Basis._mpi_block_size_reported:
            qp.log.info(f"Selected block-size for band MPI: {mpi_block_size}")
            Basis._mpi_block_size_reported = True
        return mpi_block_size

    _fft_block_size_reported = False  #: Make sure FFT block size reported once
    _mpi_block_size_reported = False  #: Make sure MPI block size reported once
|
fujiehuang/ecto
|
src/pybindings/cells/And.cpp
|
<reponame>fujiehuang/ecto<gh_stars>10-100
/*
* Copyright (c) 2011, <NAME>, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the <NAME>, Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <ecto/ecto.hpp>
#include <boost/format.hpp>
namespace ecto
{
  namespace bp = boost::python;

  /** Ecto cell that ANDs together a configurable number of boolean inputs. */
  struct And
  {
    /** Name of the i'th input tendril: "in1", "in2", ... */
    static std::string get_input_string(unsigned int i)
    {
      return str(boost::format("in%i") % (i + 1));
    }

    static void declare_params(tendrils& p)
    {
      p.declare<unsigned int>("ninput", "Number of inputs to AND together", 2);
    }

    static void declare_io(const tendrils& p, tendrils& in, tendrils& out)
    {
      unsigned int ninput = p.get<unsigned int>("ninput");
      // inputs
      for (unsigned int i = 0; i < ninput; i++)
      {
        in.declare<bool>(And::get_input_string(i),
                         "A boolean input to be ANDed with the others", true);
      }
      // output
      out.declare<bool>("out", "AND of the inputs");
    }

    void configure(const tendrils& p, const tendrils& in, const tendrils& out)
    {
      // Cache spores for every declared input so process() need not look them up.
      for (unsigned int i = 0; i < in.size(); i++)
      {
        inputs_.push_back(in[And::get_input_string(i)]);
      }
      output_ = out["out"];
    }

    int process(const tendrils& in, const tendrils& out)
    {
      *output_ = true;
      for (unsigned int i = 0; i < inputs_.size(); i++)
      {
        if (!*inputs_[i])
        {
          // Short-circuit: once any input is false, the result cannot change.
          *output_ = false;
          break;
        }
      }
      return ecto::OK;
    }

    std::vector<spore<bool> > inputs_;
    spore<bool> output_;
  };
}
ECTO_CELL(cells, ecto::And, "And", "AND together some number of boolean inputs");
|
area55git/guard
|
vendor/appscode.com/client-go/credential/auth.go
|
package credential
import (
"google.golang.org/grpc/credentials"
)
// AuthType identifies the HTTP authentication scheme carried in the
// Authorization header of gRPC calls.
type AuthType string

const (
	Basic AuthType = "Basic"   // username/password authentication
	Bearer AuthType = "Bearer" // token-based authentication
)

// Auth holds the credentials used to authenticate calls for a namespace.
type Auth struct {
	Namespace string
	Username string // only consulted for Basic auth (see Credential)
	AuthType AuthType
	Secret string // password for Basic, token for Bearer
}
// NewBasicAuth constructs an Auth configured for Basic
// (username/password) authentication in the given namespace.
func NewBasicAuth(ns, username, secret string) *Auth {
	a := new(Auth)
	a.Namespace = ns
	a.Username = username
	a.AuthType = Basic
	a.Secret = secret
	return a
}
// NewBearerAuth constructs an Auth configured for Bearer (token)
// authentication in the given namespace.
func NewBearerAuth(ns, secret string) *Auth {
	a := Auth{
		Namespace: ns,
		AuthType:  Bearer,
		Secret:    secret,
	}
	return &a
}
// Credential converts this Auth into per-RPC gRPC credentials.
// Anything other than Basic falls back to Bearer credentials.
func (a *Auth) Credential() credentials.PerRPCCredentials {
	switch a.AuthType {
	case Basic:
		return NewBasicAuthCredential(a.Namespace, a.Username, a.Secret)
	default:
		return NewBearerAuthCredential(a.Namespace, a.Secret)
	}
}
|
ariegg/webiopi-drivers
|
chips/sensor/tcs3472x/drivers.py
|
# This code has to be added to the corresponding __init__.py
# Registers the TCS3472x color-sensor driver variants under the module
# key "tcs3472X" in the WebIOPi DRIVERS table.
DRIVERS["tcs3472X"] = ["TCS34721", "TCS34723", "TCS34725", "TCS34727"]
|
waruqi/SakuraEngine
|
SakuraEngine/StaticBuilds/GraphicsInterface/GraphicsCommon/GpuTimer.h
|
/*
* @This File is Part of Sakura by SaeruHikari:
* @Description: Copyright SaeruHikari
* @Version: 0.1.0
* @Autor: SaeruHikari
* @Date: 2020-02-11 00:41:20
* @LastEditors : SaeruHikari
* @LastEditTime : 2020-02-12 12:31:10
*/
#pragma once
#include "Core/CoreMinimal/sinterface.h"
#include "Core/CoreMinimal/SDefination.h"
namespace Sakura::Graphics
{
    sinterface CommandContext; // forward declaration (defined elsewhere)
}

/**
 * Global manager for GPU timers: reserves timer slots and records
 * start/stop timestamps into command contexts, with a map/unmap-style
 * read-back window for querying elapsed times.
 */
namespace Sakura::Graphics::GpuTimeManager
{
    void Initialize(uint32 MaxNumTimers = 4096);
    void Shutdown();
    // Reserve a unique timer index
    uint32 NewTimer(void);
    /**
     * @description: Start a gpu timer with timer index
     * @param {CommandContext& context, uint32 timerIdx}
     * @return: void
     * @author: SaeruHikari
     */
    void StartTimer(CommandContext& context, uint32 timerIdx);
    /**
     * @description: Stop a gpu timer with timer index
     * @param {CommandContext& context, uint32 timerIdx}
     * @return: void
     * @author: SaeruHikari
     */
    void StopTimer(CommandContext& context, uint32 timerIdx);
    /**
     * @description: Bookend all calls to GetTime() with
     * Begin/End which correspond to Map/Unmap.
     * This needs to happen either at the very start or very end of a frame.
     * @param {void}
     * @return: void
     * @author: SaeruHikari
     */
    void BeginReadBack(void);
    /**
     * @description: Bookend all calls to GetTime() with
     * Begin/End which correspond to Map/Unmap.
     * This needs to happen either at the very start or very end of a frame.
     * @param {void}
     * @return: void
     * @author: SaeruHikari
     */
    void EndReadBack(void);
    // Returns the time in milliseconds between start and stop queries.
    // Only valid between BeginReadBack() and EndReadBack().
    float GetTime(uint32 timerIndex);
}
|
allenlili/vc-language-compiler-by-java
|
src/Parser/vc.java
|
<gh_stars>0
/*
*
*/
package VC.Parser;
import VC.Scanner.Scanner;
import VC.Scanner.SourceFile;
import VC.Parser.Parser;
import VC.TreeDrawer.Drawer;
import VC.TreePrinter.Printer;
import VC.UnParser.UnParser;
import VC.ErrorReporter;
import VC.ASTs.AST;
/**
 * Command-line driver for the VC compiler: scans and parses a VC source
 * file, optionally draws the AST, and always writes printed ("-t") and
 * unparsed ("-u") forms of the AST.
 */
public class vc {

    private static Scanner scanner;
    private static ErrorReporter reporter;
    private static Parser parser;
    private static Drawer drawer;
    private static Printer printer;
    private static UnParser unparser;

    /** 0 = no AST display, 1 = AST without positions, 2 = AST with positions. */
    private static int drawingAST = 0;

    private static String inputFilename;
    private static String VCFilename = null;  // unparser output file (-u)
    private static String ASTFilename = null; // printer output file (-t)
    private static AST theAST;

    /** Prints usage information and terminates with a non-zero exit status. */
    private static void cmdLineOptions() {
        System.out.println("Usage: java VC.vc [-options] filename");
        System.out.println();
        System.out.println("where options include:");
        System.out.println(" -ast display the AST (without SourcePosition)");
        System.out.println(" -astp display the AST (with SourcePosition)");
        System.out.println(" -t file print the AST into <file>");
        System.out.println(" -u file unparse the AST into <file>");
        System.exit(1);
    }

    public static void main(String[] args) {
        // Parse command-line options; all options start with '-'.
        int i = 0;
        String arg;
        while (i < args.length && args[i].startsWith("-")) {
            arg = args[i++];
            if (arg.equals("-ast")) {
                drawingAST = 1;
            } else if (arg.equals("-astp")) {
                drawingAST = 2;
            } else if (arg.equals("-u")) {
                if (i < args.length) {
                    VCFilename = args[i++];
                } else {
                    System.out.println("[# vc #]: invalid option " + arg);
                    cmdLineOptions();
                }
            } else if (arg.equals("-t")) {
                if (i < args.length) {
                    ASTFilename = args[i++];
                } else {
                    System.out.println("[# vc #]: invalid option " + arg);
                    cmdLineOptions();
                }
            }
            // NOTE: unrecognized options are silently ignored (original behavior).
        }

        if (i == args.length) {
            cmdLineOptions(); // no input file given
        } else {
            inputFilename = args[i];
        }

        System.out.println("======= The VC compiler =======");

        SourceFile source = new SourceFile(inputFilename);
        reporter = new ErrorReporter();
        scanner = new Scanner(source, reporter);
        parser = new Parser(scanner, reporter);

        // Default output names are derived from the input: "<input>t"/"<input>u".
        if (ASTFilename == null) {
            ASTFilename = inputFilename + "t";
        }
        printer = new Printer(ASTFilename);
        if (VCFilename == null) {
            VCFilename = inputFilename + "u";
        }
        unparser = new UnParser(VCFilename);

        try {
            theAST = parser.parseProgram();
        } catch (Exception e) {
            // The parser is not expected to throw; treat any escape as fatal.
            System.out.println("PANIC:");
            e.printStackTrace();
            System.exit(1);
        }

        if (reporter.numErrors == 0) {
            System.out.println("Compilation was successful.");
            drawer = new Drawer();
            if (drawingAST == 2) {
                drawer.enableDebugging(); // show SourcePosition
            }
            if (drawingAST != 0) {
                drawer.draw(theAST); // draw the AST
            }
            printer.print(theAST);
            unparser.unparse(theAST);
        } else {
            System.out.println("Compilation was unsuccessful.");
        }
    }
}
|
rueckstiess/react-playground
|
src/components/random-data.js
|
<reponame>rueckstiess/react-playground<filename>src/components/random-data.js<gh_stars>0
import React from 'react'
/**
* Injects random, changing data in its children as `data` prop.
*/
export default React.createClass({
displayName: 'RandomDataGenerator',
getInitialState () {
return {
interval: 1000,
data: [
{ id: 0, name: 'A', value: Math.random() },
{ id: 1, name: 'B', value: Math.random() },
{ id: 2, name: 'C', value: Math.random() },
{ id: 3, name: 'D', value: Math.random() },
{ id: 4, name: 'E', value: Math.random() },
{ id: 5, name: 'F', value: Math.random() },
{ id: 6, name: 'G', value: Math.random() },
{ id: 7, name: 'H', value: Math.random() },
{ id: 8, name: 'I', value: Math.random() },
{ id: 9, name: 'J', value: Math.random() }
]
}
},
componentDidMount () {
setInterval(() => {
if (this.isMounted()) {
this.setState(this.getInitialState())
}
}, this.state.interval)
},
render () {
var children = React.Children.map(this.props.children, (child) => {
return React.cloneElement(child, {data: this.state.data})
})
return (
<div className='randomDataGenerator'>
{children}
</div>
)
}
})
|
mike10004/http-capture
|
http-capture-lib/src/main/java/io/github/mike10004/httpcapture/MultimapCookieCollection.java
|
package io.github.mike10004.httpcapture;
import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMultimap;
import com.google.common.collect.Multimap;
import com.google.common.collect.Ordering;
import java.util.Comparator;
import java.util.function.Function;
import static java.util.Objects.requireNonNull;
/**
 * CookieCollection backed by a multimap grouping cookies by their
 * {@link CookieKey}; for each key, only the "best" cookie (per a
 * caller-supplied ordering) is surfaced.
 */
class MultimapCookieCollection implements CookieCollection {

    private final ImmutableMultimap<CookieKey, DeserializableCookie> cookies;

    private MultimapCookieCollection(Multimap<CookieKey, DeserializableCookie> cookiesByEntry) {
        this.cookies = ImmutableMultimap.copyOf(cookiesByEntry);
    }

    static CookieCollection build(Iterable<DeserializableCookie> cookies) {
        return new MultimapCookieCollection(buildCookieKeyMultimap(cookies));
    }

    @Override
    public ImmutableList<DeserializableCookie> makeCookieList(Function<? super CookieKey, Comparator<? super DeserializableCookie>> comparatorFactory) {
        requireNonNull(comparatorFactory, "comparator");
        ImmutableList.Builder<DeserializableCookie> winners = ImmutableList.builder();
        cookies.asMap().forEach((key, candidates) -> {
            // Keep only the maximum cookie per key, per the factory's ordering.
            Ordering<? super DeserializableCookie> order =
                    Ordering.from(comparatorFactory.apply(key));
            winners.add(order.max(candidates));
        });
        return winners.build();
    }

    private static Multimap<CookieKey, DeserializableCookie> buildCookieKeyMultimap(Iterable<DeserializableCookie> cookies) {
        Multimap<CookieKey, DeserializableCookie> grouped = ArrayListMultimap.create();
        for (DeserializableCookie cookie : cookies) {
            grouped.put(CookieKey.from(cookie), cookie);
        }
        return grouped;
    }
}
|
MatheusKraisfeld/sales-api
|
src/main/java/com/github/matheuskraisfeld/exception/PedidoNaoEncontradoException.java
|
package com.github.matheuskraisfeld.exception;
/**
 * Thrown when a requested order ("pedido") cannot be found.
 */
public class PedidoNaoEncontradoException extends RuntimeException {

    // Exceptions are Serializable; fix the serial form explicitly.
    private static final long serialVersionUID = 1L;

    /** Creates the exception with the default "Pedido não encontrado." message. */
    public PedidoNaoEncontradoException() {
        super("Pedido não encontrado.");
    }
}
|
njlr/seq
|
src/none.test.js
|
<filename>src/none.test.js
import * as seq from '../src';
// Behavioral tests for seq.none: returns true only when no element of the
// sequence satisfies the predicate (with no predicate, any element at all
// makes it false).
// NOTE: uses the (non-standard, stage-2) pipeline operator `|>`; requires
// the corresponding Babel plugin to compile.
test('none works for simple cases', () => {
  expect([] |> seq.fromArray |> seq.none()).toEqual(true);
  expect([ 1, 2, 3 ] |> seq.fromArray |> seq.none()).toEqual(false);
  expect([ 1, 2, 3 ] |> seq.fromArray |> seq.none(x => x < 7)).toEqual(false);
  expect([ 1, 2, 3 ] |> seq.fromArray |> seq.none(x => x > 2)).toEqual(false);
  expect([ 1, 2, 3 ] |> seq.fromArray |> seq.none(x => x > 5)).toEqual(true);
});
|
shaojiankui/iOS10-Runtime-Headers
|
Frameworks/AVFoundation.framework/AVMutableAudioMixInputParameters.h
|
/* Generated by RuntimeBrowser
Image: /System/Library/Frameworks/AVFoundation.framework/AVFoundation
*/
// NOTE(review): machine-generated class dump (RuntimeBrowser); regenerate
// from the framework binary rather than editing by hand.
@interface AVMutableAudioMixInputParameters : AVAudioMixInputParameters {
    AVMutableAudioMixInputParametersInternal * _mutableInputParameters;
}
@property (nonatomic, retain) const struct opaqueMTAudioProcessingTap { }*audioTapProcessor;
@property (nonatomic, copy) NSString *audioTimePitchAlgorithm;
@property (nonatomic) int trackID;
+ (id)audioMixInputParameters;
+ (id)audioMixInputParametersWithTrack:(id)arg1;
- (const struct opaqueMTAudioProcessingTap { }*)audioTapProcessor;
- (id)audioTimePitchAlgorithm;
- (void)setAudioTapProcessor:(struct opaqueMTAudioProcessingTap { }*)arg1;
- (void)setAudioTimePitchAlgorithm:(id)arg1;
- (void)setTrackID:(int)arg1;
- (void)setVolume:(float)arg1 atTime:(struct { long long x1; int x2; unsigned int x3; long long x4; })arg2;
- (void)setVolumeRampFromStartVolume:(float)arg1 toEndVolume:(float)arg2 timeRange:(struct { struct { long long x_1_1_1; int x_1_1_2; unsigned int x_1_1_3; long long x_1_1_4; } x1; struct { long long x_2_1_1; int x_2_1_2; unsigned int x_2_1_3; long long x_2_1_4; } x2; })arg3;
- (int)trackID;
@end
|
muffinresearch/solitude
|
lib/bango/tests/test_forms.py
|
import json
import mock
from nose.tools import eq_, ok_
from ..forms import (CreateBankDetailsForm,
CreateBillingConfigurationForm as BillingForm, EventForm,
PriceForm, VatNumberForm)
from .samples import (event_notification, good_bank_details,
good_billing_request)
from lib.sellers.models import Seller, SellerProduct
from lib.transactions import constants
from lib.transactions.models import Transaction
from solitude.base import APITest
@mock.patch('lib.bango.forms.URLField.clean')
class TestBankDetails(APITest):
    # URLField.clean is patched out for every test so URL validation
    # does not hit the network.

    def setUp(self):
        # Start from known-good bank details, bound to a seller product.
        self.bank = good_bank_details.copy()
        self.bank['seller_product'] = '/generic/seller/1/'

    def test_valid(self, clean):
        assert CreateBankDetailsForm(self.bank).is_valid()

    def test_missing(self, clean):
        # Account number is required when no IBAN is supplied.
        del self.bank['bankAccountNumber']
        assert not CreateBankDetailsForm(self.bank).is_valid()

    def test_iban(self, clean):
        # An IBAN may substitute for a missing bank account number.
        del self.bank['bankAccountNumber']
        self.bank['bankAccountIban'] = 'foo'
        assert CreateBankDetailsForm(self.bank).is_valid()
@mock.patch('lib.bango.forms.URLField.clean')
class TestBilling(APITest):
    # URLField.clean is patched out so URL validation does not hit the network.

    def setUp(self):
        # Known-good billing request plus the fields the form requires.
        self.billing = good_billing_request.copy()
        self.billing['transaction_uuid'] = 'foo'
        self.billing['seller_product_bango'] = '/blah/'

    def test_form(self, clean):
        ok_(PriceForm({'amount': 1, 'currency': 'NZD'}))

    def test_billing(self, clean):
        ok_(BillingForm(self.billing).is_valid())

    def test_no_json(self, clean):
        # The prices field is mandatory.
        del self.billing['prices']
        assert not BillingForm(self.billing).is_valid()

    def test_bad_json(self, clean):
        # Both non-JSON text and JSON of the wrong shape must be rejected.
        self.billing['prices'] = 'blargh'
        assert not BillingForm(self.billing).is_valid()
        self.billing['prices'] = json.dumps(['foo'])
        assert not BillingForm(self.billing).is_valid()

    def test_no_prices(self, clean):
        # An empty price list is treated the same as a missing field.
        self.billing['prices'] = []
        form = BillingForm(self.billing)
        form.is_valid()
        eq_(form.errors['prices'], ['This field is required.'])

    def test_price_error(self, clean):
        # Invalid currency codes surface as a choice error on 'prices'.
        self.billing['prices'] = [{'amount': 1, 'currency': 'FOO'}]
        form = BillingForm(self.billing)
        form.is_valid()
        ok_('Select a valid choice' in form.errors['prices'][0])

    def test_iterate(self, clean):
        # cleaned_data['prices'] yields per-price sub-forms, each valid.
        form = BillingForm(self.billing)
        form.is_valid()
        for price in form.cleaned_data['prices']:
            ok_(price.is_valid())
class TestVat(APITest):
    # An empty form maps to a VAT-number deletion; a filled one to an update.

    def test_delete(self):
        form = VatNumberForm({})
        ok_(form.is_valid())
        eq_(form.bango_data, {})
        eq_(form.bango_meta['method'], 'DeleteVATNumber')

    def test_change(self):
        form = VatNumberForm({'vatNumber': '123'})
        ok_(form.is_valid())
        eq_(form.bango_data, {'vatNumber': '123'})
        eq_(form.bango_meta['method'], 'SetVATNumber')
class TestEvent(APITest):
    # Validation of Bango event notifications: the form must reject
    # malformed payloads and payloads referencing unknown transactions.

    def test_empty(self):
        form = EventForm()
        ok_(not form.is_valid())

    def test_gunk(self):
        # Arbitrary non-XML text is rejected.
        form = EventForm({'notification': 'fooo!'})
        ok_(not form.is_valid())

    def test_wrong_action(self):
        # Mutate the sample so its action is unrecognized.
        sample = event_notification.replace('PAYMENT', 'NOT')
        form = EventForm({'notification': sample})
        ok_(not form.is_valid())

    def test_no_action(self):
        sample = event_notification.replace('OK', 'NOT OK')
        form = EventForm({'notification': sample})
        ok_(not form.is_valid())

    def test_no_transaction(self):
        # Valid payload but no matching transaction in the database.
        form = EventForm({'notification': event_notification})
        ok_(not form.is_valid())

    def create(self):
        # TODO this isn't great.
        # Build the seller/product/transaction fixture the notification refers to.
        self.trans_uuid = 'some-transaction-uid'
        self.seller = Seller.objects.create(uuid='seller-uuid')
        self.product = SellerProduct.objects.create(seller=self.seller,
                                                    external_id='xyz')
        self.trans = Transaction.objects.create(
            amount=1, provider=constants.SOURCE_BANGO,
            seller_product=self.product,
            uuid=self.trans_uuid,
            uid_pay='external-trans-uid'
        )

    def test_check_good(self):
        self.create()
        form = EventForm({'notification': event_notification})
        ok_(form.is_valid(), form.errors)

    def test_wierd(self):
        # NOTE(review): 'wierd' is a typo for 'weird' (kept: renaming would
        # change the discovered test name). A cancelled transaction must not
        # accept further event notifications.
        self.create()
        self.trans.status = constants.STATUS_CANCELLED
        self.trans.save()
        form = EventForm({'notification': event_notification})
        ok_(not form.is_valid())
|
css4j/echosvg
|
echosvg-ext/src/main/java/io/sf/carte/echosvg/w3c/dom/events/CustomEvent.java
|
<gh_stars>1-10
/*
* Copyright (c) 2006 World Wide Web Consortium,
*
* (Massachusetts Institute of Technology, European Research Consortium for
* Informatics and Mathematics, Keio University). All Rights Reserved. This
* work is distributed under the W3C(r) Software License [1] in the hope that
* it will be useful, but WITHOUT ANY WARRANTY; without even the implied
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
* [1] http://www.w3.org/Consortium/Legal/2002/copyright-software-20021231
*/
package io.sf.carte.echosvg.w3c.dom.events;
import org.w3c.dom.events.Event;
/**
* The CustomEvent interface is the recommended interface for
* application-specific event types. Unlike the <code>Event</code> interface, it
* allows applications to provide contextual information about the event type.
* Application-specific event types should have an associated namespace to avoid
* clashes with future general-purpose event types.
* <p>
* To create an instance of the <code>CustomEvent</code> interface, use the
* <code>DocumentEvent.createEvent("CustomEvent")</code> method call.
* <p>
* See also the
* <a href='http://www.w3.org/TR/2006/WD-DOM-Level-3-Events-20060413'> Document
* Object Model (DOM) Level 3 Events Specification </a>.
*
* @since DOM Level 3
*/
public interface CustomEvent extends Event {
/**
* Specifies some detail information about the <code>Event</code>.
*/
Object getDetail();
/**
* The <code>initCustomEventNS</code> method is used to initialize the value of
* a <code>CustomEvent</code> object and has the same behavior as
* <code>Event.initEventNS()</code>.
*
* @param namespaceURI Refer to the <code>Event.initEventNS()</code> method for
* a description of this parameter.
* @param typeArg Refer to the <code>Event.initEventNS()</code> method for
* a description of this parameter.
* @param canBubbleArg Refer to the <code>Event.initEventNS()</code> method for
* a description of this parameter.
* @param cancelableArg Refer to the <code>Event.initEventNS()</code> method for
* a description of this parameter.
* @param detailArg Specifies <code>CustomEvent.detail</code>. This value
* may be <code>null</code>.
*/
void initCustomEventNS(String namespaceURI, String typeArg, boolean canBubbleArg, boolean cancelableArg,
Object detailArg);
}
|
lechium/iOS1351Headers
|
System/Library/Frameworks/AVFoundation.framework/AVAssetCustomURLRequest.h
|
<filename>System/Library/Frameworks/AVFoundation.framework/AVAssetCustomURLRequest.h
/*
* This header is generated by classdump-dyld 1.5
* on Friday, April 30, 2021 at 11:35:52 AM Mountain Standard Time
* Operating System: Version 13.5.1 (Build 17F80)
* Image Source: /System/Library/Frameworks/AVFoundation.framework/AVFoundation
* classdump-dyld is licensed under GPLv3, Copyright © 2013-2016 by <NAME>. Updated by <NAME>.
*/
#import <AVFoundation/AVFoundation-Structs.h>
// NOTE(review): machine-generated class dump (classdump-dyld); regenerate
// from the framework binary rather than editing by hand.
@interface AVAssetCustomURLRequest : NSObject {

	unsigned long long _requestID;
	CFDictionaryRef _customURLRequest;
	CFDictionaryRef _customURLResponse;

}

@property (nonatomic,readonly) unsigned long long requestID;              //@synthesize requestID=_requestID - In the implementation block
+(id)requestWithRequest:(CFDictionaryRef)arg1 id:(unsigned long long)arg2 ;
-(id)init;
-(void)dealloc;
-(unsigned long long)requestID;
-(id)initWithRequest:(CFDictionaryRef)arg1 id:(unsigned long long)arg2 ;
-(CFDictionaryRef)customURLRequest;
-(void)setCustomURLRequest:(CFDictionaryRef)arg1 ;
-(CFDictionaryRef)customURLResponse;
-(void)setCustomURLResponse:(CFDictionaryRef)arg1 ;
@end
|
danjac/podbaby
|
ui/test/components/player.js
|
import React from 'react';
import _ from 'lodash';
import TestUtils from 'react-addons-test-utils';
import { assert } from 'chai';
import jsdom from 'jsdom-global';
import Player from '../../components/player';
import { Wrapper } from './utils';
import { makePodcast, makePlayerProps } from './fixtures';
describe('Player component', function () {
  before(function () {
    // jsdom() installs a DOM for rendering; it returns a teardown function.
    this.jsdom = jsdom();
  });

  after(function () {
    this.jsdom();
  });

  it('should render the truncated podcast title', function () {
    // A title far beyond the 200-char display limit forces truncation.
    const podcast = makePodcast({
      name: 'We do cool podcasts each and every day',
      title: _.repeat('testing,', 100) });
    // +3 presumably accounts for a separator between name and title in the
    // title attribute — TODO confirm against the Player implementation.
    const totalLength = podcast.name.length + podcast.title.length + 3;
    const props = makePlayerProps(podcast);
    const component = <Wrapper><Player {...props} /></Wrapper>;
    const rendered = TestUtils.renderIntoDocument(component, 'div');
    const $title = TestUtils.findRenderedDOMComponentWithTag(rendered, 'b');
    const $link = $title.children[0];
    const title = $link.getAttribute('title');
    // Full text lives in the title attribute; visible text is capped at 200.
    assert.equal(title.length, totalLength);
    assert.equal($link.textContent.length, 200);
  });
});
|
RobinSchmidt/RS-MET-Preliminary
|
Tests/TestsRosicAndRapt/Source/rosic_tests/PortedFromRSLib/RSLib/Core/Files/File.cpp
|
using namespace RSLib;
// static data members:
//const File File::nonExistent;
// construction/destruction:
// Creates a file object referring to the given absolute path. The file
// itself is neither opened nor created here.
rsFile::rsFile(const rsString& absolutePath)
{
  this->absolutePath = absolutePath;
}

rsFile::~rsFile()
{
}
// inquiry:
// Returns the size of the file in bytes, or 0 if the file cannot be opened
// (or the seek to its end fails).
long rsFile::getSizeInBytes() const
{
  FILE *f = openFile(absolutePath, "rb");
  if( f == NULL )
    return 0;
  long size; // was int: truncated sizes beyond 2 GB on LP64 platforms
  if( fseek(f, 0, SEEK_END) != 0 ) // fseek returns 0 in case of success
    size = 0;
  else
    size = ftell(f);                 // ftell returns current position in the file
  fclose(f);
  return size;
}
// Returns a copy of this file's path with its extension replaced by the given
// one; only the part of 'extension' after its last dot (if any) is used.
rsFile rsFile::withExtension(const rsString &extension) const
{
  // Strip any existing extension from our own path:
  const int dotInPath = absolutePath.rsFindLastOccurrenceOf('.');
  rsString result = (dotInPath >= 0)
    ? absolutePath.getSubString(0, dotInPath-1)
    : absolutePath;
  // Append a dot followed by the new extension's final component:
  const int dotInExt = extension.rsFindLastOccurrenceOf('.');
  result += rsString(".") + extension.getSubString(dotInExt+1, extension.getLength()-1);
  return result;
}
// reading:
// Reads the whole file into an rsString, or returns the empty string if the
// file cannot be opened.
// NOTE(review): the return value of fread is not checked, so a short read
// leaves uninitialized bytes in the tail — confirm this is acceptable.
rsString rsFile::readFileAsString() const
{
  FILE *f = openFile(absolutePath, "rb");
  if( f == NULL )
    return rsString::empty;
  else
  {
    int length = getSizeInBytes();
    char *text = new char[length];
    void *voidPointer = (void*) text;
    fread(voidPointer, sizeof(char), length, f);
    fclose(f);
    rsString theString;
    theString.ensureAllocatedSize(length);
    for(int i=0; i<length; i++)
      theString.appendElement(text[i]);   // \todo: optimize - append all at once
    delete[] text;
    return theString;
  }
}
// Reads the whole file into a newly allocated, zero-terminated C-string and
// reports its length (excluding the terminator) via the output parameter.
// Returns NULL if the file cannot be opened. The caller owns the returned
// buffer and must release it with delete[].
// NOTE(review): the return value of fread is not checked — a short read
// leaves uninitialized bytes before the terminator; confirm acceptable.
char* rsFile::readFileAsZeroTerminatedString(long &lengthExludingZero) const
{
  FILE *f = openFile(absolutePath, "rb");
  if( f == NULL )
    return NULL;
  else
  {
    lengthExludingZero = getSizeInBytes();
    char *text = new char[lengthExludingZero+1];
    void *voidPointer = (void*) text;
    fread(voidPointer, sizeof(char), lengthExludingZero, f);
    fclose(f);
    text[lengthExludingZero] = '\0';  // we must append the terminating zero manually
    return text;
  }
}
// writing:
// Writes the given text to the file (binary mode, overwriting any previous
// content). Returns true on success, false if the file cannot be opened.
// NOTE(review): the return value of fwrite is not checked, so a partial
// write still reports success — confirm acceptable.
bool rsFile::writeStringToFile(const rsString &text) const
{
  FILE *f = openFile(absolutePath, "wb");
  if( f == NULL )
    return false;
  else
  {
    int length = text.getLength();
    char *cString = text.getAsZeroTerminatedString();
    fwrite(cString, sizeof(char), length, f);
    delete[] cString;  // getAsZeroTerminatedString allocates; we own the buffer
    fclose(f);
    return true;
  }
}
// static functions:
// Opens the file at the given path via fopen with the given mode, handling
// the conversion from rsString to a temporary C-string.
FILE* rsFile::openFile(const rsString& path, const char *mode)
{
  char *pathCString = path.getAsZeroTerminatedString();
  FILE *result = fopen(pathCString, mode);
  delete[] pathCString;
  return result;
}
|
roscopecoltran/SniperKit-Core
|
.References/src/github.com/reinterpretcat/utymap_generative_3d_map/core/test/builders/terrain/LineGridSplitterTest.cpp
|
<filename>.References/src/github.com/reinterpretcat/utymap_generative_3d_map/core/test/builders/terrain/LineGridSplitterTest.cpp
#include "builders/terrain/LineGridSplitter.hpp"
#include <boost/test/unit_test.hpp>
using namespace ClipperLib;
using namespace utymap::builders;
using namespace utymap::math;
namespace {
typedef std::vector<Vector2> DoublePoints;
typedef std::vector<IntPoint> IntPoints;
const double Precision = 0.1e-9;
}
BOOST_AUTO_TEST_SUITE(Builders_Terrain_LineGridSplitter)

// With default parameters, splitting the horizontal segment (0,0)-(10,0)
// should yield the 11 integer grid points along it, in order.
BOOST_AUTO_TEST_CASE(GivenHorizontal_WhenSplitWithIntStep_ThenCanSplit) {
  IntPoint start(0, 0);
  IntPoint end(10, 0);
  LineGridSplitter splitter;
  DoublePoints result;

  splitter.split(start, end, result);

  for (int i = 0; i <= 10; ++i) {
    BOOST_CHECK_CLOSE(i, result[i].x, Precision);
    BOOST_CHECK_CLOSE(0, result[i].y, Precision);
  }
}

// Same as above for a vertical segment.
BOOST_AUTO_TEST_CASE(GivenVertical_WhenSplitWithIntStep_ThenCanSplit) {
  IntPoint start(0, 0);
  IntPoint end(0, 10);
  LineGridSplitter splitter;
  DoublePoints result;

  splitter.split(start, end, result);

  for (int i = 0; i <= 10; ++i) {
    BOOST_CHECK_CLOSE(0, result[i].x, Precision);
    BOOST_CHECK_CLOSE(i, result[i].y, Precision);
  }
}

// Diagonal segment toward negative x: grid points (-i, i).
BOOST_AUTO_TEST_CASE(Given45Angle_WhenSplitWithIntStep_ThenCanSplit) {
  IntPoint start(0, 0);
  IntPoint end(-10, 10);
  LineGridSplitter splitter;
  DoublePoints result;

  splitter.split(start, end, result);

  for (int i = 0; i <= 10; ++i) {
    BOOST_CHECK_CLOSE(-i, result[i].x, Precision);
    BOOST_CHECK_CLOSE(i, result[i].y, Precision);
  }
}

// NOTE(review): despite the name ("HighLoD"), this case uses default
// splitter parameters like the previous one — TODO confirm intent.
BOOST_AUTO_TEST_CASE(Given45Angle_WhenSplitWithHighLoD_ThenCanSplit) {
  LineGridSplitter splitter;
  IntPoint start(0, 0);
  IntPoint end(10, 10);
  DoublePoints result;

  splitter.split(start, end, result);

  for (int i = 0; i <= 10; ++i) {
    BOOST_CHECK_CLOSE(i, result[i].x, Precision);
    BOOST_CHECK_CLOSE(i, result[i].y, Precision);
  }
}

// These tests are for some bugs observed for real data
BOOST_AUTO_TEST_CASE(GivenSpecificCase1_WhenSplit_ThenCanSplit) {
  LineGridSplitter splitter;
  splitter.setParams(1E8, 3);
  IntPoint start(5286462881, -916628251);
  IntPoint end(5388136261, -968852454);
  DoublePoints result;

  splitter.split(start, end, result);

  // Regression guard: all produced points stay within the expected bounds.
  for (int i = 0; i < result.size(); ++i) {
    BOOST_CHECK_LT(std::abs(result[i].x), 54);
    BOOST_CHECK_LT(std::abs(result[i].y), 10);
  }
}

// These tests are for some bugs observed for real data
BOOST_AUTO_TEST_CASE(GivenSpecificCase2_WhenSplit_ThenCanSplit) {
  LineGridSplitter splitter;
  splitter.setParams(1E7, 3);
  IntPoint start(-428193799, 626823300);
  IntPoint end(-411886999, 634824599);
  DoublePoints result;

  splitter.split(start, end, result);

  BOOST_CHECK_EQUAL(result.size(), 4);
}

// Splitting a closed triangle must not inflate the output with duplicates.
BOOST_AUTO_TEST_CASE(GivenSpecificCase3_WhenSplit_ThenDoNotInflateDuplicates) {
  LineGridSplitter splitter;
  splitter.setParams(10000000, 0.0006103515625);
  IntPoints points = {
    {133691881, 525218163},
    {133693424, 525219786},
    {133692010, 525218310}
  };
  DoublePoints result;

  // Walk the ring edge by edge, wrapping the last edge back to the start.
  auto lastItemIndex = points.size() - 1;
  for (int i = 0; i <= lastItemIndex; i++)
    splitter.split(points[i], points[i==lastItemIndex ? 0 : i + 1], result);

  // TODO better to check duplicates directly
  BOOST_CHECK_EQUAL(result.size(), 6);
}

BOOST_AUTO_TEST_SUITE_END()
|
6923403/C
|
server/linux_socket_high/p21_SIGALRM/lst_timer.h
|
#ifndef LST_TIMER_H
#define LST_TIMER_H
#include <iostream>
#include <unistd.h>
#include <time.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
const int BUFFER_SIZE = 64;

class util_timer; // forward declaration

// Per-connection data attached to a timer: peer address, socket fd and
// the connection's read buffer.
struct client_data
{
    sockaddr_in address;
    int sockfd;
    char buf[BUFFER_SIZE];
    util_timer *timer; // timer associated with this connection
};

// Node of a doubly-linked timer list.
class util_timer
{
public:
    util_timer() : prev(nullptr), next(nullptr)
    {}

    time_t expire;                   // absolute expiration time (seconds)
    void (*cb_func) (client_data *); // callback invoked when the timer fires
    client_data *user_data;
    util_timer *prev;
    util_timer *next;
};

// Doubly-linked list of timers kept sorted in ascending order of 'expire'.
class sort_timer_lst
{
public:
    sort_timer_lst() : head(nullptr), tail(nullptr)
    {}
    ~sort_timer_lst();

    void add_timer(util_timer *timer);
    void adjust_timer(util_timer *timer);
    void del_timer(util_timer *timer);
    // Each time SIGALRM fires, call tick() once from its signal handler
    // (or from the main loop, when using a unified event source) to run
    // the callbacks of all expired timers on the list.
    // (Translated from the original Chinese comment.)
    void tick();

private:
    // Insert 'timer' into the sub-list starting at 'lst_head', keeping order.
    void add_timer(util_timer *timer, util_timer *lst_head);

    util_timer *head;
    util_timer *tail;
};
#endif //LST_TIMER_H
|
sho25/activemq
|
activemq-amqp/src/test/java/org/apache/activemq/transport/amqp/JMSParallelConnectTest.java
|
<filename>activemq-amqp/src/test/java/org/apache/activemq/transport/amqp/JMSParallelConnectTest.java<gh_stars>0
begin_unit|revision:0.9.5;language:Java;cregit-version:0.0.1
begin_comment
comment|/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */
end_comment
begin_package
package|package
name|org
operator|.
name|apache
operator|.
name|activemq
operator|.
name|transport
operator|.
name|amqp
package|;
end_package
begin_import
import|import static
name|org
operator|.
name|junit
operator|.
name|Assert
operator|.
name|assertTrue
import|;
end_import
begin_import
import|import
name|java
operator|.
name|util
operator|.
name|concurrent
operator|.
name|ExecutorService
import|;
end_import
begin_import
import|import
name|java
operator|.
name|util
operator|.
name|concurrent
operator|.
name|Executors
import|;
end_import
begin_import
import|import
name|java
operator|.
name|util
operator|.
name|concurrent
operator|.
name|TimeUnit
import|;
end_import
begin_import
import|import
name|javax
operator|.
name|jms
operator|.
name|Connection
import|;
end_import
begin_import
import|import
name|javax
operator|.
name|jms
operator|.
name|JMSException
import|;
end_import
begin_import
import|import
name|org
operator|.
name|junit
operator|.
name|Test
import|;
end_import
begin_class
specifier|public
class|class
name|JMSParallelConnectTest
extends|extends
name|AmqpTestSupport
block|{
annotation|@
name|Override
specifier|protected
name|boolean
name|isUseTcpConnector
parameter_list|()
block|{
return|return
literal|true
return|;
block|}
annotation|@
name|Override
specifier|protected
name|boolean
name|isUseSslConnector
parameter_list|()
block|{
return|return
literal|true
return|;
block|}
annotation|@
name|Override
specifier|protected
name|boolean
name|isUseNioConnector
parameter_list|()
block|{
return|return
literal|true
return|;
block|}
annotation|@
name|Override
specifier|protected
name|boolean
name|isUseNioPlusSslConnector
parameter_list|()
block|{
return|return
literal|true
return|;
block|}
annotation|@
name|Test
argument_list|(
name|timeout
operator|=
literal|60000
argument_list|)
specifier|public
name|void
name|testParallelConnectPlain
parameter_list|()
throws|throws
name|Exception
block|{
specifier|final
name|int
name|numThreads
init|=
literal|40
decl_stmt|;
name|ExecutorService
name|executorService
init|=
name|Executors
operator|.
name|newFixedThreadPool
argument_list|(
name|numThreads
argument_list|)
decl_stmt|;
for|for
control|(
name|int
name|i
init|=
literal|0
init|;
name|i
operator|<
name|numThreads
condition|;
name|i
operator|++
control|)
block|{
name|executorService
operator|.
name|execute
argument_list|(
operator|new
name|Runnable
argument_list|()
block|{
annotation|@
name|Override
specifier|public
name|void
name|run
parameter_list|()
block|{
try|try
block|{
name|Connection
name|connection
init|=
name|JMSClientContext
operator|.
name|INSTANCE
operator|.
name|createConnection
argument_list|(
name|amqpURI
argument_list|,
literal|"admin"
argument_list|,
literal|"password"
argument_list|)
decl_stmt|;
name|connection
operator|.
name|start
argument_list|()
expr_stmt|;
name|connection
operator|.
name|close
argument_list|()
expr_stmt|;
block|}
catch|catch
parameter_list|(
name|JMSException
name|e
parameter_list|)
block|{
name|e
operator|.
name|printStackTrace
argument_list|()
expr_stmt|;
block|}
block|}
block|}
argument_list|)
expr_stmt|;
block|}
name|executorService
operator|.
name|shutdown
argument_list|()
expr_stmt|;
name|assertTrue
argument_list|(
literal|"executor done on time"
argument_list|,
name|executorService
operator|.
name|awaitTermination
argument_list|(
literal|30
argument_list|,
name|TimeUnit
operator|.
name|SECONDS
argument_list|)
argument_list|)
expr_stmt|;
block|}
annotation|@
name|Test
argument_list|(
name|timeout
operator|=
literal|60000
argument_list|)
specifier|public
name|void
name|testParallelConnectNio
parameter_list|()
throws|throws
name|Exception
block|{
specifier|final
name|int
name|numThreads
init|=
literal|40
decl_stmt|;
name|ExecutorService
name|executorService
init|=
name|Executors
operator|.
name|newFixedThreadPool
argument_list|(
name|numThreads
argument_list|)
decl_stmt|;
for|for
control|(
name|int
name|i
init|=
literal|0
init|;
name|i
operator|<
name|numThreads
condition|;
name|i
operator|++
control|)
block|{
name|executorService
operator|.
name|execute
argument_list|(
operator|new
name|Runnable
argument_list|()
block|{
annotation|@
name|Override
specifier|public
name|void
name|run
parameter_list|()
block|{
try|try
block|{
name|Connection
name|connection
init|=
name|JMSClientContext
operator|.
name|INSTANCE
operator|.
name|createConnection
argument_list|(
name|amqpNioURI
argument_list|,
literal|"admin"
argument_list|,
literal|"password"
argument_list|)
decl_stmt|;
name|connection
operator|.
name|start
argument_list|()
expr_stmt|;
name|connection
operator|.
name|close
argument_list|()
expr_stmt|;
block|}
catch|catch
parameter_list|(
name|JMSException
name|e
parameter_list|)
block|{
name|e
operator|.
name|printStackTrace
argument_list|()
expr_stmt|;
block|}
block|}
block|}
argument_list|)
expr_stmt|;
block|}
name|executorService
operator|.
name|shutdown
argument_list|()
expr_stmt|;
name|assertTrue
argument_list|(
literal|"executor done on time"
argument_list|,
name|executorService
operator|.
name|awaitTermination
argument_list|(
literal|30
argument_list|,
name|TimeUnit
operator|.
name|SECONDS
argument_list|)
argument_list|)
expr_stmt|;
block|}
annotation|@
name|Test
argument_list|(
name|timeout
operator|=
literal|60000
argument_list|)
specifier|public
name|void
name|testParallelConnectSsl
parameter_list|()
throws|throws
name|Exception
block|{
specifier|final
name|int
name|numThreads
init|=
literal|40
decl_stmt|;
name|ExecutorService
name|executorService
init|=
name|Executors
operator|.
name|newFixedThreadPool
argument_list|(
name|numThreads
argument_list|)
decl_stmt|;
for|for
control|(
name|int
name|i
init|=
literal|0
init|;
name|i
operator|<
name|numThreads
condition|;
name|i
operator|++
control|)
block|{
name|executorService
operator|.
name|execute
argument_list|(
operator|new
name|Runnable
argument_list|()
block|{
annotation|@
name|Override
specifier|public
name|void
name|run
parameter_list|()
block|{
try|try
block|{
name|Connection
name|connection
init|=
name|JMSClientContext
operator|.
name|INSTANCE
operator|.
name|createConnection
argument_list|(
name|amqpSslURI
argument_list|,
literal|"admin"
argument_list|,
literal|"password"
argument_list|)
decl_stmt|;
name|connection
operator|.
name|start
argument_list|()
expr_stmt|;
name|connection
operator|.
name|close
argument_list|()
expr_stmt|;
block|}
catch|catch
parameter_list|(
name|JMSException
name|e
parameter_list|)
block|{
name|e
operator|.
name|printStackTrace
argument_list|()
expr_stmt|;
block|}
block|}
block|}
argument_list|)
expr_stmt|;
block|}
name|executorService
operator|.
name|shutdown
argument_list|()
expr_stmt|;
name|assertTrue
argument_list|(
literal|"executor done on time"
argument_list|,
name|executorService
operator|.
name|awaitTermination
argument_list|(
literal|30
argument_list|,
name|TimeUnit
operator|.
name|SECONDS
argument_list|)
argument_list|)
expr_stmt|;
block|}
annotation|@
name|Test
argument_list|(
name|timeout
operator|=
literal|60000
argument_list|)
specifier|public
name|void
name|testParallelConnectNioPlusSsl
parameter_list|()
throws|throws
name|Exception
block|{
specifier|final
name|int
name|numThreads
init|=
literal|40
decl_stmt|;
name|ExecutorService
name|executorService
init|=
name|Executors
operator|.
name|newFixedThreadPool
argument_list|(
name|numThreads
argument_list|)
decl_stmt|;
for|for
control|(
name|int
name|i
init|=
literal|0
init|;
name|i
operator|<
name|numThreads
condition|;
name|i
operator|++
control|)
block|{
name|executorService
operator|.
name|execute
argument_list|(
operator|new
name|Runnable
argument_list|()
block|{
annotation|@
name|Override
specifier|public
name|void
name|run
parameter_list|()
block|{
try|try
block|{
name|Connection
name|connection
init|=
name|JMSClientContext
operator|.
name|INSTANCE
operator|.
name|createConnection
argument_list|(
name|amqpNioPlusSslURI
argument_list|,
literal|"admin"
argument_list|,
literal|"password"
argument_list|)
decl_stmt|;
name|connection
operator|.
name|start
argument_list|()
expr_stmt|;
name|connection
operator|.
name|close
argument_list|()
expr_stmt|;
block|}
catch|catch
parameter_list|(
name|JMSException
name|e
parameter_list|)
block|{
name|e
operator|.
name|printStackTrace
argument_list|()
expr_stmt|;
block|}
block|}
block|}
argument_list|)
expr_stmt|;
block|}
name|executorService
operator|.
name|shutdown
argument_list|()
expr_stmt|;
name|assertTrue
argument_list|(
literal|"executor done on time"
argument_list|,
name|executorService
operator|.
name|awaitTermination
argument_list|(
literal|30
argument_list|,
name|TimeUnit
operator|.
name|SECONDS
argument_list|)
argument_list|)
expr_stmt|;
block|}
block|}
end_class
end_unit
|
q197585312/testApp
|
Afb88/src/main/java/com/nanyang/app/main/home/sport/basketball/BasketballMixState.java
|
package com.nanyang.app.main.home.sport.basketball;
import android.view.View;
import android.widget.TextView;
import com.nanyang.app.MenuItemInfo;
import com.nanyang.app.R;
import com.nanyang.app.main.home.sport.main.SportAdapterHelper;
import com.nanyang.app.main.home.sport.main.SportContract;
import com.nanyang.app.main.home.sport.main.SportState;
import com.nanyang.app.main.home.sport.model.LeagueBean;
import com.nanyang.app.main.home.sport.model.TableSportInfo;
import com.nanyang.app.main.home.sportInterface.BallItemCallBack;
import com.nanyang.app.main.home.sportInterface.IAdapterHelper;
import com.nanyang.app.main.home.sportInterface.IBetHelper;
import com.unkonw.testapp.libs.utils.ToastUtils;
import com.unkonw.testapp.training.ScrollLayout;
import org.json.JSONArray;
import org.json.JSONException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import cn.finalteam.toolsfinal.DeviceUtils;
/**
* Created by Administrator on 2017/3/10.
*/
public abstract class BasketballMixState extends SportState<BasketballMixInfo, SportContract.View<BasketballMixInfo>> {

    /**
     * League title -> ("home+away" -> collected?) lookup of matches the user
     * has starred locally; backs the collection filter.
     */
    protected Map<String, Map<String, Boolean>> localCollectionMap = new HashMap<>();
    /** True while the list is filtered down to collected matches only. */
    private boolean isCollection;

    public BasketballMixState(SportContract.View baseView) {
        super(baseView);
    }

    /** @return whether the collection filter is currently active */
    public boolean isCollection() {
        return isCollection;
    }

    /**
     * Toggles the collection filter and re-renders the cached data set.
     *
     * @return the new filter state
     */
    public boolean collection() {
        isCollection = !isCollection;
        initAllData(allData);
        return isCollection;
    }

    @Override
    public IAdapterHelper<BasketballMixInfo> onSetAdapterHelper() {
        return new BasketballAdapterHelper(getBaseView().getContextActivity());
    }

    @Override
    protected SportAdapterHelper.ItemCallBack onSetItemCallBack() {
        return new BallItemCallBack<BasketballMixInfo>(baseRecyclerAdapter) {
            @Override
            public boolean isItemCollection(BasketballMixInfo item) {
                // Row-level collection marks are not rendered in this view.
                return false;
            }

            @Override
            public void clickOdds(TextView v, BasketballMixInfo item, String type, boolean isHf, String odds) {
                IBetHelper<BasketballMixInfo> helper = onSetBetHelper();
                helper.setCompositeSubscription(mCompositeSubscription);
                helper.clickOdds(item, type, odds, v, isHf, "");
            }

            @Override
            public void clickView(View v, BasketballMixInfo item, int position) {
                // No per-row click action in this view.
            }

            @Override
            public ScrollLayout onSetHeaderFollower() {
                return getBaseView().onSetScrollHeader();
            }
        };
    }

    @Override
    protected IBetHelper<BasketballMixInfo> onSetBetHelper() {
        return new BasketballMixBetHelper(getBaseView());
    }

    /**
     * Parses the raw server payload into per-league tables.
     * Expected layout: [ [ [leagueId, leagueName], [match, match, ...] ], ... ]
     * (inferred from the indexing below — TODO confirm against the server).
     * Leagues lacking a two-element header or any match rows are skipped.
     */
    @Override
    protected List<TableSportInfo<BasketballMixInfo>> updateJsonData(JSONArray dataListArray) throws JSONException {
        ArrayList<TableSportInfo<BasketballMixInfo>> tableModules = new ArrayList<>();
        if (dataListArray.length() > 0) {
            for (int i = 0; i < dataListArray.length(); i++) {
                LeagueBean leagueBean;
                List<BasketballMixInfo> matchList = new ArrayList<>();
                JSONArray jsonArray3 = dataListArray.getJSONArray(i);
                if (jsonArray3.length() > 1) {
                    JSONArray LeagueArray = jsonArray3.getJSONArray(0);
                    if (LeagueArray.length() > 1) {
                        leagueBean = new LeagueBean(LeagueArray.get(0).toString(), LeagueArray.getString(1));
                    } else {
                        continue;
                    }
                    JSONArray LeagueMatchArray = jsonArray3.getJSONArray(1);
                    if (LeagueMatchArray.length() > 0) {
                        for (int j = 0; j < LeagueMatchArray.length(); j++) {
                            JSONArray matchArray = LeagueMatchArray.getJSONArray(j);
                            matchList.add(parseMatch(matchArray));
                        }
                    } else {
                        continue;
                    }
                    tableModules.add(new TableSportInfo<>(leagueBean, matchList));
                }
            }
        }
        return tableModules;
    }

    /**
     * Returns the string at {@code index} of {@code array}, or "" when the
     * slot is absent. Centralizes the null-guard that was previously repeated
     * for every field in {@link #parseMatch(JSONArray)}.
     */
    private static String stringAt(JSONArray array, int index) throws JSONException {
        return array.opt(index) == null ? "" : array.getString(index);
    }

    /**
     * Maps one positional match array onto a {@link BasketballMixInfo} bean.
     * The field order (index 0..31) is fixed by the server protocol.
     */
    private BasketballMixInfo parseMatch(JSONArray matchArray) throws JSONException {
        BasketballMixInfo info = new BasketballMixInfo();
        info.setSocOddsId(stringAt(matchArray, 0));
        info.setHasRunning(stringAt(matchArray, 1));
        info.setLive(stringAt(matchArray, 2));
        info.setIsLastCall(stringAt(matchArray, 3));
        info.setMatchDate(stringAt(matchArray, 4));
        info.setIsHomeGive(stringAt(matchArray, 5));
        info.setHome(stringAt(matchArray, 6));
        info.setAway(stringAt(matchArray, 7));
        info.setIsInetBet(stringAt(matchArray, 8));
        info.setHasHdp(stringAt(matchArray, 9));
        info.setHdp(stringAt(matchArray, 10));
        info.setHdpOdds(stringAt(matchArray, 11));
        info.setHomeHdpOdds(stringAt(matchArray, 12));
        info.setAwayHdpOdds(stringAt(matchArray, 13));
        info.setHasOU(stringAt(matchArray, 14));
        info.setOU(stringAt(matchArray, 15));
        info.setIsHdpNew(stringAt(matchArray, 16));
        info.setIsOUNew(stringAt(matchArray, 17));
        info.setOUOdds(stringAt(matchArray, 18));
        info.setOverOdds(stringAt(matchArray, 19));
        info.setUnderOdds(stringAt(matchArray, 20));
        info.setIsOENew(stringAt(matchArray, 21));
        info.setHasOE(stringAt(matchArray, 22));
        info.setOEOdds(stringAt(matchArray, 23));
        info.setOddOdds(stringAt(matchArray, 24));
        info.setEvenOdds(stringAt(matchArray, 25));
        info.setIsX12New(stringAt(matchArray, 26));
        info.setHasX12(stringAt(matchArray, 27));
        info.setX12_1Odds(stringAt(matchArray, 28));
        info.setX12_XOdds(stringAt(matchArray, 29));
        info.setX12_2Odds(stringAt(matchArray, 30));
        info.setPreSocOddsId(stringAt(matchArray, 31));
        return info;
    }

    /** Applies the collection filter when active; otherwise passes data through. */
    @Override
    protected List<TableSportInfo<BasketballMixInfo>> filterChildData(List<TableSportInfo<BasketballMixInfo>> allData) {
        if (isCollection())
            return filterCollection(allData);
        else
            return allData;
    }

    /**
     * Keeps only the leagues/matches the user has collected locally. When
     * nothing at all is collected, drops the filter flag, shows a toast and
     * returns the (empty) result.
     */
    private List<TableSportInfo<BasketballMixInfo>> filterCollection(List<TableSportInfo<BasketballMixInfo>> data) {
        List<TableSportInfo<BasketballMixInfo>> moduleDate = new ArrayList<>();
        for (TableSportInfo<BasketballMixInfo> tableModuleBean : data) {
            // Look the league up once instead of twice per iteration.
            Map<String, Boolean> moduleMap = localCollectionMap.get(tableModuleBean.getLeagueBean().getModuleTitle());
            if (moduleMap != null) {
                List<BasketballMixInfo> moduleCollectionRows = new ArrayList<>();
                TableSportInfo<BasketballMixInfo> moduleCollection = new TableSportInfo<BasketballMixInfo>(tableModuleBean.getLeagueBean(), moduleCollectionRows);
                for (BasketballMixInfo matchBean : tableModuleBean.getRows()) {
                    // Matches are keyed by "home+away"; only explicitly collected rows pass.
                    Boolean collected = moduleMap.get(matchBean.getHome() + "+" + matchBean.getAway());
                    if (collected != null && collected) {
                        moduleCollectionRows.add(matchBean);
                    }
                }
                moduleCollection.setRows(moduleCollectionRows);
                moduleDate.add(moduleCollection);
            }
        }
        if (moduleDate.isEmpty()) {
            isCollection = false;
            ToastUtils.showShort(R.string.no_records);
        }
        return moduleDate;
    }

    /** Index of the match id within a raw match array. */
    @Override
    protected int getIndexSocOddsId() {
        return 0;
    }

    /** Index of the previous match id within a raw match array. */
    @Override
    protected int getIndexPreSocOddsId() {
        return 31;
    }

    /** @return the selectable market types: "Today" and "Early". */
    @Override
    protected List<MenuItemInfo> getTypes() {
        List<MenuItemInfo> types = new ArrayList<>();
        types.add(new MenuItemInfo(1, getBaseView().getContextActivity().getString(R.string.Today), "Today"));
        types.add(new MenuItemInfo(1, getBaseView().getContextActivity().getString(R.string.Early), "Early"));
        return types;
    }

    /** Clears the current mix selections; always reports false. */
    @Override
    public boolean mix() {
        clearMix();
        return false;
    }

    @Override
    public boolean isMix() {
        return true;
    }

    /** Replaces the first two header captions with basketball-specific labels. */
    @Override
    protected List<List<String>> initHeaderList() {
        List<List<String>> lists = super.initHeaderList();
        lists.get(1).set(0, getBaseView().getContextActivity().getString(R.string.TO_WIN));
        lists.get(1).set(1, getBaseView().getContextActivity().getString(R.string.FULL_O_E));
        return lists;
    }

    @Override
    public void setScrollHeaderContent(ScrollLayout slHeader, TextView tvAos) {
        super.setScrollHeaderContent(slHeader, tvAos);
        // The AOS column is unused for basketball; hide it and fix the header width.
        tvAos.setVisibility(View.GONE);
        slHeader.getLayoutParams().width = DeviceUtils.dip2px(getBaseView().getContextActivity(), 140);
    }
}
|
bashimao/ltudl
|
blaze/src/main/scala/edu/latrobe/blaze/Module.scala
|
/*
* La Trobe University - Distributed Deep Learning System
* Copyright 2016 <NAME> (<EMAIL>)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package edu.latrobe.blaze
import edu.latrobe._
import edu.latrobe.io.graph._
import edu.latrobe.time._
import scala.collection._
import scala.util.hashing._
import spire.implicits._
/**
* Named objects that support binding and updating.
*
* Common base for layers and models.
*
* doFunctions are special. They only operate in the context of the
* current object. They should not be called directly unless you
* know what you do.
*/
abstract class Module
extends InstanceEx[ModuleBuilder]
with ParameterizedInstance {
/**
* Must be overwritten with a constructor argument.
*/
def inputHints
: BuildHints
final val inputLayoutHint
: TensorLayout = inputHints.layout
final val inputSizeHint
: Size = inputLayoutHint.size
/**
* Can be implemented arbitrarily. But lazy val is probably your friend here
* because this is touched during each build and it should somewhat not
* depend on too much other stuff. (see implementation for Layer, to see why)
*/
def outputHints
: BuildHints
/**
* Must be overwritten with a constructor argument.
*/
def weightBufferBuilder
: ValueTensorBufferBuilder
final val handle
: String = builder.handle
// ---------------------------------------------------------------------------
// Statistics
// ---------------------------------------------------------------------------
/**
* Should be implemented as a lazy val.
*
* @return Number of neurons in module.
*/
def noNeurons
: Long
// ---------------------------------------------------------------------------
// Weights related
// ---------------------------------------------------------------------------
/**
* Should be implemented as a lazy val.
*/
@transient
final lazy val weightBuffer
: ValueTensorBuffer = weightBufferBuilder.result()
/**
* Typically you want to override this with a lazy val.
*/
def weightReferences
: Set[LabeledBufferReference]
/**
* Reinitializes the model.
*/
def reset(initializer: Initializer)
: Unit
/**
* Synchronizes any native hardware buffers and JVM buffers.
*/
def refresh()
: Unit
/**
 * Pulls the weight vector of every neuron in this module.
 *
 * @return One array of weights per neuron, indexed by neuron number.
 */
final def extractWeights()
: Array[Array[Real]] = {
  val neuronCount = noNeurons
  // Array indices are Int, so the neuron count must fit into one.
  require(neuronCount < Int.MaxValue)
  ArrayEx.tabulate(neuronCount.toInt)(
    neuronNo => extractWeightsFor(neuronNo)
  )
}
def extractWeightsFor(neuronNo
: Long): Array[Real]
/**
 * Pulls the weight vectors of the selected neurons.
 *
 * @param neuronNos Neuron numbers to extract.
 * @return Sorted map from neuron number to its weight vector.
 */
final def extractWeightsFor(neuronNos: Seq[Long])
: SortedMap[Long, Array[Real]] = {
  val result = SortedMap.newBuilder[Long, Array[Real]]
  for (neuronNo <- neuronNos) {
    result += neuronNo -> extractWeightsFor(neuronNo)
  }
  result.result()
}
// ---------------------------------------------------------------------------
// Traversal related.
// ---------------------------------------------------------------------------
/**
 * Traverses forward through all modules.
 *
 * @param callbackFn Executed for each encountered module.
 */
final def touch(callbackFn: Module => Unit)
: Unit = {
  // Visit this module first, then delegate to the subclass to visit children.
  callbackFn(this)
  doTouch(callbackFn)
}
protected def doTouch(callbackFn: Module => Unit)
: Unit
// ---------------------------------------------------------------------------
// Forward propagation related.
// ---------------------------------------------------------------------------
/**
* Modules that require access to their original input throughout the entire
* forward propagation cycle should set this to true. This is only relevant
* for containers.
*/
def requiresMaintainingInputDuringForwardPropagation
: Boolean
/**
 * Predicts output activations based on the current state of the element.
 *
 * @param input     [in] Input activations.
 * @param reference [in] Reference (target) activations.
 * @return Estimated output activations.
 */
final def predict(mode: Mode,
                  input: Tensor,
                  reference: Tensor)
: BackpropagationContext = predict(
  mode,
  input,
  reference,
  idleOnEnterPredict,  // no per-module enter hook
  idleOnLeavePredict   // no per-module leave hook
)
/**
 * Predicts output activations based on the current state of the element and
 * executes a callback.
 *
 * @param input   [in] Input activations.
 * @param onEnter Called before each prediction of a sub-module.
 * @param onLeave Called for each prediction of a sub-module.
 * @return Processed output activations.
 */
final def predict(mode: Mode,
                  input: Tensor,
                  reference: Tensor,
                  onEnter: OnEnterPredict,
                  onLeave: OnLeavePredict)
: BackpropagationContext = {
  // Cost accumulated across every module visited during this pass.
  var value = Real.zero
  val bpBuilder = BackpropagationContext.newBuilder(input, reference)

  // Wraps the caller's enter hook: additionally decides whether the module
  // may overwrite its input tensor in place.
  def onEnterEx(module: Module,
                input: Tensor,
                reference: Tensor)
  : Boolean = {
    // Block the tensor if it is required throughout the BPROP or FPROP.
    if (module.requiresMaintainingInputDuringForwardPropagation) {
      bpBuilder.block(input)
    }
    else {
      bpBuilder.block(null)
    }
    // Allow in-place modification if not blocked and not used somewhere else.
    var inPlaceAllowed = onEnter(module, input, reference)
    if (inPlaceAllowed) {
      inPlaceAllowed = !bpBuilder.isBlocked(input)
    }
    if (inPlaceAllowed) {
      inPlaceAllowed = !bpBuilder.requiresMaintaining(input)
    }
    inPlaceAllowed
  }

  // Wraps the caller's leave hook: accumulates the cost and stashes whatever
  // tensors/contexts backpropagation will need later.
  def onLeaveEx(module: Module,
                input: Tensor,
                reference: Tensor,
                output: Tensor,
                context: PredictContext)
  : Unit = {
    // Update cost, call leave callback and unblock top-most input tensor.
    value += module.doEvaluate(input, reference, output, context)
    onLeave(module, input, reference, output, context)
    bpBuilder.unblock()
    // Add tensors we need for backprop to the list.
    // INP
    var needInp = false
    // If backpropagation is supported in the selected mode.
    if (mode.supportsBackpropagation) {
      module.backpropagationRequirementsForInput match {
        case TensorDependency.Required =>
          // If required for backpropagation by the current module and the mode of operation
          needInp = true
        case _ =>
      }
    }
    if (!needInp) {
      // However, if another module requires this tensor later or if we depend somehow on a tensor that must be kept.
      needInp = bpBuilder.requiresMaintaining(input)
    }
    bpBuilder.stash(if (needInp) input else null)
    if (needInp && logger.isTraceEnabled) {
      val outSize = StringEx.render(input.layout.noValues * Real.size, 1024)
      val totSize = bpBuilder.stashSize.mapValues(
        noValues => StringEx.render(noValues * Real.size, 1024)
      )
      logger.trace(s"Stashed input! $module, ${input.platform} + $outSize | $totSize")
    }
    // REF
    bpBuilder.stash(reference)
    // OUT
    // If backpropagation is supported in the selected mode.
    var needOut = false
    if (mode.supportsBackpropagation) {
      needOut = module.backpropagationRequirementsForOutput match {
        case TensorDependency.Required =>
          true
        case TensorDependency.NotRequired =>
          false
        case _ =>
          !needInp
      }
    }
    bpBuilder.stash(if (needOut) output else null)
    if (needOut && logger.isTraceEnabled) {
      // NOTE(review): `outSize` is computed from `input` although this is the
      // output trace — looks like a copy-paste from the input branch; confirm.
      val outSize = StringEx.render(input.layout.noValues * Real.size, 1024)
      val totSize = bpBuilder.stashSize.mapValues(
        noValues => StringEx.render(noValues * Real.size, 1024)
      )
      logger.trace(s"Stashed output! $module, ${output.platform} + $outSize | $totSize")
    }
    // Deallocate input tensors that we no longer need.
    if (!input.dependsOn(output)) {
      if (!bpBuilder.isBlocked(input)) {
        if (!bpBuilder.requiresMaintaining(input)) {
          input.close()
        }
      }
    }
    // CTX
    bpBuilder.stash(context)
  }

  val output = predictEx(
    mode,
    input, reference,
    onEnterEx, onLeaveEx
  )
  bpBuilder.result(mode, output, value)
}
/** Convenience overload: predicts a batch with idle (no-op) hooks. */
final def predict(mode: Mode,
                  batch: Batch)
: BackpropagationContext = {
  predict(
    mode,
    batch,
    idleOnEnterPredict,
    idleOnLeavePredict
  )
}
// Convenience overloads: run a hook-free predict over every batch in the
// given collection, preserving the collection kind of the argument.
final def predict(mode: Mode,
                  batches: Traversable[Batch])
: Traversable[BackpropagationContext] = {
  batches.map(
    predict(mode, _)
  )
}

final def predict(mode: Mode,
                  batches: Iterable[Batch])
: Iterable[BackpropagationContext] = {
  batches.map(
    predict(mode, _)
  )
}

final def predict(mode: Mode,
                  batches: Seq[Batch])
: Seq[BackpropagationContext] = {
  batches.map(
    predict(mode, _)
  )
}

final def predict(mode: Mode,
                  batches: IndexedSeq[Batch])
: IndexedSeq[BackpropagationContext] = {
  batches.map(
    predict(mode, _)
  )
}

// Array variant goes through ArrayEx instead of a collection wrapper.
final def predict(mode: Mode,
                  batches: Array[Batch])
: Array[BackpropagationContext] = {
  ArrayEx.map(
    batches
  )(predict(mode, _))
}
/** Unpacks the batch and forwards to the tensor-level predict with hooks. */
final def predict(mode: Mode,
                  batch: Batch,
                  onEnter: OnEnterPredict,
                  onLeave: OnLeavePredict)
: BackpropagationContext = {
  predict(
    mode,
    batch.input,
    batch.output,
    onEnter,
    onLeave
  )
}

// Collection-typed convenience overloads of the hook-aware predict,
// preserving the collection kind of the argument.
final def predict(mode: Mode,
                  batches: Traversable[Batch],
                  onEnter: OnEnterPredict,
                  onLeave: OnLeavePredict)
: Traversable[BackpropagationContext] = {
  batches.map(
    batch => predict(
      mode,
      batch.input,
      batch.output,
      onEnter,
      onLeave
    )
  )
}

final def predict(mode: Mode,
                  batches: Iterable[Batch],
                  onEnter: OnEnterPredict,
                  onLeave: OnLeavePredict)
: Iterable[BackpropagationContext] = {
  batches.map(batch => predict(
    mode,
    batch.input,
    batch.output,
    onEnter,
    onLeave
  ))
}

final def predict(mode: Mode,
                  batches: Seq[Batch],
                  onEnter: OnEnterPredict,
                  onLeave: OnLeavePredict)
: Seq[BackpropagationContext] = {
  batches.map(batch => predict(
    mode,
    batch.input,
    batch.output,
    onEnter,
    onLeave
  ))
}

final def predict(mode: Mode,
                  batches: IndexedSeq[Batch],
                  onEnter: OnEnterPredict,
                  onLeave: OnLeavePredict)
: IndexedSeq[BackpropagationContext] = {
  batches.map(batch => predict(
    mode,
    batch.input,
    batch.output,
    onEnter,
    onLeave
  ))
}

// Array variant goes through ArrayEx instead of a collection wrapper.
final def predict(mode: Mode,
                  batches: Array[Batch],
                  onEnter: OnEnterPredict,
                  onLeave: OnLeavePredict)
: Array[BackpropagationContext] = {
  ArrayEx.map(
    batches
  )(batch => predict(mode, batch.input, batch.output, onEnter, onLeave))
}
// TODO: Find better solution for this.
/**
 * Runs this module's own forward step: enter hook, doPredict, leave hook,
 * with optional trace timing.
 */
final protected[blaze] def predictEx(mode: Mode,
                                     input: Tensor,
                                     reference: Tensor,
                                     onEnter: OnEnterPredict,
                                     onLeave: OnLeavePredict)
: Tensor = {
  // Only pay for the stopwatch when trace logging is enabled.
  val clock = if (logger.isTraceEnabled) Stopwatch() else null
  val inPlaceAllowed = onEnter(this, input, reference)
  val (output, context) = doPredict(
    mode,
    inPlaceAllowed, input, reference,
    onEnter, onLeave
  )
  onLeave(this, input, reference, output, context)
  if (clock != null) {
    val ipa = if (inPlaceAllowed) "ipa" else " "
    logger.trace(
      f"$clock%s => predict($ipa, ${input.platform}%-4s) => $this%s"
    )
  }
  output
}
/**
* Predicts output activations based on the current state of the element and
* executes a callback.
*
* @param input [in] Input activations.
* @param onEnter Called for each prediction of a sub-module.
* @return Processed output activations.
*/
protected def doPredict(mode: Mode,
inPlaceAllowed: Boolean,
input: Tensor,
reference: Tensor,
onEnter: OnEnterPredict,
onLeave: OnLeavePredict)
: (Tensor, PredictContext)
/**
 * Runs a forward pass and repackages the prediction as a batch.
 * Intermediate tensors are dropped immediately to free resources.
 */
final def project(mode: Mode, batch: Batch)
: Batch = {
  val prediction = predict(mode, batch).dropIntermediates()
  batch.derive(prediction.output)
}
/** Inverse prediction without a leave hook. */
final def predictInv(context: BackpropagationContext)
: Tensor = predictInv(context, idleOnLeavePredict)

// Collection-typed convenience overloads of the hook-free inverse
// prediction, preserving the collection kind of the argument.
final def predictInv(contexts: Traversable[BackpropagationContext])
: Traversable[Tensor] = contexts.map(predictInv)
final def predictInv(contexts: Iterable[BackpropagationContext])
: Iterable[Tensor] = contexts.map(predictInv)
final def predictInv(contexts: Seq[BackpropagationContext])
: Seq[Tensor] = contexts.map(predictInv)
final def predictInv(contexts: IndexedSeq[BackpropagationContext])
: IndexedSeq[Tensor] = contexts.map(predictInv)
final def predictInv(contexts: Array[BackpropagationContext])
: Array[Tensor] = ArrayEx.map(contexts)(predictInv)
/**
 * Predicts input activations based on the current state of the layer.
 * (not possible with all layer types)
 *
 * @param context [in] Output activations.
 * @return Estimated input activations.
 */
final def predictInv(context: BackpropagationContext,
                     onLeave: OnLeavePredict)
: Tensor = {
  def onLeaveEx(module: Module,
                input: Tensor,
                reference: Tensor,
                _output: Tensor,
                context: PredictContext)
  : Unit = {
    onLeave(module, input, reference, _output, context)
    // TODO: Does not really work correctly if using tables.
    // NOTE(review): `(_output ne _output)` is always false, so this
    // `input.close()` is dead code. Judging by the commented-out legacy
    // traversal further below, the second operand was probably meant to be
    // the original top-level output — confirm intent before fixing.
    if ((_output ne input) && (_output ne _output)) {
      input.close()
    }
  }
  // Copy the stashed per-module contexts into a stack for the backward walk.
  val _contexts = mutable.Stack.concat(context.contexts)
  predictInvEx(context.output, onLeaveEx, _contexts)
}
// Collection-typed convenience overloads of the hook-aware inverse
// prediction, preserving the collection kind of the argument.
final def predictInv(contexts: Traversable[BackpropagationContext],
                     onLeave: OnLeavePredict)
: Traversable[Tensor] = contexts.map(predictInv(_, onLeave))
final def predictInv(contexts: Iterable[BackpropagationContext],
                     onLeave: OnLeavePredict)
: Iterable[Tensor] = contexts.map(predictInv(_, onLeave))
final def predictInv(contexts: Seq[BackpropagationContext],
                     onLeave: OnLeavePredict)
: Seq[Tensor] = contexts.map(predictInv(_, onLeave))
final def predictInv(contexts: IndexedSeq[BackpropagationContext],
                     onLeave: OnLeavePredict)
: IndexedSeq[Tensor] = contexts.map(predictInv(_, onLeave))
// Array variant goes through ArrayEx instead of a collection wrapper.
final def predictInv(contexts: Array[BackpropagationContext],
                     onLeave: OnLeavePredict)
: Array[Tensor] = ArrayEx.map(contexts)(predictInv(_, onLeave))
/**
 * Runs this module's own inverse step: pops its stashed context, derives the
 * input from the output, then fires the leave hook.
 */
final protected[blaze] def predictInvEx(output: Tensor,
                                        onLeave: OnLeavePredict,
                                        contexts: mutable.Stack[PredictContext])
: Tensor = {
  val clock = if (logger.isTraceEnabled) Stopwatch() else null
  // Pop this module's own context off the stack stashed during predict.
  val context = contexts.pop()
  val input = doPredictInv(output, context, onLeave, contexts)
  // No reference tensor is available on the inverse path, hence null.
  onLeave(this, input, null, output, context)
  if (clock != null) {
    logger.trace(f"$clock%s => predictInv(${output.platform}%-4s) => $this%s")
  }
  input
}
/**
* Performs inverse prediction (output to input). Not possible for all modules.
*
* @param output [in] Output activations.
* @param onLeave [in] Function to be called after each prediction.
* @return Input activations.
*/
protected def doPredictInv(output: Tensor,
context: PredictContext,
onLeave: OnLeavePredict,
contexts: mutable.Stack[PredictContext])
: Tensor
/*
// Start with what we have.
var contexts = prediction.contexts
// Traverse backward through layers.
var out = prediction.output
traverseBackward(
mode,
module => {
// Get context.
val ctx = contexts.head; contexts = contexts.tail
// Do a local prediction.
val inp = module.doPredictInv(mode, out, ctx)
if (callback != null) {
callback(this, mode, inp, out, ctx)
}
// Deallocate output if:
// 1. It is not identical to the input.
// 2. It was not the original output.
if (!(out eq inp) && !(out eq prediction.output)) {
out.deallocate()
}
// Next!
out = inp
},
module => {},
() => throw new UnsupportedOperationException,
() => throw new UnsupportedOperationException,
() => throw new UnsupportedOperationException,
() => throw new UnsupportedOperationException
)
out
*/
/*
/**
* Performs inverse prediction (output to input). Not possible for all modules.
*
* @param mode [in] Mode of operation.
* @param output [in] Output activations.
* @param callback [in] Function to be called after each prediction.
* @return Input activations.
*/
protected def doPredictInv(mode: ComputeMode,
output: SampleTensor,
context: Any,
callback: PredictInvCallbackS)
: SampleTensor*/
// ---------------------------------------------------------------------------
// Cost/Gradient computation related.
// ---------------------------------------------------------------------------
/**
* Modules that require access to the original layer input at any stage during
* backpropagation should set this to true.
*/
//def requiresInputForBackpropagation(mode: ComputeMode): Boolean
/**
* Modules that require access to the untainted layer output at any stage
* during backpropagation should set this to true.
*/
//def requiresOutputForBackpropagation(mode: ComputeMode): Boolean
/*
final def evaluate(mode: OperationMode,
input: Tensor,
reference: Tensor)
: EvaluationResult = evaluate(
mode, input, reference, idleOnEnterEvaluate, idleOnLeaveEvaluate
)
final def evaluate(mode: OperationMode,
input: Tensor,
reference: Tensor,
onEnter: OnEnterEvaluate,
onLeave: OnLeaveEvaluate)
: EvaluationResult = {
val prediction = predict(mode, input, reference, onEnterEx, onLeaveEx)
EvaluationResult(prediction, value)
}
final def evaluate(mode: OperationMode, batch: Batch)
: EvaluationResult = evaluate(
mode, batch, idleOnEnterEvaluate, idleOnLeaveEvaluate
)
final def evaluate(mode: OperationMode, batches: Traversable[Batch])
: Traversable[EvaluationResult] = batches.map(evaluate(mode, _))
final def evaluate(mode: OperationMode, batches: Iterable[Batch])
: Iterable[EvaluationResult] = batches.map(evaluate(mode, _))
final def evaluate(mode: OperationMode, batches: Seq[Batch])
: Seq[EvaluationResult] = batches.map(evaluate(mode, _))
final def evaluate(mode: OperationMode, batches: IndexedSeq[Batch])
: IndexedSeq[EvaluationResult] = batches.map(evaluate(mode, _))
final def evaluate(mode: OperationMode, batches: Array[Batch])
: Array[EvaluationResult] = ArrayEx.map(batches)(evaluate(mode, _))
final def evaluate(mode: OperationMode, batches: DenseVector[Batch])
: Array[EvaluationResult] = VectorEx.map(batches)(evaluate(mode, _))
final def evaluate(mode: OperationMode, batches: DenseMatrix[Batch])
: Array[EvaluationResult] = MatrixEx.map(batches)(evaluate(mode, _))
final def evaluate(mode: OperationMode,
batch: Batch,
onEnter: OnEnterEvaluate,
onLeave: OnLeaveEvaluate)
: EvaluationResult = evaluate(
mode, batch.input, batch.output, onEnter, onLeave
)
final def evaluate(mode: OperationMode,
batches: Traversable[Batch],
onEnter: OnEnterEvaluate,
onLeave: OnLeaveEvaluate)
: Traversable[EvaluationResult] = batches.map(
evaluate(mode, _, onEnter, onLeave)
)
final def evaluate(mode: OperationMode,
batches: Iterable[Batch],
onEnter: OnEnterEvaluate,
onLeave: OnLeaveEvaluate)
: Iterable[EvaluationResult] = batches.map(
evaluate(mode, _, onEnter, onLeave)
)
final def evaluate(mode: OperationMode,
batches: Seq[Batch],
onEnter: OnEnterEvaluate,
onLeave: OnLeaveEvaluate)
: Seq[EvaluationResult] = batches.map(evaluate(mode, _, onEnter, onLeave))
final def evaluate(mode: OperationMode,
batches: IndexedSeq[Batch],
onEnter: OnEnterEvaluate,
onLeave: OnLeaveEvaluate)
: IndexedSeq[EvaluationResult] = batches.map(
evaluate(mode, _, onEnter, onLeave)
)
final def evaluate(mode: OperationMode,
batches: Array[Batch],
onEnter: OnEnterEvaluate,
onLeave: OnLeaveEvaluate)
: Array[EvaluationResult] = ArrayEx.map(batches)(
evaluate(mode, _, onEnter, onLeave)
)
final def evaluate(mode: OperationMode,
batches: DenseVector[Batch],
onEnter: OnEnterEvaluate,
onLeave: OnLeaveEvaluate)
: Array[EvaluationResult] = VectorEx.map(batches)(
evaluate(mode, _, onEnter, onLeave)
)
final def evaluate(mode: OperationMode,
batches: DenseMatrix[Batch],
onEnter: OnEnterEvaluate,
onLeave: OnLeaveEvaluate)
: Array[EvaluationResult] = MatrixEx.map(batches)(
evaluate(mode, _, onEnter, onLeave)
)
*/
/*
final def evaluate(mode: OperationMode, batches: BatchPool)
: Cost = evaluate(mode, batches, idleOnEnterEvaluate, idleOnLeaveEvaluate)
final def evaluate(mode: OperationMode,
batches: BatchPool,
onEnter: OnEnterEvaluate,
onLeave: OnLeaveEvaluate)
: Cost = batches.foldLeft(Cost.zero)((cost, batch) => {
cost + evaluate(mode, batch, onEnter, onLeave).dropPrediction()
})
*/
  /**
   * Estimates the cost contribution of this module. This callback is
   * automatically invoked by the evaluation driver after each prediction, so
   * only computations that are not already triggered by the prediction itself
   * belong here.
   *
   * @param input     [in] Input activations of the module.
   * @param reference [in] Activations to compare against (only meaningful for cost functions!)
   * @param output    [in] Output activations produced by this module.
   * @param context   [in] Prediction context recorded during the forward pass.
   * @return The cost contribution of this module (avg. cost per sample in batch).
   */
  // THIS HAS JUST A LOCAL SCOPE!
  protected def doEvaluate(input: Tensor,
                           reference: Tensor,
                           output: Tensor,
                           context: PredictContext)
  : Real
// ---------------------------------------------------------------------------
// Derive input error (without given error)
// ---------------------------------------------------------------------------
  /**
   * Modules that require access to the original layer input at any stage
   * during backpropagation should set this to true.
   *
   * We use this flag to determine whether we should keep or discard a certain
   * input. Thus allowing us to save memory.
   *
   */
  def backpropagationRequirementsForInput
  : TensorDependency
  /**
   * Modules that require access to the untainted layer output at any stage
   * during backpropagation must set this to true.
   *
   * We use this flag to determine whether we should keep or discard a certain
   * input. Thus allowing us to save memory.
   */
  // NOTE(review): the sentence above presumably means "a certain output" for
  // this accessor — confirm against the call sites.
  def backpropagationRequirementsForOutput
  : TensorDependency
/*
// TODO: Handle this better to make memory consumption tunable.
final lazy val prefersInputForBackpropagation: Boolean = {
val inpDep = dependenceOnInputForBackpropagation
val outDep = dependenceOnOutputForBackpropagation
inpDep.level < outDep.level
}
// TODO: Handle this better to make memory consumption tunable.
final lazy val prefersOutputForBackpropagation
: Boolean = !prefersInputForBackpropagation
*/
/*
final def deriveInputError(prediction: PredictionEx)
: NextError = deriveInputError(
prediction,
idleOnEnterDeriveInputError,
idleOnLeaveDeriveInputError
)
final def deriveInputError(predictions: Traversable[PredictionEx])
: Traversable[NextError] = predictions.map(deriveInputError)
final def deriveInputError(predictions: Iterable[PredictionEx])
: Iterable[NextError] = predictions.map(deriveInputError)
final def deriveInputError(predictions: Seq[PredictionEx])
: Seq[NextError] = predictions.map(deriveInputError)
final def deriveInputError(predictions: IndexedSeq[PredictionEx])
: IndexedSeq[NextError] = predictions.map(deriveInputError)
final def deriveInputError(predictions: Array[PredictionEx])
: Array[NextError] = predictions.fastMap(deriveInputError)
final def deriveInputError(predictions: DenseVector[PredictionEx])
: DenseVector[NextError] = predictions.fastMap(deriveInputError)
final def deriveInputError(predictions: DenseMatrix[PredictionEx])
: DenseMatrix[NextError] = predictions.fastMap(deriveInputError)
final def deriveInputError(prediction: PredictionEx,
onEnter: OnEnterDeriveInputError,
onLeave: OnLeaveDeriveInputError)
: NextError = deriveInputErrorEx(prediction, onEnter, onLeave)
final def deriveInputError(predictions: Traversable[PredictionEx],
onEnter: OnEnterDeriveInputError,
onLeave: OnLeaveDeriveInputError)
: Traversable[NextError] = predictions.map(
deriveInputError(_, onEnter, onLeave)
)
final def deriveInputError(predictions: Iterable[PredictionEx],
onEnter: OnEnterDeriveInputError,
onLeave: OnLeaveDeriveInputError)
: Iterable[NextError] = predictions.map(
deriveInputError(_, onEnter, onLeave)
)
final def deriveInputError(predictions: Seq[PredictionEx],
onEnter: OnEnterDeriveInputError,
onLeave: OnLeaveDeriveInputError)
: Seq[NextError] = predictions.map(
deriveInputError(_, onEnter, onLeave)
)
final def deriveInputError(predictions: IndexedSeq[PredictionEx],
onEnter: OnEnterDeriveInputError,
onLeave: OnLeaveDeriveInputError)
: IndexedSeq[NextError] = predictions.map(
deriveInputError(_, onEnter, onLeave)
)
final def deriveInputError(predictions: Array[PredictionEx],
onEnter: OnEnterDeriveInputError,
onLeave: OnLeaveDeriveInputError)
: Array[NextError] = predictions.fastMap(
deriveInputError(_, onEnter, onLeave)
)
final def deriveInputError(predictions: DenseVector[PredictionEx],
onEnter: OnEnterDeriveInputError,
onLeave: OnLeaveDeriveInputError)
: DenseVector[NextError] = predictions.fastMap(
deriveInputError(_, onEnter, onLeave)
)
final def deriveInputError(predictions: DenseMatrix[PredictionEx],
onEnter: OnEnterDeriveInputError,
onLeave: OnLeaveDeriveInputError)
: DenseMatrix[NextError] = predictions.fastMap(
deriveInputError(_, onEnter, onLeave)
)
*/
// ---------------------------------------------------------------------------
// Derive input error (with given error)
// ---------------------------------------------------------------------------
/*
final def deriveInputError(prediction: PredictionEx,
error: Tensor)
: NextError = deriveInputError(
prediction,
error,
idleOnEnterDeriveInputError,
idleOnLeaveDeriveInputError
)
final def deriveInputError(prediction: PredictionEx,
error: Tensor,
onEnter: OnEnterDeriveInputError,
onLeave: OnLeaveDeriveInputError)
: NextError = deriveInputErrorEx(
prediction,
NextError(() => error.copy),
onEnter,
onLeave
)*/
/*
final private def deriveInputErrorEx(prediction: PredictionEx,
onEnter: OnEnterDeriveInputError,
onLeave: OnLeaveDeriveInputError)
: NextError = deriveInputErrorEx(
prediction,
NextError(prediction.output.allocateSiblingAndClear),
onEnter,
onLeave
)
final private def deriveInputErrorEx(prediction: PredictionEx,
error: NextError,
onEnter: OnEnterDeriveInputError,
onLeave: OnLeaveDeriveInputError)
: NextError = {
}
*/
// ---------------------------------------------------------------------------
// Derive gradient (without given input error, without sink)
// ---------------------------------------------------------------------------
/*
final def deriveGradients(prediction: PredictionEx)
: ParameterBuffer = deriveGradients(
prediction, idleOnEnterDeriveInputError, idleOnLeaveDeriveInputError
)
final def deriveGradients(prediction: PredictionEx,
onEnter: OnEnterDeriveInputError,
onLeave: OnLeaveDeriveInputError)
: (SortedMap[Int, Tensor], ParameterBuffer) = {
val gradients = ParameterBuffer.zeros(weightsLayout)
gradients.groups.foreach(sink => {
deriveGradients(prediction, sink, onEnter, onLeave).disposeThorough()
})
gradients
}
*/
// ---------------------------------------------------------------------------
// Derive gradient (with given input error, without sink)
// ---------------------------------------------------------------------------
/*
final def deriveGradients(prediction: PredictionEx, error: Tensor)
: (NextError, ParameterBuffer) = deriveGradients(
prediction,
error,
idleOnEnterDeriveInputError,
idleOnLeaveDeriveInputError
)
final def deriveGradients(prediction: PredictionEx,
error: Tensor,
onEnter: OnEnterDeriveInputError,
onLeave: OnLeaveDeriveInputError)
: (NextError, ParameterBuffer) = {
val gradients = ParameterBuffer.zeros(weightsLayout)
val errors = gradients.groups.map(
kv => kv._1 -> deriveGradients(prediction, error, kv, onEnter, onLeave)
)
errors -> gradients
}
final def deriveGradients(predictions: Iterable[PredictionEx],
errors: Iterable[Tensor],
onEnter: OnEnterDeriveInputError,
onLeave: OnLeaveDeriveInputError)
: ParameterBuffer = {
val layout = weightsLayout
val gradients = ParameterBuffer.zeros(layout)
var noSamples = 0
predictions.fastForeachEx(errors)((p, e) => {
val tmp = deriveGradients(p, e, onEnter, onLeave)
val n = p.input.noSamples
noSamples += n
lerp.inPlace(gradients, tmp, n / Real(noSamples))
})
gradients
}
final def deriveGradients(predictions: Array[PredictionEx],
errors: Array[Tensor],
onEnter: OnEnterDeriveInputError,
onLeave: OnLeaveDeriveInputError)
: ParameterBuffer = {
val layout = weightsLayout
val gradients = ParameterBuffer.zeros(layout)
var noSamples = 0
predictions.fastForeachEx(errors)((p, e) => {
val tmp = deriveGradients(p, e, onEnter, onLeave)
val n = p.input.noSamples
noSamples += n
lerp.inPlace(gradients, tmp, n / Real(noSamples))
})
gradients
}
final def deriveGradients(predictions: DenseVector[PredictionEx],
errors: DenseVector[Tensor],
onEnter: OnEnterDeriveInputError,
onLeave: OnLeaveDeriveInputError)
: ParameterBuffer = {
val layout = weightsLayout
val gradients = ParameterBuffer.zeros(layout)
var noSamples = 0
predictions.fastForeachEx(errors)((p, e) => {
val tmp = deriveGradients(p, e, onEnter, onLeave)
val n = p.input.noSamples
noSamples += n
lerp.inPlace(gradients, tmp, n / Real(noSamples))
})
gradients
}
final def deriveGradients(predictions: DenseMatrix[PredictionEx],
errors: DenseMatrix[Tensor],
onEnter: OnEnterDeriveInputError,
onLeave: OnLeaveDeriveInputError)
: ParameterBuffer = {
val layout = weightsLayout
val gradients = ParameterBuffer.zeros(layout)
var noSamples = 0
predictions.fastForeachEx(errors)((p, e) => {
val tmp = deriveGradients(p, e, onEnter, onLeave)
val n = p.input.noSamples
noSamples += n
lerp.inPlace(gradients, tmp, n / Real(noSamples))
})
gradients
}
*/
// ---------------------------------------------------------------------------
// Derive gradient (with given input error, with sink)
// ---------------------------------------------------------------------------
  // Convenience overloads that forward to the full five-argument
  // deriveGradients below, filling in idle callbacks and/or synthesizing the
  // initial output error (a zero-cleared sibling of the recorded output, or a
  // defensive copy of the caller-supplied error tensor).
  final def deriveGradients(context: BackpropagationContext,
                            sink: ValueTensorBuffer)
  : NextError = deriveGradients(
    context,
    sink,
    idleOnEnterDeriveInputError,
    idleOnLeaveDeriveInputError
  )
  final def deriveGradients(context: BackpropagationContext,
                            sink: ValueTensorBuffer,
                            onEnter: OnEnterDeriveGradients,
                            onLeave: OnLeaveDeriveGradients)
  : NextError = deriveGradients(
    context,
    // No explicit error supplied: start from an all-zero output error.
    IndependentError(context.output.createSiblingAndClear()),
    sink,
    onEnter,
    onLeave
  )
  final def deriveGradients(context: BackpropagationContext,
                            error: Tensor,
                            sink: ValueTensorBuffer)
  : NextError = deriveGradients(
    context,
    error,
    sink,
    idleOnEnterDeriveInputError,
    idleOnLeaveDeriveInputError
  )
  final def deriveGradients(context: BackpropagationContext,
                            error: Tensor,
                            sink: ValueTensorBuffer,
                            onEnter: OnEnterDeriveGradients,
                            onLeave: OnLeaveDeriveGradients)
  : NextError = deriveGradients(
    context,
    // Copy so that backpropagation never mutates the caller's tensor.
    IndependentError(error.copy),
    sink,
    onEnter,
    onLeave
  )
final def deriveGradients(context: BackpropagationContext,
error: NextError,
sink: ValueTensorBuffer,
onEnter: OnEnterDeriveGradients,
onLeave: OnLeaveDeriveGradients)
: NextError = {
// Well, first we should make sure whether this backpropagation context
// supports backprop at all.
if (!context.mode.supportsBackpropagation) {
throw new UnsupportedOperationException("The mode selected during the prediction that produced the backpropagation context does not support backpropgation!")
}
// Now push everything we have into a stack so that we can process things
// one by one.
val tensors = mutable.Stack.concat(context.intermediates)
val contexts = mutable.Stack.concat(context.contexts)
deriveGradientsEx(error, sink, onEnter, onLeave, tensors, contexts)
}
  /**
   * Performs one backward step: pops this module's recorded tensors and
   * context, runs [[doDeriveGradients]], and reports timings when trace
   * logging is enabled.
   */
  final def deriveGradientsEx(error: NextError,
                              sink: ValueTensorBuffer,
                              onEnter: OnEnterDeriveGradients,
                              onLeave: OnLeaveDeriveGradients,
                              intermediates: mutable.Stack[Tensor],
                              contexts: mutable.Stack[PredictContext])
  : NextError = {
    val clock = if (logger.isTraceEnabled) Stopwatch() else null
    val context = contexts.pop()
    // Pop order (output, reference, input) mirrors the push order of the
    // forward pass — assumes predict pushed (input, reference, output) per
    // module; confirm against the predict driver.
    val output = intermediates.pop()
    val reference = intermediates.pop()
    val input = intermediates.pop()
    onEnter(this, input, reference, output, context, error)
    val nextError = doDeriveGradients(
      input, reference, output, context,
      error,
      sink,
      onEnter, onLeave,
      intermediates, contexts
    )
    onLeave(this, input, reference, output, context, nextError)
    if (clock != null) {
      logger.trace(f"$clock%s => deriveGradients() => $this%s")
    }
    nextError
  }
  /**
   * Computes the gradients of this module's weights into the selected sink
   * and produces the (lazy) error towards the module's input. Most layers
   * split these two operations.
   *
   * @param input         [in] Input activations recorded for this module.
   * @param reference     [in] Reference tensor used by cost functions.
   * @param output        [in] Output activations recorded for this module.
   * @param context       [in] Prediction context recorded during predict.
   * @param error         [in] Error arriving at this module's output.
   * @param sink          [in] Buffer for storing the gradients.
   * @param onEnter       [in] Callback invoked before each nested backward step.
   * @param onLeave       [in] Callback invoked after each nested backward step.
   * @param intermediates [in] Remaining recorded tensors of preceding modules.
   * @param contexts      [in] Remaining prediction contexts of preceding modules.
   * @return Input error (and remaining outputs are consumed from the stacks).
   */
  // THIS HAS JUST A LOCAL SCOPE!
  protected def doDeriveGradients(input: Tensor,
                                  reference: Tensor,
                                  output: Tensor,
                                  context: PredictContext,
                                  error: NextError,
                                  sink: ValueTensorBuffer,
                                  onEnter: OnEnterDeriveGradients,
                                  onLeave: OnLeaveDeriveGradients,
                                  intermediates: mutable.Stack[Tensor],
                                  contexts: mutable.Stack[PredictContext])
  : NextError
  // Batched variants: accumulate gradients over several backpropagation
  // contexts into one sink and rescale by the number of contexts afterwards.
  // NOTE(review): in the callback-taking variants below, `onLeave` is declared
  // as OnEnterDeriveGradients — this looks like a typo for
  // OnLeaveDeriveGradients (it is forwarded to an onLeave parameter); confirm
  // the two aliases are interchangeable before relying on it.
  // NOTE(review): an empty collection yields `Real.one / 0` in the final
  // rescale — confirm whether callers guarantee non-empty input.
  final def deriveGradients(contexts: TraversableOnce[BackpropagationContext],
                            sink: ValueTensorBuffer)
  : Unit = deriveGradients(
    contexts,
    sink,
    idleOnEnterDeriveInputError,
    idleOnLeaveDeriveInputError
  )
  final def deriveGradients(contexts: TraversableOnce[BackpropagationContext],
                            sink: ValueTensorBuffer,
                            onEnter: OnEnterDeriveGradients,
                            onLeave: OnEnterDeriveGradients)
  : Unit = {
    var n = 0
    contexts.foreach(context => {
      // The propagated input error is not needed here; close it immediately.
      val nextError = deriveGradients(context, sink, onEnter, onLeave)
      nextError.close()
      n += 1
    })
    // Average the accumulated gradients over the processed contexts.
    sink *= Real.one / n
  }
  final def deriveGradients(contexts: Iterator[BackpropagationContext],
                            sink: ValueTensorBuffer)
  : Unit = deriveGradients(
    contexts,
    sink,
    idleOnEnterDeriveInputError,
    idleOnLeaveDeriveInputError
  )
  final def deriveGradients(contexts: Iterator[BackpropagationContext],
                            sink: ValueTensorBuffer,
                            onEnter: OnEnterDeriveGradients,
                            onLeave: OnEnterDeriveGradients)
  : Unit = {
    var n = 0
    contexts.foreach(context => {
      val nextError = deriveGradients(context, sink, onEnter, onLeave)
      nextError.close()
      n += 1
    })
    sink *= Real.one / n
  }
  final def deriveGradients(contexts: Array[BackpropagationContext],
                            sink: ValueTensorBuffer)
  : Unit = deriveGradients(
    contexts,
    sink,
    idleOnEnterDeriveInputError,
    idleOnLeaveDeriveInputError
  )
  final def deriveGradients(contexts: Array[BackpropagationContext],
                            sink: ValueTensorBuffer,
                            onEnter: OnEnterDeriveGradients,
                            onLeave: OnEnterDeriveGradients)
  : Unit = {
    ArrayEx.foreach(contexts)(context => {
      val nextError = deriveGradients(context, sink, onEnter, onLeave)
      nextError.close()
    })
    sink *= Real.one / contexts.length
  }
// ---------------------------------------------------------------------------
// State management.
// ---------------------------------------------------------------------------
  // Snapshot of this module's state; the default wrapper carries no payload
  // beyond the parent instance state.
  override def state
  : ModuleState = ModuleStateEx(super.state)
  // Restores the parent state and validates the state's type; ModuleStateEx
  // itself carries nothing extra to restore.
  override def restoreState(state: InstanceState)
  : Unit = {
    super.restoreState(state.parent)
    state match {
      case state: ModuleStateEx =>
      case _ =>
        throw new MatchError(state)
    }
  }
}
/**
 * Base builder for [[Module]] instances. Carries a mutable, user-visible
 * handle (defaulting to the builder's id), computes output/weight layouts for
 * given build hints, and supports graph export and self-checking.
 */
abstract class ModuleBuilder
  extends InstanceExBuilder1[ModuleBuilder, Module, BuildHints]
    with VariantBuilder {
  // Human-readable identifier; participates in equality and hashing.
  final private var _handle
  : String = id.toString
  final def handle
  : String = _handle
  final def handle_=(value: String)
  : Unit = {
    require(value != null)
    _handle = value
  }
  // Fluent setter; subclasses narrow the return type to themselves.
  def setHandle(value: String)
  : ModuleBuilder
  override def hashCode()
  : Int = MurmurHash3.mix(super.hashCode(), _handle.hashCode())
  override protected def doEquals(other: Equatable)
  : Boolean = super.doEquals(other) && (other match {
    case other: ModuleBuilder =>
      _handle == other._handle
    case _ =>
      false
  })
  override def copyTo(other: InstanceBuilder): Unit = {
    super.copyTo(other)
    other match {
      case other: ModuleBuilder =>
        other._handle = _handle
      case _ =>
    }
  }
  // ---------------------------------------------------------------------------
  // Statistics
  // ---------------------------------------------------------------------------
  // Convenience wrapper that collects the weight layout into a fresh buffer.
  final def weightLayoutFor(hints: BuildHints)
  : TensorLayoutBuffer = {
    val builder = TensorLayoutBufferBuilder()
    weightLayoutFor(hints, builder)
    builder.result()
  }
  /**
   * Records the weight layout this module would allocate for the given hints
   * into the supplied builder and returns the resulting output hints.
   */
  def weightLayoutFor(hints: BuildHints,
                      builder: TensorLayoutBufferBuilder)
  : BuildHints
  /**
   * @param hints the input size to try.
   * @return Returns the size of the output of this module, given the input
   *         size.
   */
  def outputHintsFor(hints: BuildHints): BuildHints
  // ---------------------------------------------------------------------------
  // Weights / binding related
  // ---------------------------------------------------------------------------
  // Builds with a throwaway weights builder when the caller does not need it.
  final override def build(hints: BuildHints,
                           seed: InstanceSeed)
  : Module = {
    val weightsBuilder = ValueTensorBufferBuilder()
    build(hints, seed, weightsBuilder)
  }
  def build(hints: BuildHints,
            seed: InstanceSeed,
            weightsBuilder: ValueTensorBufferBuilder)
  : Module
  // ---------------------------------------------------------------------------
  // Mutable variables and permutation.
  // ---------------------------------------------------------------------------
  // Rewrites handles recursively; subclasses with children must forward to
  // them in their doPermuteHandles override.
  def permuteHandles(fn: String => String)
  : ModuleBuilder
  protected def doPermuteHandles(fn: String => String)
  : Unit = handle_=(fn(_handle))
  def permuteWeightReferences(fn: LabeledBufferReference => LabeledBufferReference)
  : ModuleBuilder
  // Default: no weight references to rewrite.
  protected def doPermuteWeightReferences(fn: LabeledBufferReference => LabeledBufferReference)
  : Unit = {}
  // ---------------------------------------------------------------------------
  // Conversion related
  // ---------------------------------------------------------------------------
  final def toGraph(hints: BuildHints)
  : Graph = toGraph(Option(hints))
  // Renders this builder (and any children) into a fresh graph.
  final def toGraph(hints: Option[BuildHints] = None)
  : Graph = {
    val result = Graph()
    toGraphEx(
      hints,
      Seq.empty,
      LineStyle.Solid,
      result.nodes,
      result.edges
    )
    result
  }
  /**
   * @param nodeSink Vertices and vertex groups will end up here.
   * @param edgeSink Edge information ends up here.
   * @return The vertex for the current object.
   */
  def toGraphEx(hints: Option[BuildHints],
                inputs: Seq[Vertex],
                edgeStyle: LineStyle,
                nodeSink: mutable.Buffer[Node],
                edgeSink: mutable.Buffer[Edge])
  : (Option[BuildHints], Seq[Vertex])
  // ---------------------------------------------------------------------------
  // Checking related
  // ---------------------------------------------------------------------------
  final def check(hints: BuildHints,
                  indentLevel: Int = 0,
                  indentString: String = " ")
  : Long = checkEx(hints, indentLevel, indentString)._2
  /**
   * Validates this builder against the supplied hints, logging a readable,
   * indented report, and returns the output hints plus the error count.
   */
  final def checkEx(hints: BuildHints,
                    indentLevel: Int = 0,
                    indentString: String = " ")
  : (BuildHints, Long) = {
    // Print module header.
    val sb = StringBuilder.newBuilder
    cfor(0)(_ < indentLevel, _ + 1)(
      i => sb ++= indentString
    )
    sb ++= f"$this {"
    logger.info(sb.result())
    // Perform implementation specific tests.
    var noErrors = 0L
    var outputHints = hints
    try {
      val tmp = doCheckEx(hints, indentLevel, indentString)
      outputHints = tmp._1
      noErrors += tmp._2
    }
    catch {
      // Hint incompatibility is reported as a single error; the exception
      // details are intentionally not logged here.
      case ex: Exception =>
        sb.clear()
        // One level deeper than the header (<= instead of <): this message
        // appears inside the braces.
        cfor(0)(_ <= indentLevel, _ + 1)(
          i => sb ++= indentString
        )
        sb ++= s"The component does not support supplied hints '$hints'!"
        logger.info(sb.result())
        noErrors += 1L
    }
    // Print number of errors.
    sb.clear()
    cfor(0)(_ <= indentLevel, _ + 1)(
      i => sb ++= indentString
    )
    if (noErrors > 0L) {
      sb ++= f"Number of errors: $noErrors%d"
    }
    else {
      sb ++= "OK"
    }
    logger.info(sb.result())
    // Evaluate and exit.
    sb.clear()
    cfor(0)(_ < indentLevel, _ + 1)(
      i => sb ++= indentString
    )
    sb ++= f"}"
    logger.info(sb.result())
    (outputHints, noErrors)
  }
  // Implementation-specific checks; returns output hints and error count.
  protected def doCheckEx(hints: BuildHints,
                          indentLevel: Int,
                          indentString: String)
  : (BuildHints, Long)
}
/**
 * Convenience specialization of [[Module]] that narrows the type of the
 * builder reported by [[builder]] to the concrete builder class.
 */
abstract class ModuleEx[TBuilder <: ModuleExBuilder[_]]
  extends Module {
  override def builder
  : TBuilder
}
/**
 * F-bounded builder base that makes the fluent mutators return the concrete
 * builder type instead of the generic [[ModuleBuilder]].
 */
abstract class ModuleExBuilder[TThis <: ModuleExBuilder[_]]
  extends ModuleBuilder
    with VariantBuilderEx[TThis] {
  override def repr
  : TThis
  override protected def doCopy()
  : TThis
  final override def setHandle(value: String)
  : TThis = {
    handle_=(value)
    repr
  }
  // ---------------------------------------------------------------------------
  // Mutable variables and permutation.
  // ---------------------------------------------------------------------------
  final override def permuteHandles(fn: String => String)
  : TThis = {
    doPermuteHandles(fn)
    repr
  }
  // NOTE(review): declared return type is ModuleBuilder although repr (TThis)
  // is returned — likely meant to be TThis like the sibling above; confirm.
  final override def permuteWeightReferences(fn: LabeledBufferReference => LabeledBufferReference)
  : ModuleBuilder = {
    doPermuteWeightReferences(fn)
    repr
  }
}
/** Base type for serializable snapshots of a module's state. */
abstract class ModuleState
  extends InstanceState
/** Default state wrapper carrying only the parent instance state. */
final case class ModuleStateEx(override val parent: InstanceState)
  extends ModuleState
/**
 * Describes one concrete implementation variant of a module and scores how
 * well that variant fits a given build request.
 */
abstract class ModuleVariantDescription[TBuilder <: ModuleExBuilder[_]]
  extends VariantDescription[TBuilder] {
  /**
   * Scores this variant for the supplied builder and hints; higher is better.
   *
   * @param builder  [in] Builder requesting a variant.
   * @param hints    [in] Build hints describing input platform and preferences.
   * @param priority [in] Registration priority of this variant.
   * @return The score and the list of reasons for the granted bonuses.
   */
  final def score(builder: TBuilder,
                  hints: BuildHints,
                  priority: Byte)
  : (Int, Array[String]) = {
    val reasons = Array.newBuilder[String]
    var result = baseScore(builder, priority, reasons)
    // Platform preference from the hints. (The previous code compared a
    // Platform element against the Option[Platform] itself, which could never
    // be true, so this bonus was unreachable.)
    if (platform.exists(p => hints.preferredPlatform.contains(p))) {
      result |= 1 << 24
      reasons += "platform preference from hints"
    }
    // Avoid tensor format switching.
    if (platform.exists(_ == hints.platform)) {
      result |= 1 << 15
      reasons += "input platform matches"
    }
    // Score overrides.
    result = doScore(builder, hints, result, reasons)
    (result, reasons.result())
  }
  // Hook for variant-specific score adjustments; default keeps the score.
  protected def doScore(builder: TBuilder,
                        hints: BuildHints,
                        scorePrev: Int,
                        reasons: mutable.ArrayBuilder[String])
  : Int = scorePrev
  /** Platform on which this variant would emit its output. */
  def outputPlatformFor(builder: TBuilder, hints: BuildHints)
  : Platform
  /** Instantiates the variant. */
  def build(builder: TBuilder,
            hints: BuildHints,
            seed: InstanceSeed,
            weightsBuilder: ValueTensorBufferBuilder)
  : Module
}
/**
 * Registry of implementation variants for one module type. Selects the
 * highest-scoring variant for a given builder and hints.
 */
class ModuleVariantTable[TBuilder <: ModuleExBuilder[_]]
  extends VariantTable[TBuilder, ModuleVariantDescription[TBuilder]] {
  /**
   * Scores all registered variants and returns the best one.
   * Note: variants scoring <= 0 are never selected because the running
   * maximum starts at 0.
   */
  final def lookup(builder: TBuilder, hints: BuildHints)
  : ModuleVariantDescription[TBuilder] = {
    // Score the variants and select variant with highest score.
    var highestScore: Int = 0
    var highestDesc: ModuleVariantDescription[TBuilder] = null
    MapEx.foreach(variants)((desc, priority) => {
      val (score, reasons) = desc.score(builder, hints, priority)
      if (logger.isDebugEnabled) {
        // Join the reasons with ", " for the debug line.
        val sb = StringBuilder.newBuilder
        ArrayEx.foreach(reasons)(reason => {
          sb ++= reason
          sb ++= ", "
        })
        // Drop the trailing separator.
        sb.length = Math.max(sb.length - 2, 0)
        logger.debug(f"$builder%s: $score%08x => $desc%s, $sb%s")
      }
      if (score > highestScore) {
        highestScore = score
        highestDesc = desc
      }
    })
    if (highestDesc == null) {
      throw new UnsupportedOperationException("Unable to determine a compatible variant!")
    }
    if (logger.isInfoEnabled) {
      logger.info(f"$builder%s: $highestDesc%s selected!")
    }
    highestDesc
  }
  // Convenience: lookup, then instantiate the winning variant.
  final def lookupAndBuild(builder: TBuilder,
                           hints: BuildHints,
                           seed: InstanceSeed,
                           weightsBuilder: ValueTensorBufferBuilder)
  : Module = {
    // Score the variants.
    val desc = lookup(builder, hints)
    // Instantiate highest and return.
    desc.build(builder, hints, seed, weightsBuilder)
  }
  // Convenience: lookup, then query the winning variant's output platform.
  final def outputPlatformFor(builder: TBuilder, hints: BuildHints)
  : Platform = {
    // Score the variants.
    val desc = lookup(builder, hints)
    // Instantiate highest and return.
    desc.outputPlatformFor(builder, hints)
  }
}
|
Teletha/CoinToss
|
src/main/java/cointoss/trade/Profitable.java
|
/*
* Copyright (C) 2021 cointoss Development Team
*
* Licensed under the MIT License (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://opensource.org/licenses/MIT
*/
package cointoss.trade;
import cointoss.util.arithmetic.Num;
public interface Profitable {

    /**
     * Computes the combined profit or loss at the given market price.
     *
     * @param currentPrice A current price.
     * @return The sum of the realized and the unrealized profit or loss.
     */
    default Num profit(Num currentPrice) {
        Num realized = realizedProfit();
        Num unrealized = unrealizedProfit(currentPrice);
        return realized.plus(unrealized);
    }

    /**
     * Returns the profit or loss that has already been locked in.
     *
     * @return A realized profit or loss of this entry.
     */
    Num realizedProfit();

    /**
     * Computes the profit or loss still open at the given market price.
     *
     * @param currentPrice A current price.
     * @return An unrealized profit or loss of this entry.
     */
    Num unrealizedProfit(Num currentPrice);

    /**
     * Returns the total commission paid for this entry.
     *
     * @return The total commission.
     */
    Num commission();
}
|
rding2454/IndeStudy
|
vpr/src/power/power.cpp
|
<filename>vpr/src/power/power.cpp
/*********************************************************************
* The following code is part of the power modelling feature of VTR.
*
* For support:
* http://code.google.com/p/vtr-verilog-to-routing/wiki/Power
*
* or email:
* <EMAIL>
*
 * If you are using power estimation for your research please cite:
*
* <NAME> and <NAME>. VersaPower: Power Estimation
* for Diverse FPGA Architectures. In International Conference on
* Field Programmable Technology, 2012.
*
********************************************************************/
/**
* This is the top-level file for power estimation in VTR
*/
/************************* INCLUDES *********************************/
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <csignal>
#include <ctime>
#include <cmath>
#include <ctype.h>
using namespace std;
#include "vtr_util.h"
#include "vtr_log.h"
#include "vtr_assert.h"
#include "vtr_memory.h"
#include "power.h"
#include "power_components.h"
#include "power_util.h"
#include "power_lowlevel.h"
#include "power_sizing.h"
#include "power_callibrate.h"
#include "power_cmos_tech.h"
#include "physical_types.h"
#include "globals.h"
#include "rr_graph.h"
#include "vpr_utils.h"
/************************* DEFINES **********************************/
#define CONVERT_NM_PER_M 1000000000 /* nanometres per metre */
#define CONVERT_UM_PER_M 1000000 /* micrometres per metre */
/************************* ENUMS ************************************/
/* Row-type tags used when printing entries of the power-breakdown table
 * (see power_print_breakdown_entry). */
typedef enum {
	POWER_BREAKDOWN_ENTRY_TYPE_TITLE = 0,
	POWER_BREAKDOWN_ENTRY_TYPE_MODE,
	POWER_BREAKDOWN_ENTRY_TYPE_COMPONENT,
	POWER_BREAKDOWN_ENTRY_TYPE_PB,
	POWER_BREAKDOWN_ENTRY_TYPE_INTERC,
	POWER_BREAKDOWN_ENTRY_TYPE_BUFS_WIRES
} e_power_breakdown_entry_type;
/************************* File Scope **********************************/
/* File-scope power bookkeeping, one entry per routing-resource node
 * (presumably allocated in power_routing_init() -- confirm). */
static t_rr_node_power * rr_node_power;
/************************* Function Declarations ********************/
/* Forward declarations for the file-local helpers, grouped by concern. */
/* Routing */
static void power_usage_routing(t_power_usage * power_usage,
		const t_det_routing_arch * routing_arch, t_segment_inf * segment_inf);
/* Tiles */
static void power_usage_blocks(t_power_usage * power_usage);
static void power_usage_pb(t_power_usage * power_usage, t_pb * pb,
		t_pb_graph_node * pb_node, ClusterBlockId iblk);
static void power_usage_primitive(t_power_usage * power_usage, t_pb * pb,
		t_pb_graph_node * pb_graph_node, ClusterBlockId iblk);
static void power_reset_tile_usage(void);
static void power_reset_pb_type(t_pb_type * pb_type);
static void power_usage_local_buffers_and_wires(t_power_usage * power_usage,
		t_pb * pb, t_pb_graph_node * pb_node, ClusterBlockId iblk);
/* Clock */
static void power_usage_clock(t_power_usage * power_usage,
		t_clock_arch * clock_arch);
static void power_usage_clock_single(t_power_usage * power_usage,
		t_clock_network * clock_inf);
/* Init/Uninit */
static void dealloc_mux_graph(t_mux_node * node);
static void dealloc_mux_graph_rec(t_mux_node * node);
/* Printing */
static void power_print_breakdown_pb_rec(FILE * fp, t_pb_type * pb_type,
		int indent);
static void power_print_summary(FILE * fp, const t_vpr_setup& vpr_setup);
//static void power_print_stats(FILE * fp);
static void power_print_breakdown_summary(FILE * fp);
static void power_print_breakdown_entry(FILE * fp, int indent,
		e_power_breakdown_entry_type type, const char * name, float power,
		float total_power, float perc_dyn, const char * method);
static void power_print_breakdown_component(FILE * fp, const char * name,
		e_power_component_type type, int indent_level);
static void power_print_breakdown_pb(FILE * fp);
static const char * power_estimation_method_name(
		e_power_estimation_method power_method);
/* Non-static helpers (also used during per-pin initialization). */
void power_usage_local_pin_toggle(t_power_usage * power_usage, t_pb * pb,
		t_pb_graph_pin * pin, ClusterBlockId iblk);
void power_usage_local_pin_buffer_and_wire(t_power_usage * power_usage,
		t_pb * pb, t_pb_graph_pin * pin, ClusterBlockId iblk);
void power_alloc_and_init_pb_pin(t_pb_graph_pin * pin);
void power_init_pb_pins_rec(t_pb_graph_node * pb_node);
void power_pb_pins_init();
void power_routing_init(const t_det_routing_arch * routing_arch);
/************************* FUNCTION DEFINITIONS *********************/
/**
 * This function calculates the power of primitives (ff, lut, etc),
 * by calling the appropriate estimation function for the BLIF model.
 * - power_usage: (Return value) dynamic + leakage power of the primitive
 * - pb: The physical block (may be NULL for an unused instance)
 * - pb_graph_node: The physical block graph node
 * - iblk: Clustered-netlist block containing this primitive
 */
static void power_usage_primitive(t_power_usage * power_usage, t_pb * pb,
        t_pb_graph_node * pb_graph_node, ClusterBlockId iblk) {
    t_power_usage sub_power_usage;
    power_zero_usage(power_usage);
    power_zero_usage(&sub_power_usage);
    auto& atom_ctx = g_vpr_ctx.atom();
    auto& device_ctx = g_vpr_ctx.device();
    auto& power_ctx = g_vpr_ctx.power();
    if (strcmp(pb_graph_node->pb_type->blif_model, MODEL_NAMES) == 0) {
        /* LUT */
        char * SRAM_values;
        float * input_probabilities;
        float * input_densities;
        int LUT_size;
        int pin_idx;
        VTR_ASSERT(pb_graph_node->num_input_ports == 1);
        LUT_size = pb_graph_node->num_input_pins[0];
        input_probabilities = (float*) vtr::calloc(LUT_size, sizeof(float));
        input_densities = (float*) vtr::calloc(LUT_size, sizeof(float));
        /* Gather static probability and transition density of each LUT input */
        for (pin_idx = 0; pin_idx < LUT_size; pin_idx++) {
            t_pb_graph_pin * pin = &pb_graph_node->input_pins[0][pin_idx];
            input_probabilities[pin_idx] = pin_prob(pb, pin, iblk);
            input_densities[pin_idx] = pin_dens(pb, pin, iblk);
        }
        if (pb) {
            AtomBlockId blk_id = atom_ctx.lookup.pb_atom(pb);
            SRAM_values = alloc_SRAM_values_from_truth_table(LUT_size,
                    atom_ctx.nlist.block_truth_table(blk_id));
        } else {
            /* Unused LUT instance: model it with an empty truth table */
            SRAM_values = alloc_SRAM_values_from_truth_table(LUT_size, AtomNetlist::TruthTable());
        }
        power_usage_lut(&sub_power_usage, LUT_size,
                power_ctx.arch->LUT_transistor_size, SRAM_values,
                input_probabilities, input_densities, power_ctx.solution_inf.T_crit);
        power_add_usage(power_usage, &sub_power_usage);
        free(SRAM_values);
        free(input_probabilities);
        free(input_densities);
    } else if (strcmp(pb_graph_node->pb_type->blif_model, MODEL_LATCH) == 0) {
        /* Flip-Flop */
        t_pb_graph_pin * D_pin = &pb_graph_node->input_pins[0][0];
        t_pb_graph_pin * Q_pin = &pb_graph_node->output_pins[0][0];
        float D_dens = 0.;
        float D_prob = 0.;
        float Q_prob = 0.;
        float Q_dens = 0.;
        float clk_dens = 0.;
        float clk_prob = 0.;
        D_dens = pin_dens(pb, D_pin, iblk);
        D_prob = pin_prob(pb, D_pin, iblk);
        Q_dens = pin_dens(pb, Q_pin, iblk);
        Q_prob = pin_prob(pb, Q_pin, iblk);
        /* Clock activity is taken from the first global clock network */
        clk_prob = device_ctx.clock_arch->clock_inf[0].prob;
        clk_dens = device_ctx.clock_arch->clock_inf[0].dens;
        power_usage_ff(&sub_power_usage, power_ctx.arch->FF_size, D_prob, D_dens,
                Q_prob, Q_dens, clk_prob, clk_dens, power_ctx.solution_inf.T_crit);
        power_add_usage(power_usage, &sub_power_usage);
    } else {
        /* Unknown primitive: warn rather than silently reporting zero power */
        char msg[vtr::bufsize];
        sprintf(msg, "No dynamic power defined for BLIF model: %s",
                pb_graph_node->pb_type->blif_model);
        power_log_msg(POWER_LOG_WARNING, msg);
        sprintf(msg, "No leakage power defined for BLIF model: %s",
                pb_graph_node->pb_type->blif_model);
        power_log_msg(POWER_LOG_WARNING, msg);
    }
}
/* Adds the user-specified energy-per-toggle power of one pin to power_usage.
 * The energy may optionally be scaled by the static probability of another
 * pin (possibly inverted), as specified in the architecture file. */
void power_usage_local_pin_toggle(t_power_usage * power_usage, t_pb * pb,
        t_pb_graph_pin * pin, ClusterBlockId iblk) {
    auto& power_ctx = g_vpr_ctx.power();

    power_zero_usage(power_usage);

    /* Scaling factor: probability of the controlling pin, if one exists */
    float scale_factor = 1.0;
    t_pb_graph_pin * ctrl_pin = pin->pin_power->scaled_by_pin;
    if (ctrl_pin) {
        scale_factor = pin_prob(pb, ctrl_pin, iblk);
        if (pin->port->port_power->reverse_scaled) {
            scale_factor = 1 - scale_factor;
        }
    }

    /* Energy/toggle -> average power. Density is in switches/cycle and a
     * toggle is two switches, hence the divide by 2. */
    power_usage->dynamic += scale_factor
            * pin->port->port_power->energy_per_toggle
            * pin_dens(pb, pin, iblk) / 2.0
            / power_ctx.solution_inf.T_crit;
}
/* Computes the power of the local wire attached to this pin, plus the pin's
 * local buffer (a buffer size of 0 means no buffer is present). */
void power_usage_local_pin_buffer_and_wire(t_power_usage * power_usage,
        t_pb * pb, t_pb_graph_pin * pin, ClusterBlockId iblk) {
    t_power_usage part;
    auto& power_ctx = g_vpr_ctx.power();

    power_zero_usage(power_usage);

    /* Switching power of the wire capacitance at this pin */
    double wire_cap = pin->pin_power->C_wire;
    power_usage_wire(&part, wire_cap, pin_dens(pb, pin, iblk),
            power_ctx.solution_inf.T_crit);
    power_add_usage(power_usage, &part);

    /* Local buffer, if any */
    float buffer_size = pin->pin_power->buffer_size;
    if (buffer_size) {
        power_usage_buffer(&part, buffer_size, pin_prob(pb, pin, iblk),
                pin_dens(pb, pin, iblk), false,
                power_ctx.solution_inf.T_crit);
        power_add_usage(power_usage, &part);
    }
}
/* Sums the local buffer + wire power over every input, output, and clock
 * pin of the given pb_graph_node. */
static void power_usage_local_buffers_and_wires(t_power_usage * power_usage,
        t_pb * pb, t_pb_graph_node * pb_node, ClusterBlockId iblk) {
    power_zero_usage(power_usage);

    /* Accumulate the power of every pin in one pin array (pins[port][pin]) */
    auto accumulate_ports = [&](t_pb_graph_pin ** pins, int num_ports,
            int * num_pins) {
        for (int port = 0; port < num_ports; port++) {
            for (int pin = 0; pin < num_pins[port]; pin++) {
                t_power_usage pin_power;
                power_usage_local_pin_buffer_and_wire(&pin_power, pb,
                        &pins[port][pin], iblk);
                power_add_usage(power_usage, &pin_power);
            }
        }
    };

    accumulate_ports(pb_node->input_pins, pb_node->num_input_ports,
            pb_node->num_input_pins);
    accumulate_ports(pb_node->output_pins, pb_node->num_output_ports,
            pb_node->num_output_pins);
    accumulate_ports(pb_node->clock_pins, pb_node->num_clock_ports,
            pb_node->num_clock_pins);
}
/** Calculates the power of a pb (physical block):
 * First checks which estimation method the arch file specifies for this
 * pb_type. Depending on the method:
 * - ABSOLUTE / C_INTERNAL / TOGGLE_PINS: use user-provided values
 * - SPECIFY_SIZES / AUTO_SIZES: estimate interconnect muxes, local
 *   buffers/wires, and (for leaf nodes) the primitive itself
 * - Call recursively for children (when the method is recursive)
 * - If no children, must be a primitive. Call primitive handler.
 */
static void power_usage_pb(t_power_usage * power_usage, t_pb * pb,
        t_pb_graph_node * pb_node, ClusterBlockId iblk) {
    t_power_usage power_usage_bufs_wires;
    t_power_usage power_usage_local_muxes;
    t_power_usage power_usage_children;
    t_power_usage power_usage_pin_toggle;
    t_power_usage power_usage_sub;
    int pb_type_idx;
    int pb_idx;
    int interc_idx;
    int pb_mode;
    int port_idx;
    int pin_idx;
    float dens_avg;
    int num_pins;
    auto& power_ctx = g_vpr_ctx.power();
    power_zero_usage(power_usage);
    t_pb_type * pb_type = pb_node->pb_type;
    t_pb_type_power * pb_power = pb_node->pb_type->pb_type_power;
    bool estimate_buffers_and_wire = false;
    bool estimate_multiplexers = false;
    bool estimate_primitives = false;
    bool recursive_children;
    /* Get mode */
    if (pb) {
        pb_mode = pb->mode;
    } else {
        /* Default mode if not initialized (will only affect leakage power) */
        pb_mode = pb_type->pb_type_power->leakage_default_mode;
    }
    recursive_children = power_method_is_recursive(
            pb_node->pb_type->pb_type_power->estimation_method);
    power_zero_usage(&power_usage_sub);
    switch (pb_node->pb_type->pb_type_power->estimation_method) {
    case POWER_METHOD_IGNORE:
    case POWER_METHOD_SUM_OF_CHILDREN:
        /* No power contribution at this level of the hierarchy */
        break;
    case POWER_METHOD_ABSOLUTE:
        /* User supplied an absolute power value per instance */
        power_add_usage(power_usage, &pb_power->absolute_power_per_instance);
        power_component_add_usage(&pb_power->absolute_power_per_instance,
                POWER_COMPONENT_PB_OTHER);
        break;
    case POWER_METHOD_C_INTERNAL:
        power_zero_usage(&power_usage_sub);
        /* Just take the average density of inputs pins and use
         * that with user-defined block capacitance and leakage */
        /* Average the activity of all pins */
        num_pins = 0;
        dens_avg = 0.;
        for (port_idx = 0; port_idx < pb_node->num_input_ports; port_idx++) {
            for (pin_idx = 0; pin_idx < pb_node->num_input_pins[port_idx];
                    pin_idx++) {
                dens_avg += pin_dens(pb,
                        &pb_node->input_pins[port_idx][pin_idx], iblk);
                num_pins++;
            }
        }
        if (num_pins != 0) {
            dens_avg = dens_avg / num_pins;
        }
        power_usage_sub.dynamic += power_calc_node_switching(
                pb_power->C_internal, dens_avg, power_ctx.solution_inf.T_crit);
        /* Leakage is an absolute */
        power_usage_sub.leakage +=
                pb_power->absolute_power_per_instance.leakage;
        /* Add to power of this PB */
        power_add_usage(power_usage, &power_usage_sub);
        // Add to component type
        power_component_add_usage(&power_usage_sub, POWER_COMPONENT_PB_OTHER);
        break;
    case POWER_METHOD_TOGGLE_PINS:
        power_zero_usage(&power_usage_pin_toggle);
        /* Add toggle power of each input pin */
        for (port_idx = 0; port_idx < pb_node->num_input_ports; port_idx++) {
            for (pin_idx = 0; pin_idx < pb_node->num_input_pins[port_idx];
                    pin_idx++) {
                t_power_usage pin_power;
                power_usage_local_pin_toggle(&pin_power, pb,
                        &pb_node->input_pins[port_idx][pin_idx], iblk);
                power_add_usage(&power_usage_pin_toggle, &pin_power);
            }
        }
        /* Add toggle power of each output pin */
        for (port_idx = 0; port_idx < pb_node->num_output_ports; port_idx++) {
            for (pin_idx = 0; pin_idx < pb_node->num_output_pins[port_idx];
                    pin_idx++) {
                t_power_usage pin_power;
                power_usage_local_pin_toggle(&pin_power, pb,
                        &pb_node->output_pins[port_idx][pin_idx], iblk);
                power_add_usage(&power_usage_pin_toggle, &pin_power);
            }
        }
        /* Add toggle power of each clock pin */
        for (port_idx = 0; port_idx < pb_node->num_clock_ports; port_idx++) {
            for (pin_idx = 0; pin_idx < pb_node->num_clock_pins[port_idx];
                    pin_idx++) {
                t_power_usage pin_power;
                power_usage_local_pin_toggle(&pin_power, pb,
                        &pb_node->clock_pins[port_idx][pin_idx], iblk);
                power_add_usage(&power_usage_pin_toggle, &pin_power);
            }
        }
        /* Static is supplied as an absolute */
        power_usage_pin_toggle.leakage +=
                pb_power->absolute_power_per_instance.leakage;
        // Add to this PB power
        power_add_usage(power_usage, &power_usage_pin_toggle);
        // Add to component type power
        power_component_add_usage(&power_usage_pin_toggle,
                POWER_COMPONENT_PB_OTHER);
        break;
    case POWER_METHOD_SPECIFY_SIZES:
        /* Transistor-level estimation using user-specified sizes */
        estimate_buffers_and_wire = true;
        estimate_multiplexers = true;
        estimate_primitives = true;
        break;
    case POWER_METHOD_AUTO_SIZES:
        /* Transistor-level estimation using auto-calculated sizes */
        estimate_buffers_and_wire = true;
        estimate_multiplexers = true;
        estimate_primitives = true;
        break;
    case POWER_METHOD_UNDEFINED:
    default:
        VTR_ASSERT(0);
        break;
    }
    if (pb_node->pb_type->class_type == LUT_CLASS) {
        /* LUTs will have a child node that is used to indicate pin
         * equivalence for routing purposes.
         * There is a crossbar to the child node; however,
         * this interconnect does not exist in FPGA hardware and should
         * be ignored for power calculations. */
        estimate_buffers_and_wire = false;
        estimate_multiplexers = false;
    }
    if (pb_node->pb_type->num_modes == 0) {
        /* This is a leaf node, which is a primitive (lut, ff, etc) */
        if (estimate_primitives) {
            VTR_ASSERT(pb_node->pb_type->blif_model);
            power_usage_primitive(&power_usage_sub, pb, pb_node, iblk);
            // Add to power of this PB
            power_add_usage(power_usage, &power_usage_sub);
            // Add to power of component type
            power_component_add_usage(&power_usage_sub,
                    POWER_COMPONENT_PB_PRIMITIVES);
        }
    } else {
        /* This node had children. The power of this node is the sum of:
         * - Buffers/Wires in Interconnect from Parent to children
         * - Multiplexers in Interconnect from Parent to children
         * - Child nodes
         */
        if (estimate_buffers_and_wire) {
            /* Check pins of all interconnect */
            power_usage_local_buffers_and_wires(&power_usage_bufs_wires, pb,
                    pb_node, iblk);
            power_component_add_usage(&power_usage_bufs_wires,
                    POWER_COMPONENT_PB_BUFS_WIRE);
            power_add_usage(
                    &pb_node->pb_type->pb_type_power->power_usage_bufs_wires,
                    &power_usage_bufs_wires);
            power_add_usage(power_usage, &power_usage_bufs_wires);
        }
        /* Interconnect Structures (multiplexers) */
        if (estimate_multiplexers) {
            power_zero_usage(&power_usage_local_muxes);
            for (interc_idx = 0;
                    interc_idx < pb_type->modes[pb_mode].num_interconnect;
                    interc_idx++) {
                power_usage_local_interc_mux(&power_usage_sub, pb,
                        &pb_node->interconnect_pins[pb_mode][interc_idx], iblk);
                power_add_usage(&power_usage_local_muxes, &power_usage_sub);
            }
            // Add to power of this PB
            power_add_usage(power_usage, &power_usage_local_muxes);
            // Add to component type power
            power_component_add_usage(&power_usage_local_muxes,
                    POWER_COMPONENT_PB_INTERC_MUXES);
            // Add to power of this mode
            power_add_usage(
                    &pb_node->pb_type->modes[pb_mode].mode_power->power_usage,
                    &power_usage_local_muxes);
        }
        /* Add power for children */
        if (recursive_children) {
            power_zero_usage(&power_usage_children);
            for (pb_type_idx = 0;
                    pb_type_idx
                            < pb_node->pb_type->modes[pb_mode].num_pb_type_children;
                    pb_type_idx++) {
                for (pb_idx = 0;
                        pb_idx
                                < pb_node->pb_type->modes[pb_mode].pb_type_children[pb_type_idx].num_pb;
                        pb_idx++) {
                    t_pb * child_pb = NULL;
                    t_pb_graph_node * child_pb_graph_node;
                    if (pb && pb->child_pbs[pb_type_idx][pb_idx].name) {
                        /* Child is initialized */
                        child_pb = &pb->child_pbs[pb_type_idx][pb_idx];
                    }
                    child_pb_graph_node =
                            &pb_node->child_pb_graph_nodes[pb_mode][pb_type_idx][pb_idx];
                    power_usage_pb(&power_usage_sub, child_pb,
                            child_pb_graph_node, iblk);
                    power_add_usage(&power_usage_children, &power_usage_sub);
                }
            }
            // Add to power of this PB
            power_add_usage(power_usage, &power_usage_children);
            // Add to power of this mode
            power_add_usage(
                    &pb_node->pb_type->modes[pb_mode].mode_power->power_usage,
                    &power_usage_children);
        }
    }
    /* Accumulate into per-pb_type statistics (used by the breakdown report) */
    power_add_usage(&pb_node->pb_type->pb_type_power->power_usage, power_usage);
}
/* Recursively zeroes the accumulated power statistics of a pb_type:
 * its own totals, each mode, each mode's interconnect, and all children. */
static void power_reset_pb_type(t_pb_type * pb_type) {
    power_zero_usage(&pb_type->pb_type_power->power_usage);
    power_zero_usage(&pb_type->pb_type_power->power_usage_bufs_wires);

    for (int mode_idx = 0; mode_idx < pb_type->num_modes; mode_idx++) {
        t_mode * mode = &pb_type->modes[mode_idx];

        power_zero_usage(&mode->mode_power->power_usage);

        /* Children of this mode */
        for (int child_idx = 0; child_idx < mode->num_pb_type_children;
                child_idx++) {
            power_reset_pb_type(&mode->pb_type_children[child_idx]);
        }

        /* Interconnect within this mode */
        for (int interc_idx = 0; interc_idx < mode->num_interconnect;
                interc_idx++) {
            power_zero_usage(
                    &mode->interconnect[interc_idx].interconnect_power->power_usage);
        }
    }
}
/**
* Resets the power usage for all tile types
*/
static void power_reset_tile_usage(void) {
int type_idx;
auto& device_ctx = g_vpr_ctx.device();
for (type_idx = 0; type_idx < device_ctx.num_block_types; type_idx++) {
if (device_ctx.block_types[type_idx].pb_type) {
power_reset_pb_type(device_ctx.block_types[type_idx].pb_type);
}
}
}
/*
 * Calculates the power usage of all tiles (clustered blocks) in the FPGA grid.
 */
static void power_usage_blocks(t_power_usage * power_usage) {
    auto& device_ctx = g_vpr_ctx.device();
    auto& cluster_ctx = g_vpr_ctx.clustering();
    auto& place_ctx = g_vpr_ctx.placement();

    power_zero_usage(power_usage);
    power_reset_tile_usage();

    /* Visit every grid location */
    for (size_t x = 0; x < device_ctx.grid.width(); x++) {
        for (size_t y = 0; y < device_ctx.grid.height(); y++) {
            /* Skip non-root locations of large blocks, and empty tiles */
            if (device_ctx.grid[x][y].width_offset != 0
                    || device_ctx.grid[x][y].height_offset != 0
                    || device_ctx.grid[x][y].type == device_ctx.EMPTY_TYPE) {
                continue;
            }

            for (int z = 0; z < device_ctx.grid[x][y].type->capacity; z++) {
                ClusterBlockId iblk = place_ctx.grid_blocks[x][y].blocks[z];

                /* Unoccupied locations are estimated with pb == NULL
                 * (power_usage_pb then uses the leakage default mode) */
                t_pb * pb = NULL;
                if (iblk != EMPTY_BLOCK_ID && iblk != INVALID_BLOCK_ID) {
                    pb = cluster_ctx.clb_nlist.block_pb(iblk);
                }

                /* Calculate power of this CLB */
                t_power_usage pb_power;
                power_usage_pb(&pb_power, pb,
                        device_ctx.grid[x][y].type->pb_graph_head, iblk);
                power_add_usage(power_usage, &pb_power);
            }
        }
    }
}
/**
 * Calculates the total power usage of the clock network, summed over all
 * global clocks in the architecture.
 */
static void power_usage_clock(t_power_usage * power_usage,
        t_clock_arch * clock_arch) {
    auto& power_ctx = g_vpr_ctx.power();

    power_usage->dynamic = 0.;
    power_usage->leakage = 0.;

    /* Nothing to do for architectures without a global clock */
    if (clock_arch->num_global_clocks == 0) {
        return;
    }

    for (int clock_idx = 0; clock_idx < clock_arch->num_global_clocks;
            clock_idx++) {
        t_clock_network * clock = &clock_arch->clock_inf[clock_idx];

        /* Assume the (single) global clock is active even for purely
         * combinational circuits. This will need to change for multi-clock. */
        if (clock_arch->num_global_clocks == 1 && clock->dens == 0) {
            clock->dens = 2;
            clock->prob = 0.5;
            clock->period = power_ctx.solution_inf.T_crit;
        }

        /* Power dissipated by this clock network */
        t_power_usage clock_power;
        power_usage_clock_single(&clock_power, clock);
        power_add_usage(power_usage, &clock_power);
    }
}
/**
 * Calculates the power from a single spine-and-rib global clock network.
 * - power_usage: (Return value) dynamic + leakage power of this clock
 * - single_clock: The clock network description (activity, C_wire, buffers)
 */
static void power_usage_clock_single(t_power_usage * power_usage,
        t_clock_network * single_clock) {
    /*
     * The following code assumes a spine-and-rib clock network as shown below.
     * This is comprised of 3 main components:
     * 1. A single wire from the io pad to the center of the chip
     * 2. A H-structure which provides a 'spine' to all 4 quadrants
     * 3. Ribs connect each spine with an entire column of blocks
     ___________________
     |                  |
     | |_|_|_2__|_|_|_  |
     | | | | | | | | |  |
     | |3| | | | | | |  |
     | |                |
     | | | | | | | | |  |
     | |_|_|__|_|_|_|_  |
     | | | | | | | | |  |
     |_______1|_________|
     * It is assumed that there are single-inverter buffers placed along each
     * wire, with spacing equal to the FPGA block size (1 buffer/block) */
    t_power_usage clock_buffer_power;
    t_power_usage buffer_power;
    t_power_usage wire_power;
    int length;
    float C_segment;
    float buffer_size;

    auto& power_ctx = g_vpr_ctx.power();
    auto& device_ctx = g_vpr_ctx.device();

    power_usage->dynamic = 0.;
    power_usage->leakage = 0.;

    /* The clock must be active by this point (power_usage_clock() forces a
     * non-zero density); the calculations below rely on it. The original
     * empty `if (dens) {} else assert` is simplified to a direct assertion. */
    VTR_ASSERT(single_clock->dens != 0);

    /* Wire capacitance of one buffer-to-buffer segment (one tile long) */
    C_segment = power_ctx.commonly_used->tile_length * single_clock->C_wire;
    if (single_clock->autosize_buffer) {
        buffer_size = 1 + C_segment / power_ctx.commonly_used->INV_1X_C_in;
    } else {
        buffer_size = single_clock->buffer_size;
    }

    /* Dynamic + leakage power of a single clock buffer */
    power_usage_inverter(&clock_buffer_power, single_clock->dens,
            single_clock->prob, buffer_size, single_clock->period);

    /* Total wire length in tiles; this is also the buffer count, since
     * buffers are spaced one per tile. */
    length = 0;
    /* 1. IO pad to chip center */
    length += device_ctx.grid.height() / 2;
    /* 2. H-tree to the 4 quadrants */
    length += device_ctx.grid.height() / 2; /* Vertical component of H */
    length += 2 * device_ctx.grid.width(); /* Horizontal component of H (two rows) */
    /* 3. Ribs - each rib spans 1/2 of the width; two rows of ribs */
    length += device_ctx.grid.width() / 2 * device_ctx.grid.height();

    buffer_power.dynamic = length * clock_buffer_power.dynamic;
    buffer_power.leakage = length * clock_buffer_power.leakage;

    power_add_usage(power_usage, &buffer_power);
    power_component_add_usage(&buffer_power, POWER_COMPONENT_CLOCK_BUFFER);

    /* Switching power of the total clock wire capacitance */
    power_usage_wire(&wire_power, length * C_segment, single_clock->dens,
            single_clock->period);
    power_add_usage(power_usage, &wire_power);
    power_component_add_usage(&wire_power, POWER_COMPONENT_CLOCK_WIRE);
}
/* Frees a multiplexer graph: recursively frees the child arrays of every
 * node, then frees the (separately-allocated) root node itself. */
static void dealloc_mux_graph(t_mux_node * node) {
    dealloc_mux_graph_rec(node);
    free(node);
}
/* Recursive helper for dealloc_mux_graph(): frees the children arrays
 * bottom-up. Leaf nodes (level 0) own no children array. */
static void dealloc_mux_graph_rec(t_mux_node * node) {
    if (node->level == 0) {
        return; /* Leaf node: nothing was allocated */
    }
    for (int child_idx = 0; child_idx < node->num_inputs; child_idx++) {
        dealloc_mux_graph_rec(&node->children[child_idx]);
    }
    free(node->children);
}
/**
 * Calculates the power of the entire global routing fabric (not local
 * intra-cluster routing): switch-box multiplexers and buffers, connection-box
 * multiplexers and drivers, and the routing wires themselves.
 * - power_usage: (Return value)
 * - routing_arch: Routing architecture (used to classify switch types)
 * - segment_inf: Wire segment information (per-unit metal capacitance)
 */
static void power_usage_routing(t_power_usage * power_usage,
        const t_det_routing_arch * routing_arch, t_segment_inf * segment_inf) {
    int rr_node_idx;
    int edge_idx;
    auto& power_ctx = g_vpr_ctx.power();
    auto& cluster_ctx = g_vpr_ctx.clustering();
    auto& device_ctx = g_vpr_ctx.device();
    auto& route_ctx = g_vpr_ctx.routing();
    power_zero_usage(power_usage);
    /* Reset routing statistics */
    power_ctx.commonly_used->num_sb_buffers = 0;
    power_ctx.commonly_used->total_sb_buffer_size = 0.;
    power_ctx.commonly_used->num_cb_buffers = 0;
    power_ctx.commonly_used->total_cb_buffer_size = 0.;
    /* Reset rr graph net indices */
    for (rr_node_idx = 0; rr_node_idx < device_ctx.num_rr_nodes; rr_node_idx++) {
        rr_node_power[rr_node_idx].net_num = ClusterNetId::INVALID();
        rr_node_power[rr_node_idx].num_inputs = 0;
        rr_node_power[rr_node_idx].selected_input = 0;
    }
    /* Populate net indices into rr graph (and clear visited flags) */
    for (auto net_id : cluster_ctx.clb_nlist.nets()) {
        t_trace * trace;
        for (trace = route_ctx.trace_head[net_id]; trace != NULL; trace = trace->next) {
            rr_node_power[trace->index].visited = false;
            rr_node_power[trace->index].net_num = net_id;
        }
    }
    /* Walk every routed net and record, for each downstream mux (CHANX/CHANY/
     * IPIN node), the density/probability of each of its inputs and which
     * input the router actually selected (the one carrying the same net). */
    for (auto net_id : cluster_ctx.clb_nlist.nets()) {
        t_trace * trace;
        for (trace = route_ctx.trace_head[net_id]; trace != NULL; trace = trace->next) {
            t_rr_node * node = &device_ctx.rr_nodes[trace->index];
            t_rr_node_power * node_power = &rr_node_power[trace->index];
            if (node_power->visited) {
                continue;
            }
            for (edge_idx = 0; edge_idx < node->num_edges(); edge_idx++) {
                if (node->edge_sink_node(edge_idx) != OPEN) {
                    t_rr_node * next_node = &device_ctx.rr_nodes[node->edge_sink_node(edge_idx)];
                    t_rr_node_power * next_node_power = &rr_node_power[node->edge_sink_node(edge_idx)];
                    switch (next_node->type()) {
                    case CHANX:
                    case CHANY:
                    case IPIN:
                        /* The selected mux input is the one on the same net */
                        if (next_node_power->net_num == node_power->net_num) {
                            next_node_power->selected_input = next_node_power->num_inputs;
                        }
                        next_node_power->in_dens[next_node_power->num_inputs] = clb_net_density(node_power->net_num);
                        next_node_power->in_prob[next_node_power->num_inputs] = clb_net_prob(node_power->net_num);
                        next_node_power->num_inputs++;
                        /* Sanity check: cannot record more inputs than fan-in */
                        if (next_node_power->num_inputs > next_node->fan_in()) {
                            vtr::printf_info("%d %d\n", next_node_power->num_inputs,
                                    next_node->fan_in());
                            fflush(0);
                            VTR_ASSERT(0);
                        }
                        break;
                    default:
                        /* Do nothing */
                        break;
                    }
                }
            }
            node_power->visited = true;
        }
    }
    /* Calculate power of all routing entities */
    for (rr_node_idx = 0; rr_node_idx < device_ctx.num_rr_nodes; rr_node_idx++) {
        t_power_usage sub_power_usage;
        t_rr_node * node = &device_ctx.rr_nodes[rr_node_idx];
        t_rr_node_power * node_power = &rr_node_power[rr_node_idx];
        float C_wire;
        float buffer_size;
        int switch_idx;
        int connectionbox_fanout;
        int switchbox_fanout;
        //float C_per_seg_split;
        int wire_length;
        switch (node->type()) {
        case SOURCE:
        case SINK:
        case OPIN:
            /* No power usage for these types */
            break;
        case IPIN:
            /* This is part of the connectionbox. The connection box is comprised of:
             * - Driver (accounted for at end of CHANX/Y - see below)
             * - Multiplexor */
            if (node->fan_in()) {
                VTR_ASSERT(node_power->in_dens);
                VTR_ASSERT(node_power->in_prob);
                /* Multiplexor */
                power_usage_mux_multilevel(&sub_power_usage,
                        power_get_mux_arch(node->fan_in(),
                                power_ctx.arch->mux_transistor_size),
                        node_power->in_prob, node_power->in_dens,
                        node_power->selected_input, true,
                        power_ctx.solution_inf.T_crit);
                power_add_usage(power_usage, &sub_power_usage);
                power_component_add_usage(&sub_power_usage,
                        POWER_COMPONENT_ROUTE_CB);
            }
            break;
        case CHANX:
        case CHANY:
            /* This is a wire driven by a switchbox, which includes:
             * - The Multiplexor at the beginning of the wire
             * - A buffer, after the mux to drive the wire
             * - The wire itself
             * - A buffer at the end of the wire, going to switchbox/connectionbox */
            VTR_ASSERT(node_power->in_dens);
            VTR_ASSERT(node_power->in_prob);
            /* Wire length in tiles */
            wire_length = 0;
            if (node->type() == CHANX) {
                wire_length = node->xhigh() - node->xlow() + 1;
            } else if (node->type() == CHANY) {
                wire_length = node->yhigh() - node->ylow() + 1;
            }
            C_wire =
                    wire_length
                            * segment_inf[device_ctx.rr_indexed_data[node->cost_index()].seg_index].Cmetal;
            //(double)power_ctx.commonly_used->tile_length);
            VTR_ASSERT(node_power->selected_input < node->fan_in());
            /* Multiplexor */
            power_usage_mux_multilevel(&sub_power_usage,
                    power_get_mux_arch(node->fan_in(),
                            power_ctx.arch->mux_transistor_size),
                    node_power->in_prob, node_power->in_dens,
                    node_power->selected_input, true, power_ctx.solution_inf.T_crit);
            power_add_usage(power_usage, &sub_power_usage);
            power_component_add_usage(&sub_power_usage,
                    POWER_COMPONENT_ROUTE_SB);
            /* Buffer Size */
            switch (device_ctx.rr_switch_inf[node_power->driver_switch_type].power_buffer_type) {
            case POWER_BUFFER_TYPE_AUTO:
                /*
                 C_per_seg_split = ((float) node->num_edges
                 * power_ctx.commonly_used->INV_1X_C_in + C_wire);
                 // / (float) power_ctx.arch->seg_buffer_split;
                 buffer_size = power_buffer_size_from_logical_effort(
                 C_per_seg_split);
                 buffer_size = max(buffer_size, 1.0F);
                 */
                buffer_size = power_calc_buffer_size_from_Cout(
                        device_ctx.rr_switch_inf[node_power->driver_switch_type].Cout);
                break;
            case POWER_BUFFER_TYPE_ABSOLUTE_SIZE:
                buffer_size =
                        device_ctx.rr_switch_inf[node_power->driver_switch_type].power_buffer_size;
                buffer_size = max(buffer_size, 1.0F);
                break;
            case POWER_BUFFER_TYPE_NONE:
                buffer_size = 0.;
                break;
            default:
                buffer_size = 0.;
                VTR_ASSERT(0);
                break;
            }
            power_ctx.commonly_used->num_sb_buffers++;
            power_ctx.commonly_used->total_sb_buffer_size += buffer_size;
            /*
             power_ctx.commonly_used->num_sb_buffers +=
             power_ctx.arch->seg_buffer_split;
             power_ctx.commonly_used->total_sb_buffer_size += buffer_size
             * power_ctx.arch->seg_buffer_split;
             */
            /* Buffer */
            power_usage_buffer(&sub_power_usage, buffer_size,
                    node_power->in_prob[node_power->selected_input],
                    node_power->in_dens[node_power->selected_input], true,
                    power_ctx.solution_inf.T_crit);
            power_add_usage(power_usage, &sub_power_usage);
            power_component_add_usage(&sub_power_usage,
                    POWER_COMPONENT_ROUTE_SB);
            /* Wire Capacitance */
            power_usage_wire(&sub_power_usage, C_wire,
                    clb_net_density(node_power->net_num), power_ctx.solution_inf.T_crit);
            power_add_usage(power_usage, &sub_power_usage);
            power_component_add_usage(&sub_power_usage,
                    POWER_COMPONENT_ROUTE_GLB_WIRE);
            /* Determine types of switches that this wire drives */
            connectionbox_fanout = 0;
            switchbox_fanout = 0;
            for (switch_idx = 0; switch_idx < node->num_edges(); switch_idx++) {
                if (node->edge_switch(switch_idx) == routing_arch->wire_to_rr_ipin_switch) {
                    connectionbox_fanout++;
                } else if (node->edge_switch(switch_idx) == routing_arch->delayless_switch) {
                    /* Do nothing */
                } else {
                    switchbox_fanout++;
                }
            }
            /* Buffer to next Switchbox */
            if (switchbox_fanout) {
                buffer_size = power_buffer_size_from_logical_effort(
                        switchbox_fanout * power_ctx.commonly_used->NMOS_1X_C_d);
                power_usage_buffer(&sub_power_usage, buffer_size,
                        1 - node_power->in_prob[node_power->selected_input],
                        node_power->in_dens[node_power->selected_input], false,
                        power_ctx.solution_inf.T_crit);
                power_add_usage(power_usage, &sub_power_usage);
                power_component_add_usage(&sub_power_usage,
                        POWER_COMPONENT_ROUTE_SB);
            }
            /* Driver for ConnectionBox */
            if (connectionbox_fanout) {
                buffer_size = power_buffer_size_from_logical_effort(
                        connectionbox_fanout
                                * power_ctx.commonly_used->NMOS_1X_C_d);
                power_usage_buffer(&sub_power_usage, buffer_size,
                        1 - node_power->in_prob[node_power->selected_input],
                        node_power->in_dens[node_power->selected_input],
                        false, power_ctx.solution_inf.T_crit);
                power_add_usage(power_usage, &sub_power_usage);
                power_component_add_usage(&sub_power_usage,
                        POWER_COMPONENT_ROUTE_CB);
                power_ctx.commonly_used->num_cb_buffers++;
                power_ctx.commonly_used->total_cb_buffer_size += buffer_size;
            }
            break;
        case INTRA_CLUSTER_EDGE:
            /* Intra-cluster routing is handled by power_usage_blocks() */
            VTR_ASSERT(0);
            break;
        default:
            power_log_msg(POWER_LOG_WARNING,
                    "The global routing-resource graph contains an unknown node type.");
            break;
        }
    }
}
/* Allocates and initializes the power structure of a single pb_graph pin.
 * If the pin's port is power-scaled by another port (per the arch file),
 * the matching pb_graph pin of that port is located and cached. */
void power_alloc_and_init_pb_pin(t_pb_graph_pin * pin) {
    t_pb_graph_node * node = pin->parent_node;

    pin->pin_power = (t_pb_graph_pin_power*) malloc(
            sizeof(t_pb_graph_pin_power));
    pin->pin_power->C_wire = 0.;
    pin->pin_power->buffer_size = 0.;
    pin->pin_power->scaled_by_pin = NULL;

    t_port * port_to_find = pin->port->port_power->scaled_by_port;
    if (!port_to_find) {
        return; /* This pin is not scaled by another port */
    }

    const int scaled_pin_idx = pin->port->port_power->scaled_by_port_pin_idx;

    /* Search one pin array (pins[port][pin]) for the port to find; returns
     * the scaling pin, or NULL if the port is not in this array. */
    auto search_ports = [&](t_pb_graph_pin ** pins,
            int num_ports) -> t_pb_graph_pin * {
        for (int port_idx = 0; port_idx < num_ports; port_idx++) {
            if (pins[port_idx][0].port == port_to_find) {
                return &pins[port_idx][scaled_pin_idx];
            }
        }
        return NULL;
    };

    /* Search input, then output, then clock ports */
    t_pb_graph_pin * scaled_by = search_ports(node->input_pins,
            node->num_input_ports);
    if (!scaled_by) {
        scaled_by = search_ports(node->output_pins, node->num_output_ports);
    }
    if (!scaled_by) {
        scaled_by = search_ports(node->clock_pins, node->num_clock_ports);
    }

    VTR_ASSERT(scaled_by);
    pin->pin_power->scaled_by_pin = scaled_by;
}
/* Recursively allocates/initializes the power structures of every pin in
 * the pb graph rooted at pb_node. */
void power_init_pb_pins_rec(t_pb_graph_node * pb_node) {
    /* Initialize every pin of one pin array (pins[port][pin]) */
    auto init_pins = [](t_pb_graph_pin ** pins, int num_ports,
            int * num_pins) {
        for (int port = 0; port < num_ports; port++) {
            for (int pin = 0; pin < num_pins[port]; pin++) {
                power_alloc_and_init_pb_pin(&pins[port][pin]);
            }
        }
    };

    init_pins(pb_node->input_pins, pb_node->num_input_ports,
            pb_node->num_input_pins);
    init_pins(pb_node->output_pins, pb_node->num_output_ports,
            pb_node->num_output_pins);
    init_pins(pb_node->clock_pins, pb_node->num_clock_ports,
            pb_node->num_clock_pins);

    /* Recurse into the children of every mode */
    for (int mode = 0; mode < pb_node->pb_type->num_modes; mode++) {
        for (int type = 0;
                type < pb_node->pb_type->modes[mode].num_pb_type_children;
                type++) {
            for (int pb = 0;
                    pb < pb_node->pb_type->modes[mode].pb_type_children[type].num_pb;
                    pb++) {
                power_init_pb_pins_rec(
                        &pb_node->child_pb_graph_nodes[mode][type][pb]);
            }
        }
    }
}
void power_pb_pins_init() {
int type_idx;
auto& device_ctx = g_vpr_ctx.device();
for (type_idx = 0; type_idx < device_ctx.num_block_types; type_idx++) {
if (device_ctx.block_types[type_idx].pb_graph_head) {
power_init_pb_pins_rec(device_ctx.block_types[type_idx].pb_graph_head);
}
}
}
/* Initializes the power structures of the routing-resource (RR) graph:
 * copies switching activity from the atom netlist onto the clustered
 * netlist, allocates per-RR-node power records, and gathers the maximum
 * mux/fanin/fanout sizes used later when sizing routing buffers and
 * multiplexers (stored in power_ctx.commonly_used). */
void power_routing_init(const t_det_routing_arch * routing_arch) {
	int rr_node_idx;
	int max_fanin;
	int max_IPIN_fanin;
	int max_seg_to_IPIN_fanout;
	int max_seg_to_seg_fanout;
	int max_seg_fanout;
	auto& power_ctx = g_vpr_ctx.mutable_power();
	auto& device_ctx = g_vpr_ctx.device();
	auto& cluster_ctx = g_vpr_ctx.clustering();
	auto& atom_ctx = g_vpr_ctx.atom();

	/* Copy probability/density values to new netlist */
	if (power_ctx.clb_net_power.size() == 0) {
		power_ctx.clb_net_power.resize(cluster_ctx.clb_nlist.nets().size());
	}
	for (auto net_id : cluster_ctx.clb_nlist.nets()) {
		power_ctx.clb_net_power[net_id].probability = power_ctx.atom_net_power[atom_ctx.lookup.atom_net(net_id)].probability;
		power_ctx.clb_net_power[net_id].density = power_ctx.atom_net_power[atom_ctx.lookup.atom_net(net_id)].density;
	}

	/* Initialize RR Graph Structures */
	rr_node_power = (t_rr_node_power*) vtr::calloc(device_ctx.num_rr_nodes,
			sizeof(t_rr_node_power));
	for (rr_node_idx = 0; rr_node_idx < device_ctx.num_rr_nodes; rr_node_idx++) {
		/* OPEN = "driver switch not yet known"; filled in further below. */
		rr_node_power[rr_node_idx].driver_switch_type = OPEN;
	}

	/* Initialize Mux Architectures */
	max_fanin = 0;
	max_IPIN_fanin = 0;
	max_seg_to_seg_fanout = 0;
	max_seg_to_IPIN_fanout = 0;
	for (rr_node_idx = 0; rr_node_idx < device_ctx.num_rr_nodes; rr_node_idx++) {
		int switch_idx;
		int fanout_to_IPIN = 0;
		int fanout_to_seg = 0;
		t_rr_node * node = &device_ctx.rr_nodes[rr_node_idx];
		t_rr_node_power * node_power = &rr_node_power[rr_node_idx];

		switch (node->type()) {
		case IPIN:
			max_IPIN_fanin = max(max_IPIN_fanin,
					static_cast<int>(node->fan_in()));
			max_fanin = max(max_fanin, static_cast<int>(node->fan_in()));

			/* Per-input switching density/probability; one slot per fanin. */
			node_power->in_dens = (float*) vtr::calloc(node->fan_in(),
					sizeof(float));
			node_power->in_prob = (float*) vtr::calloc(node->fan_in(),
					sizeof(float));
			break;
		case CHANX:
		case CHANY:
			/* Classify each outgoing edge: to an IPIN (via the wire->IPIN
			 * switch) or to another segment; delayless edges are ignored. */
			for (switch_idx = 0; switch_idx < node->num_edges(); switch_idx++) {
				if (node->edge_switch(switch_idx) == routing_arch->wire_to_rr_ipin_switch) {
					fanout_to_IPIN++;
				} else if (node->edge_switch(switch_idx) != routing_arch->delayless_switch) {
					fanout_to_seg++;
				}
			}

			max_seg_to_IPIN_fanout = max(max_seg_to_IPIN_fanout,
					fanout_to_IPIN);
			max_seg_to_seg_fanout = max(max_seg_to_seg_fanout, fanout_to_seg);
			max_fanin = max(max_fanin, static_cast<int>(node->fan_in()));

			node_power->in_dens = (float*) vtr::calloc(node->fan_in(),
					sizeof(float));
			node_power->in_prob = (float*) vtr::calloc(node->fan_in(),
					sizeof(float));
			break;
		default:
			/* Do nothing */
			break;
		}
	}
	power_ctx.commonly_used->max_routing_mux_size = max_fanin;
	power_ctx.commonly_used->max_IPIN_fanin = max_IPIN_fanin;
	power_ctx.commonly_used->max_seg_to_seg_fanout = max_seg_to_seg_fanout;
	power_ctx.commonly_used->max_seg_to_IPIN_fanout = max_seg_to_IPIN_fanout;

#ifdef PRINT_SPICE_COMPARISON
	/* Force a minimum mux size so SPICE comparison circuits can be built. */
	power_ctx.commonly_used->max_routing_mux_size =
			max(power_ctx.commonly_used->max_routing_mux_size, 26);
#endif

	/* Populate driver switch type */
	for (rr_node_idx = 0; rr_node_idx < device_ctx.num_rr_nodes; rr_node_idx++) {
		t_rr_node * node = &device_ctx.rr_nodes[rr_node_idx];
		int edge_idx;

		for (edge_idx = 0; edge_idx < node->num_edges(); edge_idx++) {
			if (node->edge_sink_node(edge_idx) != OPEN) {
				if (rr_node_power[node->edge_sink_node(edge_idx)].driver_switch_type == OPEN) {
					rr_node_power[node->edge_sink_node(edge_idx)].driver_switch_type = node->edge_switch(edge_idx);
				} else {
					/* The model assumes all edges driving a given node use
					 * the same switch type. */
					VTR_ASSERT(rr_node_power[node->edge_sink_node(edge_idx)].driver_switch_type == node->edge_switch(edge_idx));
				}
			}
		}
	}

	/* Find Max Fanout of Routing Buffer */
	max_seg_fanout = 0;
	for (rr_node_idx = 0; rr_node_idx < device_ctx.num_rr_nodes; rr_node_idx++) {
		t_rr_node * node = &device_ctx.rr_nodes[rr_node_idx];

		switch (node->type()) {
		case CHANX:
		case CHANY:
			if (node->num_edges() > max_seg_fanout) {
				max_seg_fanout = node->num_edges();
			}
			break;
		default:
			/* Do nothing */
			break;
		}
	}
	power_ctx.commonly_used->max_seg_fanout = max_seg_fanout;
}
/**
 * Initialization for all power-related functions
 *
 * Allocates the global power context (commonly-used values, technology
 * info, output logs), opens the power report file, then runs the
 * per-module initializers in dependency order.
 *
 * - power_out_filepath: path of the power report file to write
 * - cmos_tech_behavior_filepath: CMOS technology behavior file to load
 *
 * Returns true on error (currently only when the output file cannot be
 * opened; the remaining initializers run regardless).
 */
bool power_init(const char * power_out_filepath,
		const char * cmos_tech_behavior_filepath, const t_arch * arch,
		const t_det_routing_arch * routing_arch) {
	auto& power_ctx = g_vpr_ctx.mutable_power();
	bool error = false;

	/* Set global power architecture & options */
	power_ctx.arch = arch->power;
	power_ctx.commonly_used = new t_power_commonly_used;
	power_ctx.tech = (t_power_tech*) vtr::malloc(sizeof(t_power_tech));
	power_ctx.output = (t_power_output*) vtr::malloc(sizeof(t_power_output));

	/* Set up Logs */
	power_ctx.output->num_logs = POWER_LOG_NUM_TYPES;
	power_ctx.output->logs = (t_log*) vtr::calloc(power_ctx.output->num_logs,
			sizeof(t_log));
	power_ctx.output->logs[POWER_LOG_ERROR].name = vtr::strdup("Errors");
	power_ctx.output->logs[POWER_LOG_WARNING].name = vtr::strdup("Warnings");

	/* Initialize output file */
	if (!error) {
		power_ctx.output->out = NULL;
		power_ctx.output->out = vtr::fopen(power_out_filepath, "w");
		if (!power_ctx.output->out) {
			error = true;
		}
	}

	/* Load technology properties */
	power_tech_init(cmos_tech_behavior_filepath);

	/* Low-Level Initialization */
	power_lowlevel_init();

	/* Initialize sub-modules */
	power_components_init();

	/* Perform calibration (the "callibrate" spelling below comes from the API) */
	power_callibrate();

	/* Initialize routing information */
	power_routing_init(routing_arch);

	// Allocates power structures for each pb pin
	power_pb_pins_init();

	/* Size all components */
	power_sizing_init(arch);

	//power_print_spice_comparison();
	// power_print_callibration();

	return error;
}
/**
 * Uninitialize power module
 *
 * Frees everything allocated by power_init()/power_routing_init():
 * the per-RR-node activity arrays, the mux architecture graphs, the
 * report file handle, and the error/warning logs.
 */
bool power_uninit(void) {
	int mux_size;
	int log_idx;
	int rr_node_idx;
	int msg_idx;
	auto& device_ctx = g_vpr_ctx.device();
	auto& power_ctx = g_vpr_ctx.power();
	bool error = false;

	/* Free the per-node input density/probability arrays; only node types
	 * for which power_routing_init() allocated them are visited. */
	for (rr_node_idx = 0; rr_node_idx < device_ctx.num_rr_nodes; rr_node_idx++) {
		t_rr_node * node = &device_ctx.rr_nodes[rr_node_idx];
		t_rr_node_power * node_power = &rr_node_power[rr_node_idx];

		switch (node->type()) {
		case CHANX:
		case CHANY:
		case IPIN:
			if (node->fan_in()) {
				free(node_power->in_dens);
				free(node_power->in_prob);
			}
			break;
		default:
			/* Do nothing */
			break;
		}
	}
	free(rr_node_power);

	/* Free mux architectures */
	for (std::map<float, t_power_mux_info*>::iterator it =
			power_ctx.commonly_used->mux_info.begin();
			it != power_ctx.commonly_used->mux_info.end(); it++) {
		t_power_mux_info * mux_info = it->second;
		for (mux_size = 1; mux_size <= mux_info->mux_arch_max_size;
				mux_size++) {
			dealloc_mux_graph(mux_info->mux_arch[mux_size].mux_graph_head);
		}
		delete mux_info;
	}
	delete power_ctx.commonly_used;

	if (power_ctx.output->out) {
		fclose(power_ctx.output->out);
	}

	/* Free logs */
	for (log_idx = 0; log_idx < power_ctx.output->num_logs; log_idx++) {
		for (msg_idx = 0; msg_idx < power_ctx.output->logs[log_idx].num_messages;
				msg_idx++) {
			free(power_ctx.output->logs[log_idx].messages[msg_idx]);
		}
		free(power_ctx.output->logs[log_idx].messages);
		free(power_ctx.output->logs[log_idx].name);
	}
	free(power_ctx.output->logs);
	free(power_ctx.output);

	/* NOTE(review): `error` is never set in this function, so it always
	 * returns false; kept for interface symmetry with power_init(). */
	return error;
}
#if 0
/**
* Prints the power of all pb structures, in an xml format that matches the archicture file
*/
static void power_print_pb_usage_recursive(FILE * fp, t_pb_type * type,
int indent_level, float parent_power, float total_power) {
int mode_idx;
int mode_indent;
int child_idx;
int interc_idx;
float pb_type_power;
pb_type_power = type->pb_type_power->power_usage.dynamic
+ type->pb_type_power->power_usage.leakage;
print_tabs(fp, indent_level);
fprintf(fp,
"<pb_type name=\"%s\" P=\"%.4g\" P_parent=\"%.3g\" P_total=\"%.3g\" P_dyn=\"%.3g\" >\n",
type->name, pb_type_power, pb_type_power / parent_power * 100,
pb_type_power / total_power * 100,
type->pb_type_power->power_usage.dynamic / pb_type_power);
mode_indent = 0;
if (type->num_modes > 1) {
mode_indent = 1;
}
for (mode_idx = 0; mode_idx < type->num_modes; mode_idx++) {
float mode_power;
mode_power = type->modes[mode_idx].mode_power->power_usage.dynamic
+ type->modes[mode_idx].mode_power->power_usage.leakage;
if (type->num_modes > 1) {
print_tabs(fp, indent_level + mode_indent);
fprintf(fp,
"<mode name=\"%s\" P=\"%.4g\" P_parent=\"%.3g\" P_total=\"%.3g\" P_dyn=\"%.3g\">\n",
type->modes[mode_idx].name, mode_power,
mode_power / pb_type_power * 100,
mode_power / total_power * 100,
type->modes[mode_idx].mode_power->power_usage.dynamic
/ mode_power);
}
if (type->modes[mode_idx].num_interconnect) {
/* Sum the interconnect power */
t_power_usage interc_power_usage;
float interc_total_power;
power_zero_usage(&interc_power_usage);
for (interc_idx = 0;
interc_idx < type->modes[mode_idx].num_interconnect;
interc_idx++) {
power_add_usage(&interc_power_usage,
&type->modes[mode_idx].interconnect[interc_idx].interconnect_power->power_usage);
}
interc_total_power = interc_power_usage.dynamic
+ interc_power_usage.leakage;
/* All interconnect */
print_tabs(fp, indent_level + mode_indent + 1);
fprintf(fp,
"<interconnect P=\"%.4g\" P_parent=\"%.3g\" P_total=\"%.3g\" P_dyn=\"%.3g\">\n",
interc_total_power, interc_total_power / mode_power * 100,
interc_total_power / total_power * 100,
interc_power_usage.dynamic / interc_total_power);
for (interc_idx = 0;
interc_idx < type->modes[mode_idx].num_interconnect;
interc_idx++) {
float interc_power =
type->modes[mode_idx].interconnect[interc_idx].interconnect_power->power_usage.dynamic
+ type->modes[mode_idx].interconnect[interc_idx].interconnect_power->power_usage.leakage;
/* Each interconnect */
print_tabs(fp, indent_level + mode_indent + 2);
fprintf(fp,
"<%s name=\"%s\" P=\"%.4g\" P_parent=\"%.3g\" P_total=\"%.3g\" P_dyn=\"%.3g\"/>\n",
interconnect_type_name(
type->modes[mode_idx].interconnect[interc_idx].type),
type->modes[mode_idx].interconnect[interc_idx].name,
interc_power, interc_power / interc_total_power * 100,
interc_power / total_power * 100,
type->modes[mode_idx].interconnect[interc_idx].interconnect_power->power_usage.dynamic
/ interc_power);
}
print_tabs(fp, indent_level + mode_indent + 1);
fprintf(fp, "</interconnect>\n");
}
for (child_idx = 0;
child_idx < type->modes[mode_idx].num_pb_type_children;
child_idx++) {
power_print_pb_usage_recursive(fp,
&type->modes[mode_idx].pb_type_children[child_idx],
indent_level + mode_indent + 1,
type->modes[mode_idx].mode_power->power_usage.dynamic
+ type->modes[mode_idx].mode_power->power_usage.leakage,
total_power);
}
if (type->num_modes > 1) {
print_tabs(fp, indent_level + mode_indent);
fprintf(fp, "</mode>\n");
}
}
print_tabs(fp, indent_level);
fprintf(fp, "</pb_type>\n");
}
static void power_print_clb_detailed(FILE * fp) {
int type_idx;
auto& device_ctx = g_vpr_ctx.device();
float clb_power_total = power_component_get_usage_sum(
POWER_COMPONENT_PB);
for (type_idx = 0; type_idx < device_ctx.num_block_types; type_idx++) {
if (!device_ctx.block_types[type_idx].pb_type) {
continue;
}
power_print_pb_usage_recursive(fp, device_ctx.block_types[type_idx].pb_type,
0, clb_power_total, clb_power_total);
}
}
#endif
/*
static void power_print_stats(FILE * fp) {
auto& power_ctx = g_vpr_ctx.power();
fprintf(fp, "Max Segment Fanout: %d\n",
power_ctx.commonly_used->max_seg_fanout);
fprintf(fp, "Max Segment->Segment Fanout: %d\n",
power_ctx.commonly_used->max_seg_to_seg_fanout);
fprintf(fp, "Max Segment->IPIN Fanout: %d\n",
power_ctx.commonly_used->max_seg_to_IPIN_fanout);
fprintf(fp, "Max IPIN fanin: %d\n", power_ctx.commonly_used->max_IPIN_fanin);
fprintf(fp, "Average SB Buffer Size: %.1f\n",
power_ctx.commonly_used->total_sb_buffer_size
/ (float) power_ctx.commonly_used->num_sb_buffers);
fprintf(fp, "SB Buffer Transistors: %g\n",
power_count_transistors_buffer(
power_ctx.commonly_used->total_sb_buffer_size
/ (float) power_ctx.commonly_used->num_sb_buffers));
fprintf(fp, "Average CB Buffer Size: %.1f\n",
power_ctx.commonly_used->total_cb_buffer_size
/ (float) power_ctx.commonly_used->num_cb_buffers);
fprintf(fp, "Tile length (um): %.2f\n",
power_ctx.commonly_used->tile_length * CONVERT_UM_PER_M);
fprintf(fp, "1X Inverter C_in: %g\n", power_ctx.commonly_used->INV_1X_C_in);
fprintf(fp, "\n");
}
*/
/**
 * Returns a human-readable name for a power estimation method, used in the
 * "Method" column of the per-PB power breakdown table.
 */
static const char * power_estimation_method_name(
		e_power_estimation_method power_method) {
	switch (power_method) {
	case POWER_METHOD_UNDEFINED:
		return "Undefined";
	case POWER_METHOD_IGNORE:
		return "Ignore";
	case POWER_METHOD_AUTO_SIZES:
		return "Transistor Auto-Size";
	case POWER_METHOD_SPECIFY_SIZES:
		return "Transistor Specify-Size";
	case POWER_METHOD_TOGGLE_PINS:
		return "Pin-Toggle";
	case POWER_METHOD_C_INTERNAL:
		return "C-Internal";
	case POWER_METHOD_ABSOLUTE:
		return "Absolute";
	case POWER_METHOD_SUM_OF_CHILDREN:
		return "Sum of Children";
	default:
		/* Bug fix: was misspelled "Unkown". */
		return "Unknown";
	}
}
/**
 * Recursively prints the power breakdown of a pb_type and everything below
 * it (modes, local interconnect, child pb_types), one indented row per
 * hierarchy level, each expressed as a fraction of total FPGA power.
 *
 *   fp      - output stream
 *   pb_type - root of the pb_type hierarchy to print
 *   indent  - current indentation level
 *
 * Bug fix: removed a local "buf" that was filled via strncpy but never
 * read anywhere in the function, and whose terminating write
 * (buf[strlen(name) + indent] = '\0') could run past the 51-byte array
 * for long type names or deep indents.
 */
static void power_print_breakdown_pb_rec(FILE * fp, t_pb_type * pb_type,
		int indent) {
	int mode_idx;
	int child_idx;
	int child_indent;
	int interc_idx;
	t_mode * mode;
	t_power_usage interc_usage;
	e_power_estimation_method est_method =
			pb_type->pb_type_power->estimation_method;
	float total_power = power_component_get_usage_sum(POWER_COMPONENT_TOTAL);
	t_pb_type_power * pb_power = pb_type->pb_type_power;

	/* Row for this pb_type itself. */
	power_print_breakdown_entry(fp, indent, POWER_BREAKDOWN_ENTRY_TYPE_PB,
			pb_type->name, power_sum_usage(&pb_power->power_usage), total_power,
			power_perc_dynamic(&pb_power->power_usage),
			power_estimation_method_name(
					pb_type->pb_type_power->estimation_method));

	if (power_method_is_transistor_level(
			pb_type->pb_type_power->estimation_method)) {
		/* Local bufs and wires */
		power_print_breakdown_entry(fp, indent + 1,
				POWER_BREAKDOWN_ENTRY_TYPE_BUFS_WIRES, "Bufs/Wires",
				power_sum_usage(&pb_power->power_usage_bufs_wires), total_power,
				power_perc_dynamic(&pb_power->power_usage_bufs_wires), NULL);
	}

	if (power_method_is_recursive(est_method)) {
		/* Children are indented one extra level when mode rows are shown. */
		if (pb_type->num_modes > 1) {
			child_indent = indent + 2;
		} else {
			child_indent = indent + 1;
		}

		for (mode_idx = 0; mode_idx < pb_type->num_modes; mode_idx++) {
			mode = &pb_type->modes[mode_idx];

			if (pb_type->num_modes > 1) {
				power_print_breakdown_entry(fp, indent + 1,
						POWER_BREAKDOWN_ENTRY_TYPE_MODE, mode->name,
						power_sum_usage(&mode->mode_power->power_usage),
						total_power,
						power_perc_dynamic(&mode->mode_power->power_usage),
						NULL);
			}

			/* Interconnect Power */
			power_zero_usage(&interc_usage);

			/* Sum the interconnect */
			if (power_method_is_transistor_level(est_method)) {
				for (interc_idx = 0; interc_idx < mode->num_interconnect;
						interc_idx++) {
					power_add_usage(&interc_usage,
							&mode->interconnect[interc_idx].interconnect_power->power_usage);
				}
				if (mode->num_interconnect) {
					power_print_breakdown_entry(fp, child_indent,
							POWER_BREAKDOWN_ENTRY_TYPE_INTERC, "Interc:",
							power_sum_usage(&interc_usage), total_power,
							power_perc_dynamic(&interc_usage), NULL);
				}

				/* Print Interconnect Breakdown */
				for (interc_idx = 0; interc_idx < mode->num_interconnect;
						interc_idx++) {
					t_interconnect * interc = &mode->interconnect[interc_idx];
					if (interc->type == DIRECT_INTERC) {
						// no power - skip
					} else {
						power_print_breakdown_entry(fp, child_indent + 1,
								POWER_BREAKDOWN_ENTRY_TYPE_INTERC, interc->name,
								power_sum_usage(
										&interc->interconnect_power->power_usage),
								total_power,
								power_perc_dynamic(
										&interc->interconnect_power->power_usage),
								NULL);
					}
				}
			}

			/* Recurse into all children of this mode. */
			for (child_idx = 0;
					child_idx < pb_type->modes[mode_idx].num_pb_type_children;
					child_idx++) {
				power_print_breakdown_pb_rec(fp,
						&pb_type->modes[mode_idx].pb_type_children[child_idx],
						child_indent);
			}
		}
	}
}
/**
 * Prints the report summary: circuit/architecture names, technology,
 * operating conditions, device size and channel width.
 *
 * Bug fix: the first two fprintf calls previously wrote to
 * power_ctx.output->out, ignoring the fp argument. The only caller passes
 * power_ctx.output->out, so output is unchanged, but the function now
 * honours its parameter consistently.
 */
static void power_print_summary(FILE * fp, const t_vpr_setup& vpr_setup) {
	auto& power_ctx = g_vpr_ctx.power();
	auto& device_ctx = g_vpr_ctx.device();

	fprintf(fp, "Circuit: %s\n", vpr_setup.FileNameOpts.CircuitName.c_str());
	fprintf(fp, "Architecture: %s\n", vtr::basename(vpr_setup.FileNameOpts.ArchFile).c_str());
	fprintf(fp, "Technology (nm): %.0f\n",
			power_ctx.tech->tech_size * CONVERT_NM_PER_M);
	fprintf(fp, "Voltage: %.2f\n", power_ctx.tech->Vdd);
	fprintf(fp, "Temperature: %g\n", power_ctx.tech->temperature);
	fprintf(fp, "Critical Path: %g\n", power_ctx.solution_inf.T_crit);
	fprintf(fp, "Size of FPGA: %zu x %zu\n", device_ctx.grid.width(), device_ctx.grid.height());
	fprintf(fp, "Channel Width: %d\n", power_ctx.solution_inf.channel_width);
	fprintf(fp, "\n");
}
/*
 * Top-level function for the power module.
 * Calculates the average power of the entire FPGA (watts),
 * and prints it to the output file
 * - run_time_s: (Return value) The total runtime in seconds (us accuracy)
 *
 * Returns POWER_RET_CODE_ERRORS / POWER_RET_CODE_WARNINGS if any messages
 * were logged during estimation, POWER_RET_CODE_SUCCESS otherwise.
 */
e_power_ret_code power_total(float * run_time_s, const t_vpr_setup& vpr_setup,
		const t_arch * arch, const t_det_routing_arch * routing_arch) {
	t_power_usage total_power;
	t_power_usage sub_power_usage;
	clock_t t_start;
	clock_t t_end;
	t_power_usage clb_power_usage;
	auto& power_ctx = g_vpr_ctx.power();

	t_start = clock();

	power_zero_usage(&total_power);

	/* The power model only supports uni-directional routing. */
	if (routing_arch->directionality == BI_DIRECTIONAL) {
		power_log_msg(POWER_LOG_ERROR,
				"Cannot calculate routing power for bi-directional architectures");
		return POWER_RET_CODE_ERRORS;
	}

	/* Calculate Power */

	/* Routing */
	power_usage_routing(&sub_power_usage, routing_arch, arch->Segments);
	power_add_usage(&total_power, &sub_power_usage);
	power_component_add_usage(&sub_power_usage, POWER_COMPONENT_ROUTING);

	/* Clock */
	power_usage_clock(&sub_power_usage, arch->clocks);
	power_add_usage(&total_power, &sub_power_usage);
	power_component_add_usage(&sub_power_usage, POWER_COMPONENT_CLOCK);

	/* CLBs */
	power_usage_blocks(&clb_power_usage);
	power_add_usage(&total_power, &clb_power_usage);
	power_component_add_usage(&clb_power_usage, POWER_COMPONENT_PB);

	power_component_add_usage(&total_power, POWER_COMPONENT_TOTAL);

	/* Write the report: summary, logs, then the component/PB breakdowns. */
	power_print_title(power_ctx.output->out, "Summary");
	power_print_summary(power_ctx.output->out, vpr_setup);

	/* Print Error & Warning Logs */
	output_logs(power_ctx.output->out, power_ctx.output->logs,
			power_ctx.output->num_logs);

	//power_print_title(power_ctx.output->out, "Statistics");
	//power_print_stats(power_ctx.output->out);

	power_print_title(power_ctx.output->out, "Power Breakdown");
	power_print_breakdown_summary(power_ctx.output->out);

	power_print_title(power_ctx.output->out, "Power Breakdown by PB");
	power_print_breakdown_pb(power_ctx.output->out);

	//power_print_title(power_ctx.output->out, "Spice Comparison");
	//power_print_spice_comparison();

	t_end = clock();

	*run_time_s = (float) (t_end - t_start) / CLOCKS_PER_SEC;

	/* Return code */
	if (power_ctx.output->logs[POWER_LOG_ERROR].num_messages) {
		return POWER_RET_CODE_ERRORS;
	} else if (power_ctx.output->logs[POWER_LOG_WARNING].num_messages) {
		return POWER_RET_CODE_WARNINGS;
	} else {
		return POWER_RET_CODE_SUCCESS;
	}
}
/**
 * Prints the power usage for all components
 * - fp: File descriptor to print out to
 *
 * Prints the table header row, then the whole component tree rooted at
 * POWER_COMPONENT_TOTAL (see power_print_breakdown_component).
 */
static void power_print_breakdown_summary(FILE * fp) {
	power_print_breakdown_entry(fp, 0, POWER_BREAKDOWN_ENTRY_TYPE_TITLE, NULL,
			0., 0., 0., NULL);
	power_print_breakdown_component(fp, "Total", POWER_COMPONENT_TOTAL, 0);
	fprintf(fp, "\n");
}
/**
 * Prints the per-PB power breakdown section: an explanatory preamble, the
 * table header, then one recursive breakdown per block type that has a
 * pb_type (see power_print_breakdown_pb_rec).
 *
 * Bug fixes: corrected two user-visible typos in the preamble
 * ("This sections" -> "This section", "enery" -> "energy") and made the
 * recursion print to the fp argument instead of power_ctx.output->out
 * (the only caller passes that stream, so output is unchanged).
 */
static void power_print_breakdown_pb(FILE * fp) {
	fprintf(fp,
			"This section provides a detailed breakdown of power usage by PB (physical\n"
			"block). For each PB, the power is listed, which is the sum power of all\n"
			"instances of the block. It also indicates its percentage of total power (entire\n"
			"FPGA), as well as the percentage of its power that is dynamic (vs. static). It\n"
			"also indicates the method used for power estimation.\n\n"
			"The data includes:\n"
			"\tModes:\t\tWhen a pb contains multiple modes, each mode is "
			"listed, with\n\t\t\t\tits power statistics.\n"
			"\tBufs/Wires:\tPower of all local "
			"buffers and local wire switching\n"
			"\t\t\t\t(transistor-level estimation only).\n"
			"\tInterc:\t\tPower of local interconnect multiplexers (transistor-\n"
			"\t\t\t\tlevel estimation only)\n\n"
			"Description of Estimation Methods:\n"
			"\tTransistor Auto-Size: Transistor-level power estimation. Local buffers and\n"
			"\t\twire lengths are automatically sized. This is the default estimation\n"
			"\t\tmethod.\n"
			"\tTransistor Specify-Size: Transistor-level power estimation. Local buffers\n"
			"\t\tand wire lengths are only inserted where specified by the user in the\n"
			"\t\tarchitecture file.\n"
			"\tPin-Toggle: Dynamic power is calculated using energy-per-toggle of the PB\n"
			"\t\tinput pins. Static power is absolute.\n"
			"\tC-Internal: Dynamic power is calculated using an internal equivalent\n"
			"\t\tcapacitance for PB type. Static power is absolute.\n"
			"\tAbsolute: Dynamic and static power are absolutes from the architecture file.\n"
			"\tSum of Children: Power of PB is only the sum of all child PBs; interconnect\n"
			"\t\tbetween the PB and its children is ignored.\n"
			"\tIgnore: Power of PB is ignored.\n\n\n");

	power_print_breakdown_entry(fp, 0, POWER_BREAKDOWN_ENTRY_TYPE_TITLE, NULL,
			0., 0., 0., NULL);

	auto& device_ctx = g_vpr_ctx.device();

	for (int type_idx = 0; type_idx < device_ctx.num_block_types; type_idx++) {
		if (device_ctx.block_types[type_idx].pb_type) {
			power_print_breakdown_pb_rec(fp,
					device_ctx.block_types[type_idx].pb_type, 0);
		}
	}
	fprintf(fp, "\n");
}
/**
 * Internal recursive function, used by power_print_breakdown_summary().
 * Prints one entry of the component table, then recurses into the
 * sub-components that make up this component (hierarchy hard-coded in the
 * switch below).
 */
static void power_print_breakdown_component(FILE * fp, const char * name,
		e_power_component_type type, int indent_level) {
	auto& power_ctx = g_vpr_ctx.power();

	power_print_breakdown_entry(fp, indent_level,
			POWER_BREAKDOWN_ENTRY_TYPE_COMPONENT, name,
			power_sum_usage(&power_ctx.by_component.components[type]),
			power_sum_usage(
					&power_ctx.by_component.components[POWER_COMPONENT_TOTAL]),
			power_perc_dynamic(&power_ctx.by_component.components[type]), NULL);

	switch (type) {
	case (POWER_COMPONENT_TOTAL):
		power_print_breakdown_component(fp, "Routing", POWER_COMPONENT_ROUTING,
				indent_level + 1);
		power_print_breakdown_component(fp, "PB Types", POWER_COMPONENT_PB,
				indent_level + 1);
		power_print_breakdown_component(fp, "Clock", POWER_COMPONENT_CLOCK,
				indent_level + 1);
		break;
	case (POWER_COMPONENT_ROUTING):
		power_print_breakdown_component(fp, "Switch Box",
				POWER_COMPONENT_ROUTE_SB, indent_level + 1);
		power_print_breakdown_component(fp, "Connection Box",
				POWER_COMPONENT_ROUTE_CB, indent_level + 1);
		power_print_breakdown_component(fp, "Global Wires",
				POWER_COMPONENT_ROUTE_GLB_WIRE, indent_level + 1);
		break;
	case (POWER_COMPONENT_CLOCK):
		/* Clock sub-components are currently not broken out. */
		/*
		 power_print_breakdown_component(fp, "Clock Buffers",
		 POWER_COMPONENT_CLOCK_BUFFER, indent_level + 1);
		 power_print_breakdown_component(fp, "Clock Wires",
		 POWER_COMPONENT_CLOCK_WIRE, indent_level + 1);
		 */
		break;
	case (POWER_COMPONENT_PB):
		power_print_breakdown_component(fp, "Primitives",
				POWER_COMPONENT_PB_PRIMITIVES, indent_level + 1);
		power_print_breakdown_component(fp, "Interc Structures",
				POWER_COMPONENT_PB_INTERC_MUXES, indent_level + 1);
		power_print_breakdown_component(fp, "Buffers and Wires",
				POWER_COMPONENT_PB_BUFS_WIRE, indent_level + 1);
		power_print_breakdown_component(fp, "Other Estimation Methods",
				POWER_COMPONENT_PB_OTHER, indent_level + 1);
		break;
	default:
		break;
	}
}
/**
 * Prints a single row of a power-breakdown table.
 *
 *   fp          - output stream
 *   indent      - number of leading spaces (hierarchy depth)
 *   type        - which row layout to use
 *   name        - entry name (NULL for the title row)
 *   power       - power of this entry (W)
 *   total_power - total FPGA power, for the %-Total column
 *   perc_dyn    - fraction of this entry's power that is dynamic
 *   method      - estimation method name (PB rows only)
 *
 * NOTE(review): assumes indent + 6 <= buf_size for MODE rows; indents are
 * small in practice — confirm if deeper hierarchies are ever printed.
 */
static void power_print_breakdown_entry(FILE * fp, int indent,
		e_power_breakdown_entry_type type, const char * name, float power,
		float total_power, float perc_dyn, const char * method) {
	const int buf_size = 32;
	char buf[buf_size];

	switch (type) {
	case POWER_BREAKDOWN_ENTRY_TYPE_TITLE:
		fprintf(fp, "%-*s%-12s%-12s%-12s%-12s\n\n", buf_size, "Component",
				"Power (W)", "%-Total", "%-Dynamic", "Method");
		break;
	case POWER_BREAKDOWN_ENTRY_TYPE_MODE:
		for (int i = 0; i < indent; i++)
			buf[i] = ' ';
		strcpy(buf + indent, "Mode:");
		strncpy(buf + indent + 5, name, buf_size - indent - 6);
		/* Bug fix: strncpy does not null-terminate when the source fills
		 * the destination; terminate explicitly like the other cases do. */
		buf[buf_size - 1] = '\0';
		fprintf(fp, "%-*s%-12.4g%-12.4g%-12.4g\n", buf_size, buf, power,
				power / total_power, perc_dyn);
		break;
	case POWER_BREAKDOWN_ENTRY_TYPE_COMPONENT:
	case POWER_BREAKDOWN_ENTRY_TYPE_INTERC:
	case POWER_BREAKDOWN_ENTRY_TYPE_BUFS_WIRES:
		for (int i = 0; i < indent; i++)
			buf[i] = ' ';
		strncpy(buf + indent, name, buf_size - indent - 1);
		buf[buf_size - 1] = '\0';
		fprintf(fp, "%-*s%-12.4g%-12.4g%-12.4g\n", buf_size, buf, power,
				power / total_power, perc_dyn);
		break;
	case POWER_BREAKDOWN_ENTRY_TYPE_PB:
		for (int i = 0; i < indent; i++)
			buf[i] = ' ';
		strncpy(buf + indent, name, buf_size - indent - 1);
		buf[buf_size - 1] = '\0';
		fprintf(fp, "%-*s%-12.4g%-12.4g%-12.4g%-12s\n", buf_size, buf, power,
				power / total_power, perc_dyn, method);
		break;
	default:
		break;
	}
}
|
Hrom131/terraform-provider-tokend
|
vendor/gitlab.com/tokend/regources/offer.go
|
<reponame>Hrom131/terraform-provider-tokend
package regources
import (
"gitlab.com/distributed_lab/logan/v3/fields"
"strconv"
)
// Offer represents a single order-book offer as rendered by Horizon:
// it is identified by OfferID, owned by OwnerID, and the embedded
// OfferData carries the asset codes, amounts, price and creation time.
type Offer struct {
	PT             string `json:"paging_token"`
	OwnerID        string `json:"owner_id"`
	OfferID        uint64 `json:"offer_id"`
	OrderBookID    uint64 `json:"order_book_id"`
	BaseBalanceID  string `json:"base_balance_id"`
	QuoteBalanceID string `json:"quote_balance_id"`
	Fee            Amount `json:"fee"`

	OfferData
}
// PagingToken implements hal.Pageable for Horizon; offers are paged by
// their numeric OfferID, rendered in base 10 (note: the PT field is not
// used here).
func (o Offer) PagingToken() string {
	return strconv.FormatUint(o.OfferID, 10)
}
// GetLoganFields exposes the offer's identifying fields for structured
// logging (logan), merged with the fields of the embedded OfferData.
func (o Offer) GetLoganFields() map[string]interface{} {
	return fields.Merge(map[string]interface{}{
		"paging_token":     o.PT,
		"owner_id":         o.OwnerID,
		"offer_id":         o.OfferID,
		"order_book_id":    o.OrderBookID,
		"base_balance_id":  o.BaseBalanceID,
		"quote_balance_id": o.QuoteBalanceID,
		"fee":              o.Fee,
	}, o.OfferData.GetLoganFields())
}
// OfferData holds the trade details of an offer: the asset pair, the
// direction (IsBuy), the amount on each side, the price and the creation
// time.
type OfferData struct {
	BaseAssetCode  string `json:"base_asset_code"`
	QuoteAssetCode string `json:"quote_asset_code"`
	IsBuy          bool   `json:"is_buy"`
	BaseAmount     Amount `json:"base_amount"`
	QuoteAmount    Amount `json:"quote_amount"`
	Price          Amount `json:"price"`
	CreatedAt      Time   `json:"created_at"`
}
// GetLoganFields exposes the offer's trade details for structured logging
// (logan), keyed by the same names as the JSON representation.
func (d OfferData) GetLoganFields() map[string]interface{} {
	logFields := make(map[string]interface{}, 7)
	logFields["base_asset_code"] = d.BaseAssetCode
	logFields["quote_asset_code"] = d.QuoteAssetCode
	logFields["is_buy"] = d.IsBuy
	logFields["base_amount"] = d.BaseAmount
	logFields["quote_amount"] = d.QuoteAmount
	logFields["price"] = d.Price
	logFields["created_at"] = d.CreatedAt
	return logFields
}
|
armanbilge/Laika
|
io/src/main/scala/laika/helium/builder/HeliumDirectives.scala
|
/*
* Copyright 2012-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package laika.helium.builder
import cats.syntax.all._
import laika.ast.Path.Root
import laika.ast.{Path, TemplateSpanSequence, TemplateString}
import laika.config.LaikaKeys
import laika.directive.Templates
import laika.rewrite.Versions
import laika.rewrite.nav.{ConfigurablePathTranslator, TranslatorConfig, TranslatorSpec}
/** Template directives specific to the Helium theme.
  *
  * @author <NAME>
  */
private[helium] object HeliumDirectives {

  /* Renders the <script> call that initializes the version switcher.
   * For versioned documents the current document's output path is computed
   * via a ConfigurablePathTranslator and passed together with the current
   * version's path segment; for unversioned documents both are empty.
   * When no Versions config is present, nothing is rendered. */
  val initVersions: Templates.Directive = Templates.create("heliumInitVersions") {
    Templates.dsl.cursor.map { cursor =>
      val versions = cursor.config.get[Versions].toOption
      val html = versions.fold("") { versions =>
        val isVersioned = cursor.config.get[Boolean](LaikaKeys.versioned).getOrElse(false)
        // relative prefix back to the site root (one level less when unversioned)
        val localRootPrefix = "../" * (cursor.path.depth - (if (isVersioned) 0 else 1))
        val (currentPath, currentVersion) = if (isVersioned) {
          // translator lookup only needs to resolve the current document
          val lookup: Path => Option[TranslatorSpec] = path =>
            if (path == cursor.path) Some(TranslatorSpec(isStatic = false, isVersioned = false)) else None
          val config = TranslatorConfig.readFrom(cursor.root.config).getOrElse(TranslatorConfig.empty)
          val translator = ConfigurablePathTranslator(config, "html", "html", Root / "doc", lookup)
          val path = translator.translate(cursor.path).toString
          val version = versions.currentVersion.pathSegment
          (path, version)
        } else ("", "")
        s"""<script>initVersions("$localRootPrefix", "$currentPath", "$currentVersion");</script>"""
      }
      TemplateString(html)
    }
  }

  /* Renders the <script> call that initializes preview mode for the given
   * target element ids, but only when laika.preview.enabled is true;
   * otherwise renders nothing. Config errors surface as directive errors. */
  val initPreview: Templates.Directive = Templates.eval("heliumInitPreview") {
    import Templates.dsl._
    (positionalAttributes.as[String].widen, cursor).mapN { (targetIds, cursor) =>
      val res = for {
        enabled <- cursor.config.get(LaikaKeys.preview.enabled, false)
      } yield {
        val idArray = targetIds.mkString("[\"", "\",\"", "\"]")
        if (enabled) TemplateString(s"""<script>initPreview($idArray);</script>""")
        else TemplateSpanSequence.empty
      }
      res.leftMap(_.message)
    }
  }

  /** All Helium-specific template directives. */
  val all: Seq[Templates.Directive] = Seq(initVersions, initPreview)
}
|
usenixatc2021/SoftRefresh_Scheduling
|
linsched-linsched-alpha/arch/arm/mach-s5pv210/include/mach/pm-core.h
|
/* linux/arch/arm/mach-s5pv210/include/mach/pm-core.h
*
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Based on arch/arm/mach-s3c2410/include/mach/pm-core.h,
* Copyright 2008 Simtec Electronics
* <NAME> <<EMAIL>>
* http://armlinux.simtec.co.uk/
*
* S5PV210 - PM core support for arch/arm/plat-s5p/pm.c
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/* PM-core hook for low-level UART debug setup; no-op on S5PV210. */
static inline void s3c_pm_debug_init_uart(void)
{
	/* nothing here yet */
}
/* Program the wakeup sources before entering sleep: copies the interrupt
 * and external-interrupt wakeup masks into the S5P wakeup mask registers. */
static inline void s3c_pm_arch_prepare_irqs(void)
{
	__raw_writel(s3c_irqwake_intmask, S5P_WAKEUP_MASK);
	__raw_writel(s3c_irqwake_eintmask, S5P_EINT_WAKEUP_MASK);
}
/* PM-core hook to stop clocks before suspend; no-op on S5PV210. */
static inline void s3c_pm_arch_stop_clocks(void)
{
	/* nothing here yet */
}
/* PM-core hook to report which IRQs caused resume; no-op on S5PV210. */
static inline void s3c_pm_arch_show_resume_irqs(void)
{
	/* nothing here yet */
}
/* PM-core hook to fix up saved UART register state on resume;
 * no-op on S5PV210. */
static inline void s3c_pm_arch_update_uart(void __iomem *regs,
					   struct pm_uart_save *save)
{
	/* nothing here yet */
}
/* Hook called after GPIO state is restored on resume; no-op here. */
static inline void s3c_pm_restored_gpios(void) { }
/* Hook called after GPIO state is saved on suspend; no-op here. */
static inline void samsung_pm_saved_gpios(void) { }
|
borasoftware/balau
|
src/main/cpp/Balau/Application/Impl/PropertyTypeSpecificationVisitor.hpp
|
// @formatter:off
//
// Balau core C++ library
//
// Copyright (C) 2017 <NAME> (<EMAIL>)
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef COM_BORA_SOFTWARE__BALAU_APPLICATION_IMPL__PROPERTY_TYPE_SPECIFICATION_VISITOR
#define COM_BORA_SOFTWARE__BALAU_APPLICATION_IMPL__PROPERTY_TYPE_SPECIFICATION_VISITOR
#include <Balau/Application/Impl/EnvironmentConfigurationBuilderUtils.hpp>
#include <Balau/Application/Impl/PropertyBindingBuilderFactory.hpp>
#include <Balau/Application/Impl/PropertyString.hpp>
#include <Balau/Container/ObjectTrie.hpp>
#include <Balau/Lang/Property/Util/PropertyVisitor.hpp>
#include <Balau/Resource/UriResolve.hpp>
#include <numeric>
namespace Balau {
namespace Impl {
// Payload threaded through the property AST visitor:
//  - nodeVectorStack holds one factory vector per composite scope being built
//  - bindingBuilderFactoriesVector receives the finished top-level factories
class PropertyTypeSpecificationVisitorPayload : public Lang::Property::Payload {
	public: std::stack<std::vector<PropertyBindingBuilderFactoryPtr>> nodeVectorStack;
	public: std::vector<Impl::PropertyBindingBuilderFactoryPtr> bindingBuilderFactoriesVector;
};
// Holder for the regex that splits a "type = default" value specification.
// Templated on an unused parameter so the static member can be defined in
// this header without violating the one-definition rule.
template <typename Unused = int> class PropertyTypeSpecificationVisitorUtilities {
	private: static std::regex defaultValueDelimiter;

	friend class PropertyTypeSpecificationVisitor;
};

// Matches '=' with optional surrounding whitespace.
template <typename Unused>
std::regex PropertyTypeSpecificationVisitorUtilities<Unused>::defaultValueDelimiter("\\s*=\\s*");
//
// Used to build the binding builder factories from a type specification property file source.
//
class PropertyTypeSpecificationVisitor : public Lang::Property::PropertyVisitor {
	// Seeds the URI stack with the root property file source.
	public: PropertyTypeSpecificationVisitor(const std::shared_ptr<Resource::Uri> & uri_) {
		uris.push(uri_);
	}

	// Parses the root URI and walks the resulting property AST, filling the
	// payload's binding builder factory vector.
	public: void execute(Lang::Property::Payload & payload) {
		auto properties = Lang::Property::PropertyParserService::parse(uris.top());
		visit(payload, properties);
	}

	// Visits a whole properties document: factories produced by its child
	// nodes are collected on a fresh stack frame and appended to the
	// payload's result vector.
	public: void visit(Lang::Property::Payload & payload, const Lang::Property::AST::Properties & object) {
		auto & pl = static_cast<PropertyTypeSpecificationVisitorPayload &>(payload);
		pl.nodeVectorStack.push(std::vector<PropertyBindingBuilderFactoryPtr>());

		for (auto & node : object.getNodes()) {
			node->visit(payload, *this);
		}

		for (auto & factory : pl.nodeVectorStack.top()) {
			pl.bindingBuilderFactoriesVector.emplace_back(factory);
		}

		pl.nodeVectorStack.pop();
	}

	// Visits a "name = typeSpec [= default]" value property and creates the
	// corresponding binding builder factory.
	public: void visit(Lang::Property::Payload & payload, const Lang::Property::AST::ValueProperty & object) {
		auto & pl = static_cast<PropertyTypeSpecificationVisitorPayload &>(payload);
		auto splitValue = Util::Strings::split(object.getValue(), PropertyTypeSpecificationVisitorUtilities<>::defaultValueDelimiter);

		std::string typeSpecification;
		std::string defaultValue;

		switch (splitValue.size()) {
			case 0: {
				ThrowBalauException(
					Exception::EnvironmentConfigurationException
					, ::toString("Missing type specification for property ", object.getName(), " sourced from ", *uris.top(), ".")
				);
				// Not reached: the macro above throws.
			}

			case 1: {
				typeSpecification = std::string(splitValue[0]);
				break;
			}

			case 2: {
				typeSpecification = std::string(splitValue[0]);
				defaultValue = std::string(splitValue[1]);
				break;
			}

			default: {
				// The default value text itself contains one or more '='. Rebuild the
				// complete text from the start of the second token to the end of the
				// last token, so the embedded delimiter characters are preserved
				// verbatim. The tokens are views into the original string (the prior
				// code already relied on this via .data()), but it summed only the
				// token lengths, silently dropping the delimiter characters and
				// truncating the default value.
				// It is up to the fromString function to determine if the text is valid or not.
				typeSpecification = std::string(splitValue[0]);
				const auto & lastToken = splitValue[splitValue.size() - 1];
				const char * first = splitValue[1].data();
				const char * last = lastToken.data() + lastToken.length();
				defaultValue = std::string(first, static_cast<size_t>(last - first));
			}
		}

		auto factory = getBindingBuilderFactory(typeSpecification, std::string(object.getName()), defaultValue);
		pl.nodeVectorStack.top().emplace_back(factory);
	}

	// Visits a composite property: child factories are gathered on a fresh
	// stack frame, added to a composite factory, and their nesting level
	// incremented before the composite joins the parent's frame.
	public: void visit(Lang::Property::Payload & payload, const Lang::Property::AST::CompositeProperty & object) {
		auto & pl = static_cast<PropertyTypeSpecificationVisitorPayload &>(payload);
		pl.nodeVectorStack.push(std::vector<PropertyBindingBuilderFactoryPtr>());

		auto factory = PropertyBindingBuilderFactoryPtr(new CompositePropertyBindingBuilderFactory(std::string(object.getName())));
		auto composite = std::static_pointer_cast<CompositePropertyBindingBuilderFactory>(factory);

		for (auto & childNode : object.getNodes()) {
			childNode->visit(payload, *this);
		}

		for (auto & childFactory : pl.nodeVectorStack.top()) {
			EnvironmentConfigurationBuilderUtils::addToComposite(*composite, childFactory);
			EnvironmentConfigurationBuilderUtils::incrementLevel(childFactory);
		}

		pl.nodeVectorStack.pop();
		pl.nodeVectorStack.top().emplace_back(composite);
	}

	// Visits an include directive: the included file is parsed and visited in
	// place, with its URI pushed so nested relative includes resolve against it.
	public: void visit(Lang::Property::Payload & payload, const Lang::Property::AST::IncludePropertyNode & object) {
		std::shared_ptr<Resource::Uri> resolvedUri = Resource::UriResolve::resolve(*uris.top(), Util::Strings::trim(object.getText()));
		auto properties = Lang::Property::PropertyParserService::parse(resolvedUri);

		uris.push(resolvedUri);

		for (auto & node : properties.getNodes()) {
			node->visit(payload, *this);
		}

		uris.pop();
	}

	public: void visit(Lang::Property::Payload & payload, const Lang::Property::AST::CommentPropertyNode & object) {
		// NOP
	}

	public: void visit(Lang::Property::Payload & payload, const Lang::Property::AST::EmptyLinePropertyNode & object) {
		// NOP
	}

	// Stack of source URIs; the top entry is the file currently being visited.
	private: std::stack<std::shared_ptr<Resource::Uri>> uris;
};
} // namespace Impl
} // namespace Balau
#endif // COM_BORA_SOFTWARE__BALAU_APPLICATION_IMPL__PROPERTY_TYPE_SPECIFICATION_VISITOR
|
code-review-doctor/lite-frontend-1
|
unit_tests/caseworker/flags/test_services.py
|
<reponame>code-review-doctor/lite-frontend-1<filename>unit_tests/caseworker/flags/test_services.py<gh_stars>0
import pytest
from caseworker.flags.services import _add_flag_permissions
@pytest.mark.parametrize(
    "permissions, num_unremovable",
    [
        ([], 3),
        (["REMOVE_AUTHORISED_COUNTERSIGNER_FLAGS"], 2),
        (["REMOVE_HEAD_OF_LICENSING_UNIT_FLAGS"], 1),
        (
            [
                "REMOVE_AUTHORISED_COUNTERSIGNER_FLAGS",
                "REMOVE_HEAD_OF_LICENSING_UNIT_FLAGS",
            ],
            0,
        ),
    ],
)
def test_get_flags_permissions(permissions, num_unremovable):
    """_add_flag_permissions marks the flags the user may not remove.

    Given the caseworker's permission codes, every flag whose ``removable_by``
    role is not covered by those permissions must come back flagged with
    ``cannot_remove``.
    """

    def build_flag(id_suffix, name, removable_by, blocks_finalising):
        # Everything except the four parameters is identical across fixtures.
        return {
            "id": f"00000000-0000-0000-0000-0000000000{id_suffix}",
            "name": name,
            "colour": "default",
            "level": "Case",
            "label": None,
            "status": "Active",
            "priority": 0,
            "blocks_finalising": blocks_finalising,
            "removable_by": removable_by,
            "team": {"id": "00000000-0000-0000-0000-000000000001", "name": "Admin"},
        }

    data = [
        build_flag("14", "Flag 2", "Anyone", False),
        build_flag("15", "Flag 1", "Anyone", True),
        build_flag("16", "<NAME>", "Authorised countersigner", True),
        build_flag("17", "<NAME>", "Head of Licensing Unit countersigner", True),
        build_flag("18", "<NAME>", "Head of Licensing Unit countersigner", True),
    ]

    flags = _add_flag_permissions(data, permissions)

    assert sum(1 for flag in flags if flag["cannot_remove"]) == num_unremovable
|
parkmap/hwplib
|
src/main/java/kr/dogfoot/hwplib/util/binary/BitFlag.java
|
package kr.dogfoot.hwplib.util.binary;
/**
* 이진 연산을 하는 객체
*
* @author neolord
*/
/**
 * Bit-manipulation helpers for reading and writing single bits and bit ranges
 * of long / int / short masks.
 *
 * @author neolord
 */
public class BitFlag {
    /**
     * Returns whether the bit at {@code position} of {@code mask} is set.
     *
     * @param mask     long value
     * @param position bit position (0 = least significant, up to 63)
     * @return true when the bit is 1
     */
    public static boolean get(long mask, int position) {
        // 1L (not int 1): an int shift distance is taken mod 32, which broke
        // positions >= 32 for long masks.
        long mask2 = 1L << position;
        return (mask & mask2) == mask2;
    }

    /**
     * Returns whether the bit at {@code position} of {@code mask} is set.
     *
     * @param mask     int value
     * @param position bit position (0 = least significant, up to 31)
     * @return true when the bit is 1
     */
    public static boolean get(int mask, int position) {
        int mask2 = 1 << position;
        return (mask & mask2) == mask2;
    }

    /**
     * Returns whether the bit at {@code position} of {@code mask} is set.
     *
     * @param mask     short value
     * @param position bit position (0 = least significant, up to 15)
     * @return true when the bit is 1
     */
    public static boolean get(short mask, int position) {
        short mask2 = (short) (1 << position);
        return (mask & mask2) == mask2;
    }

    /**
     * Sets the bit at {@code position} of {@code mask} to 1 when {@code flag}
     * is true, otherwise to 0.
     *
     * @param mask     previous long value
     * @param position bit position (0..63)
     * @param flag     desired bit value
     * @return the updated mask
     */
    public static long set(long mask, int position, boolean flag) {
        // 0x1L (not int 0x1): an int shift distance is taken mod 32, which
        // broke positions >= 32 for long masks.
        if (flag) {
            mask = mask | (0x1L << position);
        } else {
            if ((mask & (0x1L << position)) != 0) {
                mask = mask ^ (0x1L << position);
            }
        }
        return mask;
    }

    /**
     * Sets the bit at {@code position} of {@code mask} to 1 when {@code flag}
     * is true, otherwise to 0.
     *
     * @param mask     previous int value
     * @param position bit position (0..31)
     * @param flag     desired bit value
     * @return the updated mask
     */
    public static int set(int mask, int position, boolean flag) {
        if (flag) {
            mask = mask | (0x1 << position);
        } else {
            if ((mask & (0x1 << position)) != 0) {
                mask = mask ^ (0x1 << position);
            }
        }
        return mask;
    }

    /**
     * Sets the bit at {@code position} of {@code mask} to 1 when {@code flag}
     * is true, otherwise to 0.
     *
     * @param mask     previous short value
     * @param position bit position (0..15)
     * @param flag     desired bit value
     * @return the updated mask
     */
    public static short set(short mask, int position, boolean flag) {
        if (flag) {
            mask = (short) (mask | (0x1 << position));
        } else {
            if ((mask & (0x1 << position)) != 0) {
                mask = (short) (mask ^ (0x1 << position));
            }
        }
        return mask;
    }

    /**
     * Returns the value of the bits from {@code start} to {@code end}
     * (inclusive) of {@code mask}, shifted down to bit 0.
     *
     * @param mask  long value
     * @param start first bit position (inclusive)
     * @param end   last bit position (inclusive)
     * @return the extracted bit range as a long
     */
    public static long get(long mask, int start, int end) {
        long ret = 0;
        ret = mask >> start;
        // Build a (end - start + 1)-bit wide mask of ones.
        long temp = 0;
        for (int nIndex = 0; nIndex < end - start + 1; nIndex++) {
            temp = temp << 1;
            temp += 1;
        }
        ret = ret & temp;
        return ret;
    }

    /**
     * Returns the value of the bits from {@code start} to {@code end}
     * (inclusive) of {@code mask}, shifted down to bit 0.
     *
     * @param mask  int value
     * @param start first bit position (inclusive)
     * @param end   last bit position (inclusive)
     * @return the extracted bit range as an int
     */
    public static int get(int mask, int start, int end) {
        int ret = 0;
        ret = mask >> start;
        int temp = 0;
        for (int nIndex = 0; nIndex < end - start + 1; nIndex++) {
            temp = temp << 1;
            temp += 1;
        }
        ret = ret & temp;
        return ret;
    }

    /**
     * Returns the value of the bits from {@code start} to {@code end}
     * (inclusive) of {@code mask}, shifted down to bit 0.
     *
     * @param mask  short value
     * @param start first bit position (inclusive)
     * @param end   last bit position (inclusive)
     * @return the extracted bit range as a short
     */
    public static short get(short mask, int start, int end) {
        short ret = 0;
        ret = (short) (mask >> start);
        int temp = 0;
        for (int nIndex = 0; nIndex < end - start + 1; nIndex++) {
            temp = temp << 1;
            temp += 1;
        }
        ret = (short) (ret & temp);
        return ret;
    }

    /**
     * Writes {@code value} into the bits from {@code start} to {@code end}
     * (inclusive) of {@code mask}.
     *
     * <p>NOTE: {@code value} is an int, so at most its low 32 bits are
     * consumed even for long masks.
     *
     * @param mask  previous long value
     * @param start first bit position (inclusive)
     * @param end   last bit position (inclusive)
     * @param value value to store in the range
     * @return the updated mask
     */
    public static long set(long mask, int start, int end, int value) {
        for (int position = start; position <= end; position++) {
            boolean flag = BitFlag.get(value, position - start);
            mask = BitFlag.set(mask, position, flag);
        }
        return mask;
    }

    /**
     * Writes {@code value} into the bits from {@code start} to {@code end}
     * (inclusive) of {@code mask}.
     *
     * @param mask  previous int value
     * @param start first bit position (inclusive)
     * @param end   last bit position (inclusive)
     * @param value value to store in the range
     * @return the updated mask
     */
    public static int set(int mask, int start, int end, int value) {
        for (int position = start; position <= end; position++) {
            boolean flag = BitFlag.get(value, position - start);
            mask = BitFlag.set(mask, position, flag);
        }
        return mask;
    }

    /**
     * Writes {@code value} into the bits from {@code start} to {@code end}
     * (inclusive) of {@code mask}.
     *
     * @param mask  previous short value
     * @param start first bit position (inclusive)
     * @param end   last bit position (inclusive)
     * @param value value to store in the range
     * @return the updated mask
     */
    public static short set(short mask, int start, int end, int value) {
        for (int position = start; position <= end; position++) {
            boolean flag = BitFlag.get(value, position - start);
            mask = BitFlag.set(mask, position, flag);
        }
        return mask;
    }
}
|
tsenmu/leetcode
|
burst-balloons/solution-1.cpp
|
<reponame>tsenmu/leetcode
class Solution {
public:
    // Max coins from bursting all balloons strictly between indices
    // left and right (exclusive), memoized in mem (-1 == not yet computed).
    int dfs(vector<int>& nums, vector<vector<int>>& mem, int left, int right) {
        if (right - left < 2) {
            return 0;  // no balloon strictly inside the interval
        }
        int& cached = mem[left][right];
        if (cached != -1) {
            return cached;
        }
        int best = 0;
        // Choose which balloon is burst LAST inside (left, right); its
        // neighbours at that moment are exactly nums[left] and nums[right].
        for (int last = left + 1; last < right; ++last) {
            const int gain = nums[last] * nums[left] * nums[right];
            best = max(best, gain + dfs(nums, mem, left, last) + dfs(nums, mem, last, right));
        }
        return cached = best;
    }

    // LeetCode 312 "Burst Balloons": pad the input with sentinel 1s on both
    // ends, then run the interval DP over the padded array.
    int maxCoins(vector<int>& input) {
        const int m = static_cast<int>(input.size());
        vector<int> padded(m + 2, 1);
        for (int i = 0; i < m; ++i) {
            padded[i + 1] = input[i];
        }
        const int n = m + 2;
        vector<vector<int>> memo(n, vector<int>(n, -1));
        return dfs(padded, memo, 0, n - 1);
    }
};
|
ceefour/java-web-services-training
|
jws103-8/src/main/java/com/hendyirawan/jws1038/Query.java
|
<gh_stars>1-10
package com.hendyirawan.jws1038;
import com.coxautodev.graphql.tools.GraphQLQueryResolver;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import java.util.List;
/**
 * Root GraphQL query resolver.
 *
 * <p>Exposes the top-level {@code countries} query, backed by
 * {@link CountryRepository}.
 */
@Component
public class Query implements GraphQLQueryResolver {

    @Autowired
    private CountryRepository countryRepository;

    /** GraphQL query: returns every stored country. */
    public List<Country> countries() {
        return countryRepository.findAll();
    }
}
|
wenwei8268/Alink
|
core/src/main/java/com/alibaba/alink/pipeline/ModelExporterUtils.java
|
package com.alibaba.alink.pipeline;
import org.apache.flink.api.common.functions.FilterFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.tuple.Tuple3;
import org.apache.flink.api.java.typeutils.RowTypeInfo;
import org.apache.flink.ml.api.misc.param.Params;
import org.apache.flink.shaded.guava18.com.google.common.collect.Lists;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.core.type.TypeReference;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.types.Row;
import org.apache.flink.util.Preconditions;
import com.alibaba.alink.common.MLEnvironmentFactory;
import com.alibaba.alink.common.io.filesystem.AkStream;
import com.alibaba.alink.common.io.filesystem.AkStream.AkReader;
import com.alibaba.alink.common.io.filesystem.AkUtils;
import com.alibaba.alink.common.io.filesystem.AkUtils.FileProcFunction;
import com.alibaba.alink.common.io.filesystem.FilePath;
import com.alibaba.alink.common.mapper.ComboMapper;
import com.alibaba.alink.common.mapper.ComboModelMapper;
import com.alibaba.alink.common.mapper.Mapper;
import com.alibaba.alink.common.mapper.MapperChain;
import com.alibaba.alink.common.mapper.ModelMapper;
import com.alibaba.alink.common.mapper.PipelineModelMapper;
import com.alibaba.alink.common.utils.DataSetConversionUtil;
import com.alibaba.alink.common.utils.JsonConverter;
import com.alibaba.alink.common.utils.TableUtil;
import com.alibaba.alink.operator.batch.BatchOperator;
import com.alibaba.alink.operator.batch.source.MemSourceBatchOp;
import com.alibaba.alink.operator.batch.source.TableSourceBatchOp;
import com.alibaba.alink.operator.common.io.types.FlinkTypeConverter;
import com.alibaba.alink.params.ModelStreamScanParams;
import com.alibaba.alink.pipeline.recommendation.BaseRecommender;
import com.alibaba.alink.pipeline.recommendation.RecommenderUtil;
import org.apache.commons.lang3.ArrayUtils;
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.function.Consumer;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import static com.alibaba.alink.common.mapper.PipelineModelMapper.getExtendModelSchema;
/**
* A utility class for exporting {@link PipelineModel}.
*/
public class ModelExporterUtils {
	// meta col
	// Column types prefixed to every serialized model row (currently one string column holding the meta JSON).
	private static final TypeInformation <?>[] PREFIX_TYPES = new TypeInformation <?>[] {Types.STRING};
	// Prefix used to generate the generic packed column names ("p0", "p1", ...).
	private static final String MODEL_COL_PREFIX = "p";
	// Separator between the stage-tree JSON and the pipeline-params JSON inside the meta row.
	private static final String META_DELIMITER = "__ALINK_META_ROW_DELIMITER__";
	// Name of the long id column tagging each row with its owning stage; -1 marks the meta row.
	static final String ID_COL_NAME = "id";
	// A tree node addressed by array index; children are indices into the same array.
	private interface Node {
		int[] getChildren();
	}
	/**
	 * Iterative pre-order traversal (node first, then children left to right)
	 * over the stage tree rooted at index 0; the consumer receives node indices.
	 */
	private static <T extends Node> void preOrder(T[] stages, Consumer <Integer> consumer) {
		LinkedList <Integer> stack = new LinkedList <>();
		stack.push(0);

		while (!stack.isEmpty()) {
			int lp = stack.pop();

			// visit
			consumer.accept(lp);

			T node = stages[lp];

			if (node.getChildren() != null) {
				// left to right
				// Children pushed in reverse so the leftmost child is popped first.
				for (int i = node.getChildren().length - 1; i >= 0; --i) {
					stack.push(node.getChildren()[i]);
				}
			}
		}
	}
	/**
	 * Iterative post-order traversal over the stage tree rooted at index 0:
	 * each node is visited only after all of its children.
	 *
	 * <p>NOTE(review): children are pushed in array order, so siblings are
	 * visited right-to-left — the reverse of {@link #preOrder}. Subtree
	 * completion is detected when the first child (the last one visited) was
	 * the most recently finished node. Confirm the reversed sibling order is
	 * intentional before relying on it.
	 */
	private static <T extends Node> void postOrder(T[] stages, Consumer <Integer> consumer) {
		LinkedList <Integer> stack = new LinkedList <>();
		stack.push(0);

		int lastLp = -1;

		while (!stack.isEmpty()) {
			int lp = stack.peek();
			T node = stages[lp];

			if (node.getChildren() != null && lastLp != node.getChildren()[0]) {
				// right to left
				for (int i = 0; i < node.getChildren().length; ++i) {
					stack.push(node.getChildren()[i]);
				}
			} else {
				// visit
				consumer.accept(lp);
				lastLp = lp;
				stack.pop();
			}
		}
	}
	/**
	 * Serializable description of one pipeline stage inside the flattened stage
	 * tree. Index 0 is a synthetic root (stage == null) whose children are the
	 * top-level stages. The transient fields are rebuilt during
	 * (de)serialization and never written to JSON.
	 */
	public static class StageNode implements Node {
		// valid information
		// the identifier of stage.
		public String identifier;
		// the params in the stage.
		public Params params;
		// the index of schema in the root node.
		public int[] schemaIndices;
		// the column names of stage. only on the leaf node.
		public String[] colNames;
		// the parent node's index.
		public int parent;

		// Temporary
		// stage in memory
		public transient PipelineStageBase <?> stage;
		// index in the parent's array of children
		public transient int inParentIndex;
		// index array of the child.
		public transient int[] children;
		// Temporary size of children.
		public transient int sizeChildren;
		// Temporary types for construct schema.
		public transient TypeInformation <?>[] types;

		public StageNode() {
		}

		public StageNode(PipelineStageBase <?> stage, int parent, int inParentIndex, int[] children) {
			this.stage = stage;
			this.parent = parent;
			this.inParentIndex = inParentIndex;
			this.children = children;

			if (stage != null) {
				identifier = stage.getClass().getName();
				params = stage.getParams();
			}
		}

		@Override
		public int[] getChildren() {
			return children;
		}
	}
// serialize
private static int findFirst(TypeInformation <?>[] all, int start, TypeInformation <?> t) {
for (int i = start; i < all.length; ++i) {
if (t.equals(all[i])) {
return i;
}
}
return -1;
}
	/**
	 * Merges type array {@code b} into {@code a}, reusing equal slots of
	 * {@code a} where possible. Returns the merged type array plus, for each
	 * position of {@code b}, its index inside the merged array.
	 *
	 * <p>Repeated occurrences of the same type in {@code b} are matched against
	 * successive equal slots of {@code a}; once {@code a} holds no further
	 * match for a type, every later occurrence of that type is appended as a
	 * new slot (see the -1 sentinel below).
	 */
	public static Tuple2 <TypeInformation <?>[], int[]> mergeType(
		TypeInformation <?>[] a, TypeInformation <?>[] b) {
		// When one side is null the other maps to itself (identity indices).
		if (a == null) {
			return Tuple2.of(b, initialSchemaIndices(b));
		}

		if (b == null) {
			return Tuple2.of(a, initialSchemaIndices(a));
		}

		List <TypeInformation <?>> merged = new ArrayList <>(Arrays.asList(a));
		// Per-type scan cursor into 'a'; -1 means 'a' is exhausted for that type.
		Map <TypeInformation <?>, Integer> bCurrentIdx = new HashMap <>();
		int[] bIdx = new int[b.length];

		for (int i = 0; i < b.length; ++i) {
			Integer currentIdx = bCurrentIdx.getOrDefault(b[i], 0);

			// Sentinel: no more matching slots in 'a' — append unconditionally.
			if (currentIdx < 0) {
				bIdx[i] = merged.size();
				merged.add(b[i]);
				continue;
			}

			int found = findFirst(a, currentIdx, b[i]);

			if (found < 0) {
				bIdx[i] = merged.size();
				merged.add(b[i]);
				// Remember that 'a' is exhausted for this type (found == -1).
				bCurrentIdx.put(b[i], found);
			} else {
				bIdx[i] = found;
				// Next occurrence of this type must match a later slot of 'a'.
				bCurrentIdx.put(b[i], found + 1);
			}
		}

		return Tuple2.of(merged.toArray(new TypeInformation <?>[0]), bIdx);
	}
	/**
	 * Returns the model data column types of a leaf stage, or null for a plain
	 * transformer without model data. Containers (PipelineModel / Pipeline)
	 * must already be flattened and therefore throw.
	 *
	 * <p>NOTE(review): the PipelineModel check precedes the ModelBase check —
	 * presumably PipelineModel extends ModelBase; confirm before reordering.
	 */
	private static TypeInformation <?>[] getTypes(PipelineStageBase <?> stage) {
		if (stage instanceof PipelineModel) {
			throw new IllegalArgumentException(
				"Error pipeline stage. Could not get column types from pipeline model."
			);
		} else if (stage instanceof ModelBase) {
			return ((ModelBase <?>) stage).getModelData().getColTypes();
		} else if (stage instanceof Pipeline) {
			throw new IllegalArgumentException(
				"Error pipeline stage. Could not get column types from pipeline."
			);
		} else {
			return null;
		}
	}
	/**
	 * Returns the model data column names of a leaf stage, or null for a plain
	 * transformer without model data. Containers (PipelineModel / Pipeline)
	 * must already be flattened and therefore throw. Mirrors {@link #getTypes}.
	 */
	private static String[] getColNames(PipelineStageBase <?> stage) {
		if (stage instanceof PipelineModel) {
			throw new IllegalArgumentException(
				"Error pipeline stage. Could not get column names from pipeline model."
			);
		} else if (stage instanceof ModelBase) {
			return ((ModelBase <?>) stage).getModelData().getColNames();
		} else if (stage instanceof Pipeline) {
			throw new IllegalArgumentException(
				"Error pipeline stage. Could not get column names from pipeline."
			);
		} else {
			return null;
		}
	}
public static int[] initialSchemaIndices(TypeInformation <?>[] types) {
if (types == null) {
return null;
}
int[] schemaIndices = new int[types.length];
for (int i = 0; i < types.length; ++i) {
schemaIndices[i] = i;
}
return schemaIndices;
}
private static String[] appendPrefix(String prefix, int len) {
String[] result = new String[len];
for (int i = 0; i < len; ++i) {
result[i] = prefix + i;
}
return result;
}
	/**
	 * Post-order pass that computes, for every node, the merged model-column
	 * type array of its subtree ({@code types}) and, for every child, its
	 * index mapping into the parent's array ({@code schemaIndices}). Leaves
	 * read their types and column names straight from their stage's model data.
	 */
	private static StageNode[] postOrderCreateSchema(StageNode[] stages) {
		if (stages == null || stages.length == 0) {
			return stages;
		}

		postOrder(stages, lp -> {
			StageNode node = stages[lp];

			if (node.children != null) {
				// Internal node: fold all children's type arrays into one.
				StageNode firstNode = stages[node.children[0]];
				TypeInformation <?>[] first = firstNode.types;

				firstNode.schemaIndices = initialSchemaIndices(first);

				for (int i = 1; i < node.children.length; ++i) {
					StageNode currentNode = stages[node.children[i]];

					Tuple2 <TypeInformation <?>[], int[]> merged
						= mergeType(first, currentNode.types);

					currentNode.schemaIndices = merged.f1;
					first = merged.f0;
				}

				node.types = first;
			} else {
				// Leaf: take the model data's schema directly.
				node.types = getTypes(node.stage);
				node.colNames = getColNames(node.stage);
			}
		});

		// The root maps identically onto its own merged type array.
		stages[0].schemaIndices = initialSchemaIndices(stages[0].types);

		return stages;
	}
	/**
	 * Pre-order pass that rewrites every node's {@code schemaIndices} from
	 * parent-relative positions into absolute positions inside the final
	 * merged type array ({@code prefixTypes} + root types). Returns that
	 * merged array.
	 */
	private static TypeInformation <?>[] preOrderCorrectIndex(StageNode[] stages, TypeInformation <?>[] prefixTypes) {
		if (stages == null || stages.length == 0 || stages[0].schemaIndices == null) {
			return prefixTypes;
		}

		Tuple2 <TypeInformation <?>[], int[]> merged = mergeType(prefixTypes, stages[0].types);

		// Shift the root's indices into the prefixed array first.
		for (int i = 0; i < stages[0].schemaIndices.length; ++i) {
			stages[0].schemaIndices[i] = merged.f1[stages[0].schemaIndices[i]];
		}

		preOrder(stages, lp -> {
			StageNode node = stages[lp];

			// Parents are processed before children (pre-order), so the
			// parent's indices are already absolute when the child is mapped.
			if (node.parent >= 0 && node.schemaIndices != null) {
				for (int i = 0; i < node.schemaIndices.length; ++i) {
					node.schemaIndices[i]
						= stages[node.parent].schemaIndices[node.schemaIndices[i]];
				}
			}
		});

		return merged.f0;
	}
	/**
	 * Flattens the given stages into an array-backed tree laid out in
	 * pre-order. Element 0 is a synthetic root (stage == null) whose children
	 * are the top-level stages; Pipeline / PipelineModel containers are
	 * expanded recursively into their nested stages.
	 */
	private static StageNode[] preOrderConstructStages(List <PipelineStageBase <?>> stages) {
		if (stages == null || stages.isEmpty()) {
			return null;
		}

		int numStages = stages.size();
		List <StageNode> tree = new ArrayList <>(numStages);
		tree.add(new StageNode(null, -1, -1, new int[stages.size()]));

		LinkedList <StageNode> stack = new LinkedList <>();

		// Push in reverse so the stages are appended in original order.
		for (int i = numStages - 1; i >= 0; --i) {
			stack.push(new StageNode(stages.get(i), 0, i, null));
		}

		while (!stack.isEmpty()) {
			StageNode node = stack.pop();
			int index = tree.size();
			// Record this node's position inside its parent's children array.
			tree.get(node.parent).children[node.inParentIndex] = index;
			tree.add(node);

			if (node.stage instanceof PipelineModel) {
				PipelineModel pipelineModel = (PipelineModel) node.stage;
				node.children = new int[pipelineModel.transformers.length];

				for (int i = pipelineModel.transformers.length - 1; i >= 0; --i) {
					stack.push(new StageNode(pipelineModel.transformers[i], index, i, null));
				}
			} else if (node.stage instanceof Pipeline) {
				Pipeline pipeline = (Pipeline) node.stage;
				node.children = new int[pipeline.stages.size()];

				for (int i = pipeline.stages.size() - 1; i >= 0; --i) {
					stack.push(new StageNode(pipeline.stages.get(i), index, i, null));
				}
			}
		}

		return tree.toArray(new StageNode[0]);
	}
private static Row serializeMeta(StageNode[] tree, int len, Params params) {
Map <String, String> meta = new HashMap <>();
meta.put("stages", JsonConverter.toJson(tree));
Row metaRow = new Row(len);
metaRow.setField(0, -1L);
metaRow.setField(1, JsonConverter.toJson(meta) + META_DELIMITER + params.toJson());
return metaRow;
}
private static BatchOperator <?> preOrderSerialize(
StageNode[] stages, BatchOperator <?> packed, final TableSchema schema, final int offset) {
if (stages == null || stages.length == 0) {
return packed;
}
final int len = schema.getFieldTypes().length;
final long[] id = new long[1];
final BatchOperator <?>[] localPacked = new BatchOperator <?>[] {packed};
Consumer <Integer> serializeModelData = lp -> {
StageNode stageNode = stages[lp];
if (stageNode.parent >= 0
&& stageNode.schemaIndices != null
&& stageNode.children == null
&& stageNode.stage instanceof ModelBase <?>) {
final long localId = id[0];
final int[] localSchemaIndices = stageNode.schemaIndices;
DataSet <Row> modelData =
((ModelBase <?>) stageNode.stage)
.getModelData()
.getDataSet()
.map(new MapFunction <Row, Row>() {
private static final long serialVersionUID = 5218543921039328938L;
@Override
public Row map(Row value) {
Row ret = new Row(len);
ret.setField(0, localId);
for (int i = 0; i < localSchemaIndices.length; ++i) {
ret.setField(localSchemaIndices[i] + offset, value.getField(i));
}
return ret;
}
})
.returns(new RowTypeInfo(schema.getFieldTypes()));
localPacked[0] = new TableSourceBatchOp(
DataSetConversionUtil.toTable(
localPacked[0].getMLEnvironmentId(),
localPacked[0].getDataSet().union(modelData),
schema
)
).setMLEnvironmentId(localPacked[0].getMLEnvironmentId());
}
id[0]++;
};
preOrder(stages, serializeModelData);
return localPacked[0];
}
	/**
	 * Serializes pipeline stages (plus pipeline-level params) into one batch
	 * operator: a single meta row (id = -1) describing the stage tree,
	 * followed by every leaf model's rows tagged with that stage's pre-order
	 * id. The schema is [id LONG, p0, p1, ...] over the merged model types.
	 */
	static BatchOperator <?> serializePipelineStages(List <PipelineStageBase <?>> stages, Params params) {
		StageNode[] stageNodes = preOrderConstructStages(stages);

		TypeInformation <?>[] typesWithPrefix
			= preOrderCorrectIndex(postOrderCreateSchema(stageNodes), PREFIX_TYPES);

		TableSchema finalSchema = new TableSchema(
			ArrayUtils.addAll(new String[] {ID_COL_NAME}, appendPrefix(MODEL_COL_PREFIX, typesWithPrefix.length)),
			ArrayUtils.addAll(new TypeInformation[] {Types.LONG}, typesWithPrefix)
		);

		Row metaRow = serializeMeta(stageNodes, finalSchema.getFieldTypes().length, params);

		// Offset 1 skips the id column when scattering model columns.
		return preOrderSerialize(
			stageNodes,
			new MemSourceBatchOp(Collections.singletonList(metaRow), finalSchema)
				.setMLEnvironmentId(stages.size() > 0 ? stages.get(0).getMLEnvironmentId() :
					MLEnvironmentFactory.DEFAULT_ML_ENVIRONMENT_ID),
			finalSchema,
			1);
	}
// deserialize
	/**
	 * Rebuilds the stage tree and pipeline params from the meta row. Restores
	 * each node's {@code children} array (from the serialized parent links)
	 * and resolves its {@code types} from the packed schema via
	 * {@code schemaIndices} + {@code offset}.
	 */
	private static Tuple2 <StageNode[], Params> deserializeMeta(Row metaRow, TableSchema schema, final int offset) {
		// META_DELIMITER contains only word characters, so String.split (regex) is safe here.
		String[] metaAndParams = ((String) metaRow.getField(1)).split(META_DELIMITER);

		Map <String, String> meta = JsonConverter.fromJson(metaAndParams[0]
			, new TypeReference <Map <String, String>>() {}.getType()
		);

		// Older models may lack the params part; fall back to empty Params.
		Params pipeParams = metaAndParams.length == 2 ? Params.fromJson(metaAndParams[1]) : new Params();

		StageNode[] stages = JsonConverter.fromJson(meta.get("stages"), StageNode[].class);

		if (stages == null || stages.length == 0) {
			return Tuple2.of(stages, pipeParams);
		}

		TypeInformation <?>[] types = schema.getFieldTypes();

		// First pass: count each parent's children.
		for (StageNode stage : stages) {
			if (stage.parent >= 0) {
				stages[stage.parent].sizeChildren++;
			}
		}

		// Second pass: allocate children arrays and fill them, reusing
		// sizeChildren as the insertion cursor. Relies on the pre-order layout
		// (a parent's index is always smaller than its children's).
		for (int i = 0; i < stages.length; ++i) {
			if (stages[i].sizeChildren > 0) {
				stages[i].children = new int[stages[i].sizeChildren];
				stages[i].sizeChildren = 0;
			}

			if (stages[i].parent >= 0) {
				stages[stages[i].parent].children[stages[stages[i].parent].sizeChildren++] = i;
			}

			if (stages[i].schemaIndices != null) {
				int len = stages[i].schemaIndices.length;
				stages[i].types = new TypeInformation[len];
				for (int j = 0; j < len; ++j) {
					stages[i].types[j] = types[stages[i].schemaIndices[j] + offset];
				}
			}
		}

		return Tuple2.of(stages, pipeParams);
	}
	/**
	 * Rebuilds pipeline stages from the packed operator. Visits the stage tree
	 * in the same post-order as {@link #postOrder}, counting ids down from
	 * stages.length - 1 (the reverse of the pre-order numbering used when
	 * packing): each leaf model stage is instantiated reflectively, its rows
	 * are filtered out of the packed data by id and narrowed back to the
	 * stage's own schema, and container nodes are reassembled from their
	 * already-rebuilt children. Returns the top-level stage list.
	 */
	private static <T extends PipelineStageBase <?>> List <T> postOrderDeserialize(
		StageNode[] stages, BatchOperator <?> unpacked, final TableSchema schema, final int offset) {

		if (stages == null || stages.length == 0) {
			return new ArrayList <>();
		}

		final long[] id = new long[] {stages.length - 1};
		final BatchOperator <?>[] deserialized = new BatchOperator <?>[] {unpacked};
		List <T> result = new ArrayList <>();

		Consumer <Integer> deserializer = lp -> {
			StageNode stageNode = stages[lp];

			try {
				// Recreate the stage from its class name and params.
				if (stageNode.identifier != null) {
					stageNode.stage = (PipelineStageBase <?>) Class
						.forName(stageNode.identifier)
						.getConstructor(Params.class)
						.newInstance(stageNode.params);
				}
			} catch (ClassNotFoundException
				| NoSuchMethodException
				| InstantiationException
				| IllegalAccessException
				| InvocationTargetException ex) {

				throw new IllegalArgumentException(ex);
			}

			// leaf node.
			if (stageNode.children == null) {
				if (stageNode.parent >= 0
					&& stageNode.schemaIndices != null
					&& stageNode.colNames != null) {

					final long localId = id[0];
					final int[] localSchemaIndices = stageNode.schemaIndices;

					// Select this stage's rows and project them back to the
					// stage's own model schema.
					BatchOperator <?> model = new TableSourceBatchOp(
						DataSetConversionUtil.toTable(
							deserialized[0].getMLEnvironmentId(),
							deserialized[0]
								.getDataSet()
								.filter(new FilterFunction <Row>() {
									private static final long serialVersionUID = 355683133177055891L;

									@Override
									public boolean filter(Row value) {
										return value.getField(0).equals(localId);
									}
								})
								.map(new MapFunction <Row, Row>() {
									private static final long serialVersionUID = -4286266312978550037L;

									@Override
									public Row map(Row value) throws Exception {
										Row ret = new Row(localSchemaIndices.length);
										for (int i = 0; i < localSchemaIndices.length; ++i) {
											ret.setField(i, value.getField(localSchemaIndices[i] + offset));
										}
										return ret;
									}
								})
								.returns(new RowTypeInfo(stageNode.types)),
							new TableSchema(stageNode.colNames, stageNode.types)
						)
					).setMLEnvironmentId(deserialized[0].getMLEnvironmentId());

					((ModelBase <?>) stageNode.stage).setModelData(model);

					// Drop the consumed rows so later stages scan less data.
					deserialized[0] = new TableSourceBatchOp(
						DataSetConversionUtil.toTable(
							deserialized[0].getMLEnvironmentId(),
							deserialized[0]
								.getDataSet()
								.filter(new FilterFunction <Row>() {
									private static final long serialVersionUID = -2803966833769030531L;

									@Override
									public boolean filter(Row value) {
										return !value.getField(0).equals(localId);
									}
								}),
							schema
						)
					).setMLEnvironmentId(deserialized[0].getMLEnvironmentId());
				}
			} else {
				// Container node: children are already rebuilt (post-order).
				List <T> pipelineStageBases = new ArrayList <>();

				for (int i = 0; i < stageNode.children.length; ++i) {
					pipelineStageBases.add((T) stages[stageNode.children[i]].stage);
				}

				if (stageNode.stage == null) {
					// Synthetic root: its children are the final result.
					result.addAll(pipelineStageBases);
					return;
				}

				if (stageNode.stage instanceof Pipeline) {
					stageNode.stage = new Pipeline(pipelineStageBases.toArray(new PipelineStageBase <?>[0]));
				} else if (stageNode.stage instanceof PipelineModel) {
					stageNode.stage = new PipelineModel(pipelineStageBases.toArray(new TransformerBase <?>[0]));
				} else {
					throw new IllegalArgumentException("Unsupported stage.");
				}
			}

			id[0]--;
		};

		postOrder(stages, deserializer);

		return result;
	}
private static <T extends PipelineStageBase <?>> List <T> postOrderUnPackWithoutModelData(StageNode[] stages) {
if (stages == null || stages.length == 0) {
return null;
}
LinkedList <Integer> stack = new LinkedList <>();
stack.push(0);
long id = stages.length - 1;
int lastLp = -1;
while (!stack.isEmpty()) {
int lp = stack.peek();
if (stages[lp].children != null && lastLp != stages[lp].children[0]) {
for (int i = 0; i < stages[lp].children.length; ++i) {
stack.push(stages[lp].children[i]);
}
} else {
try {
if (stages[lp].identifier != null) {
stages[lp].stage = (PipelineStageBase <?>) Class
.forName(stages[lp].identifier)
.getConstructor(Params.class)
.newInstance(stages[lp].params);
}
} catch (ClassNotFoundException | NoSuchMethodException | InstantiationException |
IllegalAccessException | InvocationTargetException e) {
throw new RuntimeException(e);
}
// leaf node.
if (stages[lp].children != null) {
List <T> pipelineStageBases = new ArrayList <>();
for (int i = 0; i < stages[lp].children.length; ++i) {
pipelineStageBases.add((T) stages[stages[lp].children[i]].stage);
}
if (stages[lp].stage == null) {
return pipelineStageBases;
}
if (stages[lp].stage instanceof Pipeline) {
stages[lp].stage = new Pipeline(pipelineStageBases.toArray(new PipelineStageBase <?>[0]));
} else if (stages[lp].stage instanceof PipelineModel) {
stages[lp].stage = new PipelineModel(pipelineStageBases.toArray(new TransformerBase <?>[0]));
} else {
throw new IllegalArgumentException("Unsupported stage.");
}
}
id--;
lastLp = lp;
stack.pop();
}
}
return null;
}
	/**
	 * Computes the flattened stage tree, the packed table schema and the meta
	 * row for the given stages without touching any model data. Parallels
	 * {@link #serializePipelineStages} but with empty pipeline params and no
	 * row union.
	 */
	public static Tuple3 <StageNode[], TableSchema, Row> extractStagesMeta(List <PipelineStageBase <?>> stages) {
		StageNode[] stageNodes = preOrderConstructStages(stages);

		TypeInformation <?>[] typesWithPrefix
			= preOrderCorrectIndex(postOrderCreateSchema(stageNodes), PREFIX_TYPES);

		TableSchema finalSchema = new TableSchema(
			ArrayUtils.addAll(new String[] {ID_COL_NAME}, appendPrefix(MODEL_COL_PREFIX, typesWithPrefix.length)),
			ArrayUtils.addAll(new TypeInformation[] {Types.LONG}, typesWithPrefix)
		);

		Row metaRow = serializeMeta(stageNodes, finalSchema.getFieldTypes().length, new Params());

		return Tuple3.of(stageNodes, finalSchema, metaRow);
	}
	/**
	 * Collects the single meta row (id < 0) from a packed model operator and
	 * deserializes the stage tree and pipeline params from it. Triggers a
	 * batch execution via collect().
	 */
	static Tuple2 <StageNode[], Params> collectMetaFromOp(BatchOperator <?> packed) {
		return deserializeMeta(
			packed.filter(String.format("%s < 0", ID_COL_NAME)).collect().get(0),
			packed.getSchema(), 1
		);
	}
	/** Deserializes only the stage tree from a meta row (offset 1 skips the id column). */
	static StageNode[] deserializePipelineStagesFromMeta(Row metaRow, TableSchema schema) {
		return deserializeMeta(metaRow, schema, 1).f0;
	}
	/** Deserializes the stage tree together with the pipeline params from a meta row. */
	static Tuple2 <StageNode[], Params> deserializePipelineStagesAndParamsFromMeta(Row metaRow, TableSchema schema) {
		return deserializeMeta(metaRow, schema, 1);
	}
	/** Restores each leaf stage's model data from the packed operator into the given tree. */
	static <T extends PipelineStageBase <?>> List <T> fillPipelineStages(
		BatchOperator <?> packed, StageNode[] stages, TableSchema schema) {
		return postOrderDeserialize(stages, packed, schema, 1);
	}
	/** Rebuilds the stage objects from a meta row only, without any model data. */
	public static <T extends PipelineStageBase <?>> List <T> constructPipelineStagesFromMeta(
		Row metaRow, TableSchema schema) {
		StageNode[] stages = deserializeMeta(metaRow, schema, 1).f0;
		return postOrderUnPackWithoutModelData(stages);
	}
	/**
	 * Folder-scan callback that locates the meta row (id < 0) inside an Ak
	 * file set. Returning false from apply() stops the scan once the meta row
	 * is found; returning true continues with the next file.
	 */
	private static class MetaReader implements FileProcFunction <FilePath, Boolean> {
		// The meta row once found, otherwise null.
		private Row meta;
		// Schema parsed from the Ak file's embedded schema string.
		private TableSchema schema;

		@Override
		public Boolean apply(FilePath filePath) throws IOException {
			boolean fileExists = filePath.getFileSystem().exists(filePath.getPath());

			if (!fileExists) {
				throw new IllegalArgumentException("Could not find the file: " + filePath.getPathStr());
			}

			AkStream stream = new AkStream(filePath);
			schema = TableUtil.schemaStr2Schema(stream.getAkMeta().schemaStr);
			final int idColIndex = TableUtil.findColIndexWithAssertAndHint(schema, ID_COL_NAME);

			try (AkReader reader = stream.getReader()) {
				for (Row r : reader) {
					if ((Long) r.getField(idColIndex) < 0) {
						meta = r;

						// find the meta row, interrupt the loop.
						return false;
					}
				}

				// continue to scan folder
				return true;
			}
		}

		public Row getMeta() {
			return meta;
		}

		public TableSchema getSchema() {
			return schema;
		}
	}
	/**
	 * Scans an Ak file/folder for the meta row and returns it together with
	 * the stored schema. Throws IllegalArgumentException when the path cannot
	 * be read or no meta row is present.
	 */
	public static Tuple2 <TableSchema, Row> loadMetaFromAkFile(FilePath filePath) {
		MetaReader metaReader = new MetaReader();

		try {
			AkUtils.getFromFolderForEach(filePath, metaReader);
		} catch (IOException e) {
			throw new IllegalArgumentException(e);
		}

		if (metaReader.getMeta() == null || metaReader.getSchema() == null) {
			throw new IllegalArgumentException(String.format("Count not get meta from %s.", filePath.getPathStr()));
		}

		return Tuple2.of(metaReader.getSchema(), metaReader.getMeta());
	}
/**
 * Splits the packed pipeline-model rows back into per-stage pieces.
 * <p>
 * Rows are ordered by the id column; the row with the smallest id is the meta
 * row describing the stage tree. The remaining rows are consumed from the end
 * (largest id first) while walking the tree in post order, so each leaf stage
 * picks up the contiguous run of rows that share its id.
 *
 * @param pipelineModel all rows of the packed pipeline model
 * @param modelSchema   schema of the packed model table
 * @return per-stage tuples of (stage instance, stage model schema, stage model rows);
 *         schema/rows are null for stages without model data
 */
public static List <Tuple3 <PipelineStageBase <?>, TableSchema, List <Row>>> loadStagesFromPipelineModel(
        List <Row> pipelineModel, TableSchema modelSchema) {
    final int idColIndex = TableUtil.findColIndexWithAssertAndHint(modelSchema, ID_COL_NAME);
    int size = pipelineModel.size();
    // Sort an index array by the id column instead of sorting the rows themselves.
    Integer[] order = new Integer[size];
    for (int i = 0; i < size; ++i) {
        order[i] = i;
    }
    Arrays.sort(order, Comparator.comparing(o -> ((Long) pipelineModel.get(o).getField(idColIndex))));
    Preconditions.checkState(!pipelineModel.isEmpty(), "Model to load should not be empty.");
    // The meta row (smallest id) encodes the stage tree.
    StageNode[] stages = deserializePipelineStagesFromMeta(pipelineModel.get(order[0]), modelSchema);
    if (stages == null || stages.length == 0) {
        return new ArrayList <>();
    }
    // Cursor over the sorted rows, moving from the largest id downwards.
    final int[] cursor = new int[] {size - 1};
    // Offset 1 skips the id column when projecting stage model columns.
    final int offset = 1;
    List <Tuple3 <PipelineStageBase <?>, TableSchema, List <Row>>> reverseStages = new ArrayList <>();
    Consumer <Integer> deserializer = lp -> {
        StageNode stageNode = stages[lp];
        try {
            // Instantiate the stage reflectively from its recorded class name.
            if (stageNode.identifier != null) {
                stageNode.stage = (PipelineStageBase <?>) Class
                    .forName(stageNode.identifier)
                    .getConstructor(Params.class)
                    .newInstance(stageNode.params);
            }
        } catch (ClassNotFoundException
            | NoSuchMethodException
            | InstantiationException
            | IllegalAccessException
            | InvocationTargetException e) {
            throw new IllegalArgumentException(e);
        }
        // leaf node.
        if (stageNode.children == null) {
            if (stageNode.parent >= 0
                && stageNode.schemaIndices != null
                && stageNode.colNames != null) {
                final int[] localSchemaIndices = stageNode.schemaIndices;
                final int oldCursor = cursor[0];
                // Advance past all rows sharing this stage's id; the skipped
                // range (cursor+1 .. oldCursor) is this stage's model data.
                cursor[0] = next(pipelineModel, order, cursor[0], idColIndex);
                List <Row> localData = IntStream
                    .range(cursor[0] + 1, oldCursor + 1)
                    .mapToObj(value -> pipelineModel.get(order[value]))
                    .map(value -> {
                        // Project the packed row onto the stage's own columns.
                        Row ret = new Row(localSchemaIndices.length);
                        for (int i = 0; i < localSchemaIndices.length; ++i) {
                            ret.setField(i, value.getField(localSchemaIndices[i] + offset));
                        }
                        return ret;
                    }).collect(Collectors.toList());
                reverseStages.add(
                    Tuple3.of(
                        stageNode.stage,
                        new TableSchema(stageNode.colNames, stageNode.types),
                        localData
                    )
                );
            } else {
                // Stage without model data.
                reverseStages.add(Tuple3.of(stageNode.stage, null, null));
            }
        } else {
            if (lp != 0) {
                throw new IllegalArgumentException("There should not have nested pipeline or pipeline model.");
            }
        }
    };
    postOrder(stages, deserializer);
    // Post-order visiting consumed rows from the end, so reverse to pipeline order.
    return Lists.reverse(reverseStages);
}
/**
 * Builds a {@code LocalPredictor} from already-split pipeline stages.
 *
 * @param stages      per-stage tuples of (stage, model schema, model rows)
 * @param inputSchema schema of the prediction input
 */
public static LocalPredictor loadLocalPredictorFromPipelineModel(
        List <Tuple3 <PipelineStageBase <?>, TableSchema, List <Row>>> stages, TableSchema inputSchema) {
    return new LocalPredictor(loadMapperListFromStages(stages, inputSchema).getMappers());
}
/**
 * Builds a {@code LocalPredictor} directly from packed pipeline-model rows.
 *
 * @param pipelineModel packed pipeline-model rows
 * @param modelSchema   schema of the packed model table
 * @param inputSchema   schema of the prediction input
 */
public static LocalPredictor loadLocalPredictorFromPipelineModel(
        List <Row> pipelineModel, TableSchema modelSchema, TableSchema inputSchema) {
    return loadLocalPredictorFromPipelineModel(
        loadStagesFromPipelineModel(pipelineModel, modelSchema),
        inputSchema
    );
}
/**
 * Builds the mapper chain from packed pipeline-model rows.
 * The returned mappers are not opened; the caller must open them before use.
 */
public static MapperChain loadMapperListFromStages(
        List <Row> pipelineModel, TableSchema modelSchema, TableSchema inputSchema) {
    return loadMapperListFromStages(
        loadStagesFromPipelineModel(pipelineModel, modelSchema),
        inputSchema
    );
}
/**
 * Builds the mapper chain for the given stages. Each mapper consumes the
 * output schema produced by its predecessor, starting from {@code inputSchema}.
 * The returned mappers are not opened; the caller must open them before use.
 */
public static MapperChain loadMapperListFromStages(
        List <Tuple3 <PipelineStageBase <?>, TableSchema, List <Row>>> stages, TableSchema inputSchema) {
    List <Mapper> chain = new ArrayList <>();
    TableSchema currentSchema = inputSchema;
    for (Tuple3 <PipelineStageBase <?>, TableSchema, List <Row>> entry : stages) {
        Mapper next = createMapperFromStage(entry.f0, entry.f1, currentSchema, entry.f2);
        // The next stage sees this mapper's output schema as its input.
        currentSchema = next.getOutputSchema();
        chain.add(next);
    }
    return new MapperChain(chain.toArray(new Mapper[0]));
}
/**
 * Creates the mapper for a single stage, loading its model data when present.
 * The mapper is neither opened nor further initialized here.
 *
 * @param stage       the pipeline stage (model, recommender, or transformer)
 * @param modelSchema schema of the stage's model rows (may be null)
 * @param inputSchema input schema the mapper will receive
 * @param data        the stage's model rows (may be null for transformers)
 * @throws RuntimeException if the stage type is not supported
 */
public static Mapper createMapperFromStage(
        PipelineStageBase <?> stage, TableSchema modelSchema, TableSchema inputSchema, List <Row> data) {
    Mapper mapper = null;
    if (stage instanceof MapModel) {
        MapModel <?> mapModel = (MapModel <?>) stage;
        mapper = mapModel
            .mapperBuilder
            .apply(modelSchema, inputSchema, mapModel.getParams());
        // Model-backed mappers need their model rows loaded before use.
        if (data != null) {
            ((ModelMapper) mapper).loadModel(data);
        }
    } else if (stage instanceof BaseRecommender) {
        mapper = RecommenderUtil.createRecommMapper(
            (BaseRecommender <?>) stage, modelSchema, inputSchema, data
        );
    } else if (stage instanceof MapTransformer) {
        // Pure transformers have no model data.
        MapTransformer <?> mapTransformer = (MapTransformer <?>) stage;
        mapper = mapTransformer
            .mapperBuilder
            .apply(inputSchema, mapTransformer.getParams());
    } else {
        throw new RuntimeException("not support yet.");
    }
    // Combo mappers manage an internal mapper list that must be (re)built.
    if (mapper instanceof ComboModelMapper) {
        ((ComboModelMapper) mapper).newMapperList();
    }
    if (mapper instanceof ComboMapper) {
        ((ComboMapper) mapper).newMapperList();
    }
    return mapper;
}
/**
 * Builds a {@code LocalPredictor} from a pipeline model stored on disk.
 * <p>
 * If the pipeline params declare a model-stream file path, a single
 * {@code PipelineModelMapper} is used (so the whole pipeline can be refreshed
 * from the model stream); otherwise the per-stage mapper chain is used.
 *
 * @param filePath    path of the saved pipeline model
 * @param inputSchema schema of the prediction input
 */
static LocalPredictor loadLocalPredictorFromPipelineModel(
        FilePath filePath, TableSchema inputSchema) throws Exception {
    Tuple2 <TableSchema, List <Row>> readed = AkUtils.readFromPath(filePath);
    Tuple2 <TableSchema, Row> schemaAndMeta = ModelExporterUtils.loadMetaFromAkFile(filePath);
    Tuple2 <StageNode[], Params> stagesAndParams
        = ModelExporterUtils.deserializePipelineStagesAndParamsFromMeta(schemaAndMeta.f1, schemaAndMeta.f0);
    Mapper[] mappers = loadMapperListFromStages(readed.f1, readed.f0, inputSchema).getMappers();
    Params params = stagesAndParams.f1;
    if (params.get(ModelStreamScanParams.MODEL_STREAM_FILE_PATH) != null) {
        // Record the final output schema so the streamed pipeline model can
        // reproduce it. The last mapper's output schema is the pipeline's.
        TableSchema extendSchema = mappers[mappers.length - 1].getOutputSchema();
        params.set(PipelineModelMapper.PIPELINE_TRANSFORM_OUT_COL_NAMES, extendSchema.getFieldNames());
        params.set(PipelineModelMapper.PIPELINE_TRANSFORM_OUT_COL_TYPES,
            FlinkTypeConverter.getTypeString(extendSchema.getFieldTypes()));
        PipelineModelMapper pipelineModelMapper
            = new PipelineModelMapper(readed.f0, inputSchema, params);
        pipelineModelMapper.loadModel(readed.f1);
        return new LocalPredictor(pipelineModelMapper);
    }
    return new LocalPredictor(mappers);
}
/**
 * Scans downwards from {@code cursor} over the sorted row indices and returns
 * the position of the first row whose {@code field} value differs from the
 * value at {@code cursor} (or -1 if none). Rows between the returned position
 * (exclusive) and {@code cursor} (inclusive) share the same field value.
 */
private static int next(List <Row> all, Integer[] order, int cursor, int field) {
    final Object current = all.get(order[cursor]).getField(field);
    int pos = cursor - 1;
    while (pos >= 0) {
        Row row = all.get(order[pos]);
        if (row == null || row.getField(field) == null || !row.getField(field).equals(current)) {
            break;
        }
        pos--;
    }
    return pos;
}
}
|
jinfagang/MNN
|
source/geometry/GeometryGather.cpp
|
//
// GeometryGather.cpp
// MNN
//
// Created by MNN on 2020/06/09.
// Copyright © 2018, Alibaba Group Holding Limited
//
#include "geometry/GeometryComputer.hpp"
#include "geometry/GeometryComputerUtils.hpp"
#include "core/OpCommonUtils.hpp"
namespace MNN {
// Lowers Gather/GatherV2 to a single Loop (OpType_While) command: for each of
// the N entries in `indices`, one slice of `params` along `axis` is copied
// into the output. The loop body is a UnaryOp region copy whose source offset
// is driven by the indices tensor (iterIndexes = {-1, 1} means the output
// position advances linearly while the input position is looked up in tensor 1).
static void _computeGather(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs,
                           GeometryComputer::Context& context, CommandBuffer& res, const Op* op) {
    // The axis can come from a third input tensor, or from the op attribute
    // (the attribute wins when both are present).
    int axis = 0;
    if (inputs.size() == 3) {
        const Tensor *axisTensor = inputs[2];
        axis = axisTensor->host<int32_t>()[0];
    }
    if (op->main_type() == OpParameter_Axis) {
        axis = op->main_as_Axis()->axis();
    }
    auto params = inputs[0];
    auto indices = inputs[1];
    auto output = outputs[0];
    MNN_ASSERT(axis > -params->buffer().dimensions && axis < params->buffer().dimensions);
    if (axis < 0) {
        // Normalize negative axis to a non-negative dimension index.
        axis = params->buffer().dimensions + axis;
    }
    const int gatherDimSize = params->buffer().dim[axis].extent;
    const int N = indices->elementSize();
    MNN_ASSERT(gatherDimSize <= std::numeric_limits<int32_t>::max());
    // outside / inside: product of the dims before / after `axis`.
    // One gathered slice is `inside` contiguous elements, repeated `outside` times.
    int inside = 1;
    int outside = 1;
    for (int i = 0; i < axis; ++i) {
        outside *= params->length(i);
    }
    for (int i = axis + 1; i < params->dimensions(); ++i) {
        inside *= params->length(i);
    }
    // Build the Loop op in a fresh flatbuffer. The region command copies
    // (outside x 1 x inside) elements per iteration.
    flatbuffers::FlatBufferBuilder builder;
    OpBuilder unaryOp(builder);
    unaryOp.add_type(OpType_UnaryOp);
    auto unaryOpPffset = unaryOp.Finish();
    auto iterIndexesOffset = builder.CreateVector(std::vector<int>{-1, 1});
    auto stepOffset = builder.CreateVector(std::vector<int>{inside, inside});
    auto indexesOffset = builder.CreateVector(std::vector<int>{2, 0});
    auto sizeOffset = builder.CreateVector(std::vector<int>{outside, 1, inside});
    // View 0: the output, laid out as (outside, N, inside).
    auto view0Stride = builder.CreateVector(std::vector<int>{inside * N, inside, 1});
    ViewBuilder view0Builder(builder);
    view0Builder.add_offset(0);
    view0Builder.add_stride(view0Stride);
    auto view0Offset = view0Builder.Finish();
    // View 1: the input params, laid out as (outside, length(axis), inside).
    auto view1Stride = builder.CreateVector(std::vector<int>{inside * params->length(axis), inside, 1});
    ViewBuilder view1Builder(builder);
    view1Builder.add_offset(0);
    view1Builder.add_stride(view1Stride);
    auto view1Offset = view1Builder.Finish();
    auto viewAllOffset = builder.CreateVector<flatbuffers::Offset<View>>({view0Offset, view1Offset});
    RegionCommandBuilder rcmdBuild(builder);
    rcmdBuild.add_op(unaryOpPffset);
    rcmdBuild.add_view(viewAllOffset);
    rcmdBuild.add_indexes(indexesOffset);
    rcmdBuild.add_iterIndexes(iterIndexesOffset);
    rcmdBuild.add_steps(stepOffset);
    rcmdBuild.add_size(sizeOffset);
    auto rcmdOffset = rcmdBuild.Finish();
    auto rcmdAllOffset = builder.CreateVector<flatbuffers::Offset<RegionCommand>>({rcmdOffset});
    auto inputIndexesOffset = builder.CreateVector(std::vector<int>{0, 1});
    auto outputIndexesOffset = builder.CreateVector(std::vector<int>{2});
    LoopParamBuilder loopBuilder(builder);
    loopBuilder.add_commands(rcmdAllOffset);
    // One loop iteration per gathered index.
    loopBuilder.add_loopNumber(indices->elementSize());
    loopBuilder.add_tensorNumber(3);
    loopBuilder.add_inputIndexes(inputIndexesOffset);
    loopBuilder.add_outputIndexes(outputIndexesOffset);
    auto loopOffset = loopBuilder.Finish();
    flatbuffers::Offset<flatbuffers::String> nameOffset;
    if (nullptr != op->name()) {
        nameOffset = builder.CreateString(op->name()->c_str());
    }
    OpBuilder finishBuilder(builder);
    finishBuilder.add_main(loopOffset.Union());
    finishBuilder.add_main_type(OpParameter_LoopParam);
    finishBuilder.add_type(OpType_While);
    if (nullptr != op->name()) {
        finishBuilder.add_name(nameOffset);
    }
    builder.Finish(finishBuilder.Finish());
    auto cmd = GeometryComputerUtils::makeCommand(builder, {params, indices}, outputs);
    TensorUtils::getDescribe(output)->memoryType = Tensor::InsideDescribe::MEMORY_BACKEND;
    res.command.emplace_back(std::move(cmd));
}
// Geometry lowering for Gather / GatherV2. onCompute builds the Loop command;
// onRecompute patches the previously built command in place when only the
// tensor shapes changed (avoids re-serializing the flatbuffer).
class GeometryGather : public GeometryComputer {
public:
    virtual bool onCompute(const Op* op, const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs,
                           Context& context, CommandBuffer& res) const override {
        _computeGather(inputs, outputs, context, res, op);
        return true;
    }
    virtual bool onRecompute(const Op* op, const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs,
                             Context& context, CommandBuffer& cmd) const override {
        // Only a single cached Loop command can be patched; otherwise force a
        // full onCompute.
        if (cmd.command.size() != 1) {
            return false;
        }
        // Recompute axis / inside / outside exactly as onCompute does.
        int axis = 0;
        if (inputs.size() == 3) {
            const Tensor *axisTensor = inputs[2];
            axis = axisTensor->host<int32_t>()[0];
        }
        if (op->main_type() == OpParameter_Axis) {
            axis = op->main_as_Axis()->axis();
        }
        auto params = inputs[0];
        auto indices = inputs[1];
        auto output = outputs[0];
        MNN_ASSERT(axis > -params->buffer().dimensions && axis < params->buffer().dimensions);
        if (axis < 0) {
            axis = params->buffer().dimensions + axis;
        }
        const int gatherDimSize = params->buffer().dim[axis].extent;
        const int N = indices->elementSize();
        MNN_ASSERT(gatherDimSize <= std::numeric_limits<int32_t>::max());
        int inside = 1;
        int outside = 1;
        for (int i = 0; i < axis; ++i) {
            outside *= params->length(i);
        }
        for (int i = axis + 1; i < params->dimensions(); ++i) {
            inside *= params->length(i);
        }
        auto loopCmd = cmd.command[0];
        auto param = loopCmd->op->main_as_LoopParam();
        // Reset parameters for last command
        // NOTE(review): the flatbuffer is mutated in place (SetField and raw
        // writes through data()); this relies on the buffer being owned by
        // this command and not shared.
        ((flatbuffers::Table*)param)->SetField(LoopParam::VT_LOOPNUMBER, indices->elementSize(), 0);
        auto rgcmd = param->commands()->GetAs<RegionCommand>(0);
        auto step = (int*)rgcmd->steps()->data();
        step[0] = inside;
        step[1] = inside;
        auto size = (int*)rgcmd->size()->data();
        size[0] = outside;
        size[2] = inside;
        auto view0Stride = (int*)rgcmd->view()->GetAs<View>(0)->stride()->data();
        view0Stride[0] = inside * N;
        view0Stride[1] = inside;
        auto view1Stride = (int*)rgcmd->view()->GetAs<View>(1)->stride()->data();
        view1Stride[0] = inside * params->length(axis);
        view1Stride[1] = inside;
        return true;
    }
};
// Geometry lowering for GatherND. Strategy: flatten each nd-index into a
// single linear slice offset by multiplying it (as float) with a precomputed
// stride vector via MatMul, cast the result back to int, then emit a Loop
// command that copies one slice of `mSliceSize` elements per index.
class GeometryGatherND : public GeometryComputer {
public:
    // Positions of the cached intermediate tensors inside CommandBuffer::extras;
    // onRecompute relies on this exact layout.
    enum MID_POSITION {
        P_constStride = 0,
        P_reshapeIndice = 1,
        P_reshapeIndiceFloat = 2,
        P_indiceFloat = 3,
        P_indiceOneLine = 4,
        P_MAX
    };
    // Serializes the Loop (OpType_While) op that copies `mSliceN` slices of
    // `mSliceSize` elements each, with the source offset looked up per
    // iteration from the flattened-index tensor.
    static void makeLoopCommand(flatbuffers::FlatBufferBuilder& builder, int mSliceSize, int mSliceN, const Op* op) {
        OpBuilder unaryOp(builder);
        unaryOp.add_type(OpType_UnaryOp);
        auto unaryOpPffset = unaryOp.Finish();
        auto iterIndexesOffset = builder.CreateVector(std::vector<int>{-1, 1});
        auto stepOffset = builder.CreateVector(std::vector<int>{mSliceSize, 1});
        auto indexesOffset = builder.CreateVector(std::vector<int>{2, 0});
        auto sizeOffset = builder.CreateVector(std::vector<int>{1, 1, mSliceSize});
        // View 0
        auto view0Stride = builder.CreateVector(std::vector<int>{mSliceSize, mSliceSize, 1});
        ViewBuilder view0Builder(builder);
        view0Builder.add_offset(0);
        view0Builder.add_stride(view0Stride);
        auto view0Offset = view0Builder.Finish();
        // view0 and view1 is the same
        auto viewAllOffset = builder.CreateVector<flatbuffers::Offset<View>>({view0Offset, view0Offset});
        RegionCommandBuilder rcmdBuild(builder);
        rcmdBuild.add_op(unaryOpPffset);
        rcmdBuild.add_view(viewAllOffset);
        rcmdBuild.add_indexes(indexesOffset);
        rcmdBuild.add_iterIndexes(iterIndexesOffset);
        rcmdBuild.add_steps(stepOffset);
        rcmdBuild.add_size(sizeOffset);
        auto rcmdOffset = rcmdBuild.Finish();
        auto rcmdAllOffset = builder.CreateVector<flatbuffers::Offset<RegionCommand>>({rcmdOffset});
        auto inputIndexesOffset = builder.CreateVector(std::vector<int>{0, 1});
        auto outputIndexesOffset = builder.CreateVector(std::vector<int>{2});
        LoopParamBuilder loopBuilder(builder);
        loopBuilder.add_commands(rcmdAllOffset);
        loopBuilder.add_loopNumber(mSliceN);
        loopBuilder.add_tensorNumber(3);
        loopBuilder.add_inputIndexes(inputIndexesOffset);
        loopBuilder.add_outputIndexes(outputIndexesOffset);
        auto loopOffset = loopBuilder.Finish();
        flatbuffers::Offset<flatbuffers::String> nameOffset;
        if (nullptr != op->name()) {
            nameOffset = builder.CreateString(op->name()->c_str());
        }
        OpBuilder finishBuilder(builder);
        finishBuilder.add_main(loopOffset.Union());
        finishBuilder.add_main_type(OpParameter_LoopParam);
        finishBuilder.add_type(OpType_While);
        if (nullptr != op->name()) {
            finishBuilder.add_name(nameOffset);
        }
        builder.Finish(finishBuilder.Finish());
    }
    virtual bool onRecompute(const Op* op, const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs,
                             Context& context, CommandBuffer& cmd) const override {
        // Patch only when the full set of cached mid tensors from onCompute
        // is present; otherwise fall back to a full onCompute.
        if (cmd.extras.size() != P_MAX) {
            return false;
        }
        MNN_ASSERT(2 == inputs.size());
        MNN_ASSERT(1 == outputs.size());
        auto params = inputs[0];
        auto indice = inputs[1];
        auto output = outputs[0];
        // mSliceN: number of gathered slices; mSliceSize: elements per slice.
        int mSliceN = 1;
        int mSliceSize = 1;
        for (int i = 0; i < indice->dimensions() - 1; ++i) {
            mSliceN *= indice->length(i);
        }
        auto indiceNd = indice->length(indice->dimensions() - 1);
        for (int i = indiceNd; i < params->dimensions(); ++i) {
            mSliceSize *= params->length(i);
        }
        auto paramSize = params->elementSize();
        auto constStride = cmd.extras[P_constStride];
        auto reshapeIndice = cmd.extras[P_reshapeIndice];
        auto reshapeIndiceFloat = cmd.extras[P_reshapeIndiceFloat];
        auto indiceFloat = cmd.extras[P_indiceFloat];
        auto indiceOneLine = cmd.extras[P_indiceOneLine];
        // Set length
        // Reallocate the stride tensor only if it grew.
        bool needAlloc = constStride->length(0) < indiceNd;
        constStride->setLength(0, indiceNd);
        reshapeIndice->setLength(0, mSliceN);
        reshapeIndice->setLength(1, indiceNd);
        reshapeIndiceFloat->setLength(0, mSliceN);
        reshapeIndiceFloat->setLength(1, indiceNd);
        indiceFloat->setLength(0, mSliceN);
        indiceOneLine->setLength(0, mSliceN);
        if (needAlloc) {
            if (!context.allocTensor(constStride.get())) {
                return false;
            }
        }
        // Refill the per-dimension linear strides.
        // NOTE(review): constStride is created as an int tensor but written
        // through host<float>() so it can feed the float MatMul — confirm
        // this aliasing is intended.
        for (int i=0; i<indiceNd; ++i) {
            int dimCount = paramSize / params->length(i);
            constStride->host<float>()[i] = (float)dimCount;
            paramSize = dimCount;
        }
        // Rebind reshapeIndice as a virtual view over the indices tensor.
        reshapeIndice->buffer().device = 0;
        reshapeIndice->buffer().host = 0;
        auto des = TensorUtils::getDescribe(reshapeIndice.get());
        des->extra.offset = 0;
        des->memoryType = Tensor::InsideDescribe::MEMORY_VIRTUAL;
        des->backend = nullptr;
        des->regions = {GeometryComputerUtils::makeRawAddressRef(indice, 0, mSliceN * indiceNd)};
        auto loopCmd = cmd.command[cmd.command.size() - 1];
        auto param = loopCmd->op->main_as_LoopParam();
        // Reset parameters for last command
        // NOTE(review): in-place flatbuffer mutation, same caveat as GeometryGather.
        ((flatbuffers::Table*)param)->SetField(LoopParam::VT_LOOPNUMBER, mSliceN, 0);
        auto rgCmd = param->commands()->GetAs<RegionCommand>(0);
        auto stepData = (int*)rgCmd->steps()->data();
        stepData[0] = mSliceSize;
        auto sizeData = (int*)rgCmd->size()->data();
        sizeData[2] = mSliceSize;
        auto strideData = (int*)rgCmd->view()->GetAs<View>(0)->stride()->data();
        strideData[0] = mSliceSize;
        strideData[1] = mSliceSize;
        strideData = (int*)rgCmd->view()->GetAs<View>(1)->stride()->data();
        strideData[0] = mSliceSize;
        strideData[1] = mSliceSize;
        return true;
    }
    virtual bool onCompute(const Op* op, const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs,
                           Context& context, CommandBuffer& res) const override {
        MNN_ASSERT(2 == inputs.size());
        MNN_ASSERT(1 == outputs.size());
        auto params = inputs[0];
        auto indice = inputs[1];
        auto output = outputs[0];
        // mSliceN: number of gathered slices; mSliceSize: elements per slice.
        int mSliceN = 1;
        int mSliceSize = 1;
        for (int i = 0; i < indice->dimensions() - 1; ++i) {
            mSliceN *= indice->length(i);
        }
        auto indiceNd = indice->length(indice->dimensions() - 1);
        for (int i = indiceNd; i < params->dimensions(); ++i) {
            mSliceSize *= params->length(i);
        }
        auto paramSize = params->elementSize();
        // Intermediate tensors, cached in res.extras for onRecompute.
        std::array<std::shared_ptr<Tensor>, 5> midTensors;
        std::shared_ptr<Tensor> constStride(Tensor::createDevice<int>({indiceNd, 1}));
        if (!context.allocTensor(constStride.get())) {
            return false;
        }
        midTensors[P_constStride] = constStride;
        // Per-dimension linear strides, stored as float for the MatMul below.
        // NOTE(review): written via host<float>() although created as int —
        // confirm intended (see onRecompute).
        for (int i=0; i<indiceNd; ++i) {
            int dimCount = paramSize / params->length(i);
            constStride->host<float>()[i] = (float)dimCount;
            paramSize = dimCount;
        }
        // View the indices as an (mSliceN x indiceNd) matrix without copying.
        std::shared_ptr<Tensor> reshapeIndice(Tensor::createDevice<int>({mSliceN, indiceNd}));
        midTensors[P_reshapeIndice] = reshapeIndice;
        {
            auto des = TensorUtils::getDescribe(reshapeIndice.get());
            des->memoryType = Tensor::InsideDescribe::MEMORY_VIRTUAL;
            des->regions = {GeometryComputerUtils::makeRawAddressRef(indice, 0, mSliceN * indiceNd)};
        }
        // Cast int indices -> float so they can be multiplied by the strides.
        std::shared_ptr<Tensor> reshapeIndiceFloat(Tensor::createDevice<float>({mSliceN, indiceNd}));
        midTensors[P_reshapeIndiceFloat] = reshapeIndiceFloat;
        {
            flatbuffers::FlatBufferBuilder builder;
            CastParamBuilder builder_(builder);
            builder_.add_dstT(DataType_DT_FLOAT);
            auto mainOffset = builder_.Finish().Union();
            OpBuilder opB(builder);
            opB.add_type(OpType_Cast);
            opB.add_main(mainOffset);
            opB.add_main_type(OpParameter_CastParam);
            builder.Finish(opB.Finish());
            auto cmd = GeometryComputerUtils::makeCommand(builder, {reshapeIndice.get()}, {reshapeIndiceFloat.get()});
            res.command.emplace_back(std::move(cmd));
        }
        std::shared_ptr<Tensor> indiceFloat(Tensor::createDevice<float>({mSliceN, 1}));
        midTensors[P_indiceFloat] = indiceFloat;
        {
            // MatMul: (mSliceN x indiceNd) * (indiceNd x 1) -> flattened offsets.
            auto cmd = GeometryComputerUtils::makeMatMul(reshapeIndiceFloat.get(), constStride.get(), indiceFloat.get());
            res.command.emplace_back(std::move(cmd));
        }
        // Cast the float offsets back to int for the Loop's index lookup.
        std::shared_ptr<Tensor> indiceOneLine(Tensor::createDevice<int>({mSliceN, 1}));
        midTensors[P_indiceOneLine] = indiceOneLine;
        {
            flatbuffers::FlatBufferBuilder builder;
            CastParamBuilder builder_(builder);
            builder_.add_dstT(DataType_DT_INT32);
            auto mainOffset = builder_.Finish().Union();
            OpBuilder opB(builder);
            opB.add_type(OpType_Cast);
            opB.add_main(mainOffset);
            opB.add_main_type(OpParameter_CastParam);
            builder.Finish(opB.Finish());
            auto cmd = GeometryComputerUtils::makeCommand(builder, {indiceFloat.get()}, {indiceOneLine.get()});
            res.command.emplace_back(std::move(cmd));
        }
        // NOTE(review): indiceData and outputDes are unused below — candidates
        // for removal.
        auto indiceData = indice->host<int32_t>();
        auto outputDes = TensorUtils::getDescribe(output);
        flatbuffers::FlatBufferBuilder builder;
        makeLoopCommand(builder, mSliceSize, mSliceN, op);
        auto cmd = GeometryComputerUtils::makeCommand(builder, {params, indiceOneLine.get()}, outputs);
        TensorUtils::getDescribe(output)->memoryType = Tensor::InsideDescribe::MEMORY_BACKEND;
        res.command.emplace_back(std::move(cmd));
        res.extras.insert(res.extras.end(), midTensors.begin(), midTensors.end());
        return true;
    }
};
// Registers the gather geometry computers with the loop-capable compiler path.
static void _create() {
    std::shared_ptr<GeometryComputer> gather(new GeometryGather);
    std::shared_ptr<GeometryComputer> gatherNd(new GeometryGatherND);
    // Gather and GatherV2 share one implementation; GatherND has its own.
    GeometryComputer::registerGeometryComputer(gather, {OpType_Gather, OpType_GatherV2}, Runtime::Compiler_Loop);
    GeometryComputer::registerGeometryComputer(gatherNd, {OpType_GatherND}, Runtime::Compiler_Loop);
}
REGISTER_GEOMETRY(GeometryGather, _create);
} // namespace MNN
|
pankajdarak-xlnx/pyxir
|
include/pyxir/pyxir.hpp
|
<reponame>pankajdarak-xlnx/pyxir<gh_stars>10-100
/*
* Copyright 2020 Xilinx Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <memory>
#include "graph/xgraph.hpp"
#include "io/io.hpp"
#include "pyxir_api.hpp"
#include "runtime/constants.hpp"
#include "runtime/run_options.hpp"
#include "runtime/runtime_module_factory.hpp"
namespace pyxir {
/**
* @brief Partition the provided XGraph for the given target(s)
* @param xg The XGraph to be partitioned
* @param targets The target(s) for which to partition the XGraph (only
* one target is supported at the moment)
* @param last_layer The last layer up to which to partition the
* XGraph
*/
PX_API void partition(std::shared_ptr<graph::XGraph> xg,
const std::vector<std::string> &targets,
const std::string &last_layer = "");
/**
* @brief Build a runtime module for executing the provided XGraph model
* @param xg The XGraph model to be executed
* @param target The target for executing the XGraph model
* @param in_tensor_names The names of the input tensors that will be
* provided in the same order
* @param out_tensor_names The names of the output tensors that will be
* provided in the same order
* @param runtime (optional) The runtime to be used for executing the model
* @param run_options (optional) The specified run options, e.g. whether online
* quantization should be enabled
* @returns A runtime module that can be used for execution of the provided
* XGraph
*/
PX_API RtModHolder build_rt(std::shared_ptr<graph::XGraph> &xg,
const std::string &target,
const std::vector<std::string> &in_tensor_names,
const std::vector<std::string> &out_tensor_names,
const std::string &runtime = runtime::pxCpuTfRuntimeModule,
RunOptionsHolder const &run_options = nullptr);
/**
* @brief Load an XGraph model from file
* @param model_path The path to the model graph file
* @param params_path The path to the model params file
* @returns A shared pointer to the loaded XGraph object
*/
PX_API std::shared_ptr<graph::XGraph> load(const std::string &model_path,
const std::string ¶ms_path);
/**
* @brief Return whether the Python interpreter is initialized (for
* internal use)
*/
PX_API bool py_is_initialized();
} // pyxir
|
lamia100/yona
|
test/models/SearchResultTests.java
|
package models;
import org.junit.Test;
import java.util.List;
import static org.fest.assertions.Assertions.assertThat;
/**
* @author <NAME>
*/
/**
 * Tests for {@code SearchResult.makeSnippets}: extracting keyword-centered
 * snippets from search-hit contents.
 * NOTE(review): "makeSnipet"/"snipets" are misspellings of "snippet(s)";
 * renaming test methods would be safe but is left to a dedicated cleanup.
 */
public class SearchResultTests {
    @Test
    public void makeSnipet() {
        // Given: contents with a single occurrence region of the keyword.
        String contents = "자동링크로 바꿀 수 있는 url은 자동링크처럼 보여주기 이슈 본문이나 댓글 등에 Yobi의 어떤 페이지에 대한 링크를 넣었을 때, 이를 렌더링해서 보여줄 때는 자동링크로 보여주면 좋을 것 같습니다. 예를 들어 `http://yobi.navercorp.com/dlab/hive/issue/1478`를 자동으로 #1478 로 보여준다거나, `http://yobi.navercorp.com/dlab/hive/commit/2f0ef4c0bbe535eb3475b0e7cdaadf86add6f220?branch=master`는 2f0ef4c로 보여주는 식입니다.";
        String keyword = "이슈";
        SearchResult searchResult = new SearchResult();
        searchResult.setKeyword(keyword);
        // When: snippet window of 10 characters around the keyword.
        List<String> snipets = searchResult.makeSnippets(contents, 10);
        // Then: exactly one snippet containing the keyword in context.
        assertThat(snipets.size()).isEqualTo(1);
        assertThat(snipets).contains("링크처럼 보여주기 이슈 본문이나 댓글 등");
    }
    @Test
    public void merge_overlap() {
        // Given: contents where several keyword occurrences are close enough
        // that their snippet windows overlap and must be merged.
        String contents = "#1477 마일스톤 이슈리스트 화면 개선 #1466 이슈에서 응준님께서 말씀주신 내용을 처리하고자 의견을 기다립니다. 1. github에서처럼 마일스톤내의 이슈검색시, 이슈리스트로 해당마일스톤을 검색필터로 선정하여 이동 * 별다른 개발없이 링크만 바꿔주면됨 * back버튼으로 마일스톤 리스트화면으로 이동이 가능하며, 이슈리스트의 검색기능을 그대로 활용가능 2. 마일스톤내 이슈화면에 검색기능을 추가 * 추가기능을 개발하다보면, 이슈리스트화면과 같아짐 * 향후, 마일스톤내 이슈페이지만의 기능을 넣고자 한다면, 이 방법이 나아보임 그럼 의견주시면 주신대로 작업진행하도록 하겠습니다~";
        String keyword = "이슈";
        SearchResult searchResult = new SearchResult();
        searchResult.setKeyword(keyword);
        // When: snippet window of 40 characters around the keyword.
        List<String> snipets = searchResult.makeSnippets(contents, 40);
        // Then: overlapping windows collapse into two merged snippets.
        assertThat(snipets.size()).isEqualTo(2);
        assertThat(snipets).contains("#1477 마일스톤 이슈리스트 화면 개선 #1466 이슈에서 응준님께서 말씀주신 내용을 처리하고자 의견을 기다립니다. 1. github에서처럼 마일스톤내의 이슈검색시, 이슈리스트로 해당마일스톤을 검색필터로 선정하여 이동 * 별다른 개발없이 링크");
        assertThat(snipets).contains("바꿔주면됨 * back버튼으로 마일스톤 리스트화면으로 이동이 가능하며, 이슈리스트의 검색기능을 그대로 활용가능 2. 마일스톤내 이슈화면에 검색기능을 추가 * 추가기능을 개발하다보면, 이슈리스트화면과 같아짐 * 향후, 마일스톤내 이슈페이지만의 기능을 넣고자 한다면, 이 방법이 나아보임 그럼 의견주시면 주");
    }
}
|
D3ATHBRINGER13/Protocol
|
java/java-codec/src/main/java/org/cloudburstmc/protocol/java/codec/v754/serializer/play/clientbound/ContainerSetContentSerializer_v754.java
|
<reponame>D3ATHBRINGER13/Protocol
package org.cloudburstmc.protocol.java.codec.v754.serializer.play.clientbound;
import io.netty.buffer.ByteBuf;
import lombok.AccessLevel;
import lombok.NoArgsConstructor;
import org.cloudburstmc.protocol.java.codec.JavaCodecHelper;
import org.cloudburstmc.protocol.java.codec.JavaPacketSerializer;
import org.cloudburstmc.protocol.java.data.inventory.ItemStack;
import org.cloudburstmc.protocol.java.packet.play.clientbound.ContainerSetContentPacket;
@NoArgsConstructor(access = AccessLevel.PROTECTED)
public class ContainerSetContentSerializer_v754 implements JavaPacketSerializer<ContainerSetContentPacket> {
    public static final ContainerSetContentSerializer_v754 INSTANCE = new ContainerSetContentSerializer_v754();

    /**
     * Writes the container id (byte), the item count (short), then each item.
     */
    @Override
    public void serialize(ByteBuf buffer, JavaCodecHelper helper, ContainerSetContentPacket packet) {
        buffer.writeByte(packet.getContainerId());
        buffer.writeShort(packet.getItems().length);
        for (ItemStack item : packet.getItems()) {
            helper.writeItemStack(buffer, item);
        }
    }

    /**
     * Reads the container id, the item count, then each item.
     * Fix: the original installed a partially-filled array on the packet and
     * redundantly called setItems on every loop iteration; the array is now
     * filled completely before being set once.
     */
    @Override
    public void deserialize(ByteBuf buffer, JavaCodecHelper helper, ContainerSetContentPacket packet) {
        packet.setContainerId(buffer.readUnsignedByte());
        // Count is written with writeShort; mirrors the serialize side.
        int count = buffer.readShort();
        ItemStack[] items = new ItemStack[count];
        for (int index = 0; index < count; index++) {
            items[index] = helper.readItemStack(buffer);
        }
        packet.setItems(items);
    }
}
|
yzq992778484/MVCHelper
|
app/src/main/java/com/shizhefei/test/models/parser/JsonParser.java
|
<reponame>yzq992778484/MVCHelper<gh_stars>1000+
package com.shizhefei.test.models.parser;
import com.google.gson.Gson;
import com.google.gson.internal.$Gson$Types;
import com.shizhefei.mvc.http.NetworkExeption;
import com.shizhefei.mvc.http.okhttp.ResponseParser;
import java.io.Reader;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import okhttp3.Response;
/**
* Created by LuckyJayce on 2016/7/11.
*/
public abstract class JsonParser<DATA> implements ResponseParser<DATA> {
    // Gson is thread-safe and relatively expensive to construct, so share a
    // single instance instead of allocating one per parse call.
    private static final Gson GSON = new Gson();
    // The concrete DATA type, resolved from the generic superclass.
    private final Type type;

    public JsonParser(Class<?> clas) {
        type = getSuperclassTypeParameter(clas);
    }

    public JsonParser() {
        type = getSuperclassTypeParameter(getClass());
    }

    /**
     * Parses a successful HTTP response body as JSON into DATA.
     *
     * @throws NetworkExeption if the response is not successful
     */
    @Override
    public DATA parse(Response response) throws Exception {
        if (response.isSuccessful()) {
            return parse(response.body().charStream());
        }
        throw new NetworkExeption(response);
    }

    /** Parses JSON read from a character stream into DATA. */
    protected DATA parse(Reader json) throws Exception {
        return GSON.fromJson(json, type);
    }

    /** Parses a JSON string into DATA. */
    protected DATA parse(String json) throws Exception {
        return GSON.fromJson(json, type);
    }

    /**
     * Resolves the actual type argument DATA from the generic superclass of
     * {@code subclass}; fails if the subclass is raw (no type parameter).
     */
    private static Type getSuperclassTypeParameter(Class<?> subclass) {
        Type superclass = subclass.getGenericSuperclass();
        if (superclass instanceof Class) {
            throw new RuntimeException("Missing type parameter.");
        }
        ParameterizedType parameterized = (ParameterizedType) superclass;
        return $Gson$Types.canonicalize(parameterized.getActualTypeArguments()[0]);
    }
}
|
chenyacheng/AndroidComponentProject
|
commonlib/src/main/java/com/chenyacheng/commonlib/base/BaseApplication.java
|
<filename>commonlib/src/main/java/com/chenyacheng/commonlib/base/BaseApplication.java
package com.chenyacheng.commonlib.base;
import android.app.Application;
/**
 * Base {@link Application} shared by all component modules.
 * <p>
 * Each component implements its own Application and extends BaseApplication.
 * An Application implemented in a component module must be registered in that
 * module's debug AndroidManifest.xml, otherwise it cannot be used.
 * A component module's Application should live in the java/debug folder and
 * must not be placed in the main source set.
 *
 * @author chenyacheng
 * @date 2019/01/18
 */
public class BaseApplication extends Application {
    // Process-wide singleton, assigned in onCreate() by the framework-created
    // Application instance.
    private static BaseApplication application;

    /** Returns the process-wide Application instance (null before onCreate runs). */
    public static BaseApplication getApplication() {
        return application;
    }

    @Override
    public void onCreate() {
        super.onCreate();
        application = this;
    }
}
|
MathiasReker/Delfinen
|
src/main/java/com/app/controllers/menuactions/DisplayArrearsMenuAction.java
|
package com.app.controllers.menuactions;
import com.app.controllers.PaymentController;
/**
 * Menu action that lists all members who are in arrears with their payments.
 * Delegates the actual lookup and display to {@link PaymentController}.
 */
public class DisplayArrearsMenuAction extends MenuAction {
    public DisplayArrearsMenuAction(String description) {
        super(description);
    }

    /** Invoked when the menu entry is selected. */
    @Override
    public void run() {
        new PaymentController().displayMembersInArrears();
    }
}
|
idGain/ewp-node
|
src/main/java/pt/ulisboa/ewp/node/domain/entity/KeyStoreConfiguration.java
|
package pt.ulisboa.ewp.node.domain.entity;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.EntityListeners;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.Lob;
import javax.persistence.Table;
import org.hibernate.annotations.Type;
import pt.ulisboa.ewp.node.domain.listener.EntityAuditListener;
/**
 * JPA entity holding the EWP node's keystore bytes and the alias of the
 * certificate to use from it. Property (getter) access is used for mapping.
 */
@Entity
@EntityListeners(EntityAuditListener.class)
@Table(name = "EWP_KEYSTORE_CONFIGURATION")
public class KeyStoreConfiguration {
    private long id;
    private byte[] keystore;
    private String certificateAlias;

    /** Required by JPA. */
    protected KeyStoreConfiguration() {}

    protected KeyStoreConfiguration(byte[] keystore, String certificateAlias) {
        this.keystore = keystore;
        this.certificateAlias = certificateAlias;
    }

    @Id
    @GeneratedValue(strategy = GenerationType.IDENTITY)
    @Column(name = "id", unique = true, nullable = false)
    public long getId() {
        return id;
    }

    public void setId(long id) {
        this.id = id;
    }

    @Lob
    @Type(type = "org.hibernate.type.BinaryType")
    @Column(name = "keystore", columnDefinition = "BYTEA") // previously BLOB
    public byte[] getKeystore() {
        return keystore;
    }

    public void setKeystore(byte[] keystore) {
        this.keystore = keystore;
    }

    @Column(name = "certificate_alias", nullable = false)
    public String getCertificateAlias() {
        return certificateAlias;
    }

    public void setCertificateAlias(String certificateAlias) {
        this.certificateAlias = certificateAlias;
    }

    /** Factory for a fully-populated configuration. */
    public static KeyStoreConfiguration create(byte[] keystore, String certificateAlias) {
        return new KeyStoreConfiguration(keystore, certificateAlias);
    }

    @Override
    public String toString() {
        // Fix: keystore may be null on a JPA-instantiated entity before the
        // field is populated; avoid the NullPointerException the original
        // toString() would throw in that state.
        return String.format(
            "KeyStoreConfiguration(keystore length = %d bytes; certificateAlias = %s)",
            keystore == null ? 0 : keystore.length, certificateAlias);
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.