code stringlengths 3 1.05M | repo_name stringlengths 4 116 | path stringlengths 4 991 | language stringclasses 9 values | license stringclasses 15 values | size int32 3 1.05M |
|---|---|---|---|---|---|
package example.repo;
import example.model.Customer1794;
import java.util.List;
import org.springframework.data.repository.CrudRepository;
/**
 * Spring Data repository for {@link Customer1794} entities keyed by {@code Long} id.
 * Basic CRUD operations are inherited from {@link CrudRepository}.
 */
public interface Customer1794Repository extends CrudRepository<Customer1794, Long> {

	/**
	 * Finds all customers whose last name exactly matches the argument.
	 * The query is derived from the method name by Spring Data.
	 */
	List<Customer1794> findByLastName(String lastName);
}
| spring-projects/spring-data-examples | jpa/deferred/src/main/java/example/repo/Customer1794Repository.java | Java | apache-2.0 | 284 |
"use strict";
define(["lib/three.min"], function() {
// Wraps the raw camera/canvas feed in a minimal ThreeJS scene so it can be
// drawn as a full-screen textured quad behind the augmented content.
var Reality = function(sourceCanvas) {
    var realityCamera = new THREE.Camera();
    var realityScene = new THREE.Scene();
    // Texture backed by the source canvas, mapped onto a 2x2 quad that
    // fills the default camera's view; depth is disabled so it always
    // renders as a flat backdrop.
    var canvasTexture = new THREE.Texture(sourceCanvas);
    var backdrop = new THREE.Mesh(
        new THREE.PlaneGeometry(2, 2, 0),
        new THREE.MeshBasicMaterial({
            map: canvasTexture,
            depthTest: false,
            depthWrite: false
        })
    );
    realityScene.add(backdrop);
    // Flags the texture dirty so ThreeJS re-uploads the canvas contents
    // on the next render pass.
    function update() {
        canvasTexture.needsUpdate = true;
    }
    return {
        camera: realityCamera,
        scene: realityScene,
        update: update,
    };
};
// Container for the virtual (augmented) objects, with a camera whose
// projection matrix is driven externally (e.g. by a marker tracker).
var Scene = function() {
    var virtualScene = new THREE.Scene();
    var virtualCamera = new THREE.Camera();
    function add(object) {
        virtualScene.add(object);
    }
    function remove(object) {
        virtualScene.remove(object);
    }
    function setProjectionMatrix(matrix) {
        virtualCamera.projectionMatrix.setFromArray(matrix);
    }
    return {
        scene: virtualScene,
        camera: virtualCamera,
        add: add,
        remove: remove,
        setProjectionMatrix: setProjectionMatrix,
    };
};
// Assembles the AR view: a WebGL canvas that first draws the camera feed
// (reality) and then composites the tracked virtual scene on top of it.
var create = function(dimensions, sourceCanvas) {
    var glCanvas = document.createElement('canvas');
    var renderer = new THREE.WebGLRenderer({canvas: glCanvas});
    renderer.setSize(dimensions.width, dimensions.height);
    // Clearing is done manually so the second pass overlays the first
    // instead of wiping it.
    renderer.autoClear = false;
    var reality = new Reality(sourceCanvas);
    var virtual = new Scene();
    // Single white spotlight far along +Z, aimed at the origin.
    var light = new THREE.SpotLight(0xffffff);
    light.position.set(0, 0, 9000);
    light.lookAt(new THREE.Vector3(0, 0, 0));
    virtual.scene.add(light);
    // Draw the reality backdrop, then the augmented layer over it.
    function render() {
        renderer.render(reality.scene, reality.camera);
        renderer.render(virtual.scene, virtual.camera);
    }
    // Ask the reality scene to refresh its canvas texture.
    function update() {
        reality.update();
    }
    function setCameraMatrix(matrix) {
        virtual.setProjectionMatrix(matrix);
    }
    // Objects are expected to expose their ThreeJS node as `.model`.
    function add(object) {
        virtual.add(object.model);
    }
    function remove(object) {
        virtual.remove(object.model);
    }
    return {
        add: add,
        remove: remove,
        update: update,
        render: render,
        glCanvas: glCanvas,
        setCameraMatrix: setCameraMatrix,
    };
};
return {
create: create,
}
});
| xsoh/3DKabah | arview.js | JavaScript | apache-2.0 | 3,251 |
const $ = require('jquery');
const { BagItProfile } = require('../../bagit/bagit_profile');
const { Job } = require('../../core/job');
const { JobRunController } = require('./job_run_controller');
const { PackageOperation } = require('../../core/package_operation');
const path = require('path');
const { StorageService } = require('../../core/storage_service');
const { TestUtil } = require('../../core/test_util');
const { UITestUtil } = require('../common/ui_test_util');
const { UploadOperation } = require('../../core/upload_operation');
const { Util } = require('../../core/util');
// Remove persisted Job/StorageService fixtures before every test, and once
// more after the suite, so runs do not leak saved records into each other.
beforeEach(() => {
    TestUtil.deleteJsonFile('Job');
    TestUtil.deleteJsonFile('StorageService');
});
afterAll(() => {
    TestUtil.deleteJsonFile('Job');
    TestUtil.deleteJsonFile('StorageService');
});
// Creates, persists and returns a StorageService with the given
// name/protocol/host connection details.
function getStorageService(name, proto, host) {
    const service = new StorageService({
        name: name,
        protocol: proto,
        host: host
    });
    service.save();
    return service;
}
// Builds an UploadOperation of /dev/null targeting a freshly saved
// StorageService with the given connection details.
function getUploadOp(name, proto, host) {
    const op = new UploadOperation();
    op.sourceFiles = ['/dev/null'];
    op.storageServiceId = getStorageService(name, proto, host).id;
    return op;
}
// Builds and persists a fully populated BagIt packaging job: two source
// directories, two S3 upload targets, and the multi-manifest test profile.
function getJob() {
    const profilePath = path.join(__dirname, '..', '..', 'test', 'profiles', 'multi_manifest.json');
    const job = new Job();
    job.packageOp = new PackageOperation('TestBag', '/dev/null');
    job.packageOp.packageFormat = 'BagIt';
    job.packageOp._trimLeadingPaths = false;
    job.packageOp.sourceFiles = [
        __dirname,
        path.join(__dirname, '..', 'forms')
    ];
    // Precomputed totals for the source files above.
    job.dirCount = 2;
    job.fileCount = 12;
    job.byteCount = 237174;
    job.uploadOps = [
        getUploadOp('target1', 's3', 'target1.com'),
        getUploadOp('target2', 's3', 'target2.com')
    ];
    job.bagItProfile = BagItProfile.load(profilePath);
    job.save();
    return job;
}
// Persists a fixture job and returns a JobRunController addressing it by id.
function getController() {
    const savedJob = getJob();
    return new JobRunController(new URLSearchParams({ id: savedJob.id }));
}
// The controller should bind to the Job model and load the job identified
// by the query-string id.
test('constructor', () => {
    let controller = getController();
    expect(controller.model).toEqual(Job);
    expect(controller.job).not.toBeNull();
});
// Renders the job-run view and checks that every key attribute of the job
// (package info, profile, counts, sizes, source paths) appears in the
// generated container markup.
test('show', () => {
    let controller = getController();
    let response = controller.show()
    expect(response.container).toMatch(controller.job.packageOp.packageName);
    expect(response.container).toMatch(controller.job.packageOp.outputPath);
    expect(response.container).toMatch(controller.job.bagItProfile.name);
    expect(response.container).toMatch(controller.job.bagItProfile.description);
    expect(response.container).toMatch('2 Directories');
    expect(response.container).toMatch('12 Files');
    // 237174 bytes / 1024 = 231.62 KB, as formatted by the view.
    expect(response.container).toMatch('231.62 KB');
    expect(response.container).toMatch(controller.job.packageOp.sourceFiles[0]);
    expect(response.container).toMatch(controller.job.packageOp.sourceFiles[1]);
});
| APTrust/easy-store | ui/controllers/job_run_controller.test.js | JavaScript | apache-2.0 | 2,913 |
package io.github.dantesun.petclinic.data.velocity;
import org.apache.ibatis.executor.parameter.ParameterHandler;
import org.apache.ibatis.mapping.BoundSql;
import org.apache.ibatis.mapping.MappedStatement;
import org.apache.ibatis.mapping.SqlSource;
import org.apache.ibatis.parsing.XNode;
import org.apache.ibatis.scripting.LanguageDriver;
import org.apache.ibatis.session.Configuration;
import org.apache.ibatis.type.Alias;
import org.mybatis.scripting.velocity.Driver;
/**
 * MyBatis {@link LanguageDriver} registered under the alias "velocity" that
 * delegates SQL scripting to the mybatis-velocity {@link Driver}.
 *
 * Created by dsun on 15/2/22.
 */
@Alias("velocity")
public class VelocityDriver implements LanguageDriver {

    // Wrapped mybatis-velocity driver that performs the actual work.
    private Driver driverImpl = new Driver();

    /** Delegates parameter handling entirely to the wrapped velocity driver. */
    @Override
    public ParameterHandler createParameterHandler(MappedStatement mappedStatement, Object parameterObject, BoundSql boundSql) {
        return driverImpl.createParameterHandler(mappedStatement, parameterObject, boundSql);
    }

    /** Extracts the script text from the XML node and defers to the String overload. */
    @Override
    public SqlSource createSqlSource(Configuration configuration, XNode script, Class<?> parameterType) {
        return createSqlSource(configuration, script.getNode().getTextContent(), parameterType);
    }

    /**
     * Builds a {@code VelocitySqlSource} for the given script text.
     * A null {@code parameterType} defaults to {@code Object.class}.
     */
    @Override
    public SqlSource createSqlSource(Configuration configuration, String script, Class<?> parameterType) {
        if (parameterType == null) {
            parameterType = Object.class;
        }
        return new VelocitySqlSource(configuration, script, parameterType);
    }
}
| dantesun/webapp-boilerplate | core-models/src/main/java/io/github/dantesun/petclinic/data/velocity/VelocityDriver.java | Java | apache-2.0 | 1,398 |
package core.utils;
import java.util.ArrayList;
import java.util.List;
public class Page<T> {
public static final int PAGE_SIZE = 10;
protected List<T> listObjects = new ArrayList<>();
protected int currentPage;
protected int pageSize = PAGE_SIZE;
/**
* Constructor.
* @param list contains the ArrayList to copy
* @param page correspond to the currentPage
*/
public Page(List<T> list, int page) {
for (int i = 0; i < list.size(); i++) {
this.listObjects.add(list.get(i));
}
this.currentPage = page;
}
/**
* Constructor.
* @param list contains the ArrayList to copy
* @param page correspond to the currentPage
* @param pageSize the page size
*/
public Page(List<T> list, int page, int pageSize) {
for (int i = 0; i < list.size(); i++) {
this.listObjects.add(list.get(i));
}
this.currentPage = page;
this.pageSize = pageSize;
}
/**
* Get the ArrayList containing a T page.
* @return the ArrayList containing a T page
*/
public List<T> getListPage() {
return listObjects;
}
/**
* Get the next page.
* @return next page
*/
public int getNextPage() {
return currentPage + 1;
}
/**
* Get previous page.
* @return previous page if currentPage > 0 else 0
*/
public int getPreviousPage() {
if (currentPage > 0) {
return currentPage - 1;
} else {
return 0;
}
}
/**
* Get the current page.
* @return the current page
*/
public int getCurrentPage() {
return currentPage;
}
/**
* Get the page size.
* @return the page size
*/
public int getPageSize() {
return pageSize;
}
/**
* Test if the ArrayList<T> is empty.
* @return True if Empty, else false
*/
public boolean isEmpty() {
return listObjects.isEmpty();
}
/**
* Returns a string representation of the object.
* @return a string representation of the object.
*/
@Override
public String toString() {
return this.getClass() + " [listObjects = " + listObjects + "]";
}
/**
* Equals Methode.
* @param o other object
* @return true if equals, else false
*/
@Override
public boolean equals(Object o) {
Page<T> page = (Page<T>) o;
if (page.getPageSize() != this.pageSize || page.getCurrentPage() != this.currentPage) {
return false;
}
boolean equals = true;
int i = 0;
while (i < this.pageSize && equals) {
equals = page.getListPage().get(i).equals(this.listObjects.get(i));
i++;
}
return equals;
}
/**
* Hash Code.
* @return hash code
*/
@Override
public int hashCode() {
int result = listObjects != null ? listObjects.hashCode() : 0;
result = 31 * result + currentPage;
result = 31 * result + pageSize;
return result;
}
/**
* Add a Object in the ArrayList.
* @param t the object
*/
public void add(T t) {
listObjects.add(t);
}
}
| gdanguy/training-java-gdanguy | core/src/main/java/core/utils/Page.java | Java | apache-2.0 | 3,276 |
/**
* @license Apache-2.0
*
* Copyright (c) 2020 The Stdlib Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
'use strict';
// MODULES //
var isPositiveInteger = require( '@stdlib/math/base/assert/is-positive-integer' );
var constantFunction = require( '@stdlib/utils/constant-function' );
var isfinite = require( '@stdlib/math/base/assert/is-finite' );
var round = require( '@stdlib/math/base/special/round' );
var isnan = require( '@stdlib/math/base/assert/is-nan' );
var exp = require( '@stdlib/math/base/special/exp' );
var LN2 = require( '@stdlib/constants/float64/ln-two' );
var weights = require( './weights.js' );
// MAIN //
/**
* Returns a function for evaluating the cumulative distribution function (CDF) for the distribution of the Wilcoxon signed rank test statistic with `n` observations.
*
* @param {PositiveInteger} n - number of observations
* @returns {Function} CDF
*
* @example
* var cdf = factory( 8 );
* var y = cdf( 3.9 );
* // returns ~0.027
*
* y = cdf( 17.0 );
* // returns ~0.473
*/
function factory( n ) {
	var signProb;
	var maxStat;
	if ( !isPositiveInteger( n ) || !isfinite( n ) ) {
		return constantFunction( NaN );
	}
	// Under H0 each of the 2^n sign assignments is equally probable:
	signProb = exp( -n * LN2 );

	// Largest attainable statistic is the sum of ranks 1..n:
	maxStat = n * ( n + 1 ) / 2;
	return cdf;

	/**
	* Evaluates the cumulative distribution function (CDF) for the distribution of the Wilcoxon signed rank test statistic.
	*
	* @private
	* @param {number} x - input value
	* @returns {Probability} evaluated CDF
	*
	* @example
	* var y = cdf( 2 );
	* // returns <number>
	*/
	function cdf( x ) {
		var acc;
		var k;
		if ( isnan( x ) ) {
			return NaN;
		}
		if ( x < 0.0 ) {
			return 0.0;
		}
		x = round( x );
		if ( x >= maxStat ) {
			return 1.0;
		}
		// Sum the (scaled) number of rank configurations for each value 0..x:
		acc = 0;
		for ( k = 0; k <= x; k++ ) {
			acc += weights( k, n ) * signProb;
		}
		return acc;
	}
}
// EXPORTS //
module.exports = factory;
| stdlib-js/stdlib | lib/node_modules/@stdlib/stats/base/dists/signrank/cdf/lib/factory.js | JavaScript | apache-2.0 | 2,313 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.ml.preprocessing.imputer;
import org.apache.ignite.ml.math.functions.IgniteBiFunction;
/**
 * Preprocessing function that makes imputing.
 *
 * <p>Replaces every {@code NaN} element of the feature vector produced by the
 * base preprocessor with the corresponding precomputed imputing value.</p>
 *
 * @param <K> Type of a key in {@code upstream} data.
 * @param <V> Type of a value in {@code upstream} data.
 */
public class ImputerPreprocessor<K, V> implements IgniteBiFunction<K, V, double[]> {
    /** */
    private static final long serialVersionUID = 6887800576392623469L;

    /** Filling values. */
    private final double[] imputingValues;

    /** Base preprocessor. */
    private final IgniteBiFunction<K, V, double[]> basePreprocessor;

    /**
     * Constructs a new instance of imputing preprocessor.
     *
     * @param imputingValues Per-feature replacement values for missing (NaN) entries.
     * @param basePreprocessor Base preprocessor.
     */
    public ImputerPreprocessor(double[] imputingValues,
        IgniteBiFunction<K, V, double[]> basePreprocessor) {
        this.imputingValues = imputingValues;
        this.basePreprocessor = basePreprocessor;
    }

    /**
     * Applies this preprocessor.
     *
     * @param k Key.
     * @param v Value.
     * @return Preprocessed row with every NaN feature replaced by its imputing value.
     */
    @Override public double[] apply(K k, V v) {
        double[] res = basePreprocessor.apply(k, v);

        assert res.length == imputingValues.length;

        for (int i = 0; i < res.length; i++) {
            // Double.isNaN avoids the double boxing done by the previous
            // Double.valueOf(res[i]).equals(Double.NaN) while keeping the same
            // semantics (a plain == check would not work since NaN != NaN).
            if (Double.isNaN(res[i]))
                res[i] = imputingValues[i];
        }
        return res;
    }
}
| voipp/ignite | modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/imputer/ImputerPreprocessor.java | Java | apache-2.0 | 2,255 |
/*
© 2017 Altavant Technologies Inc.
Web: http://www.altavant.com
*/
namespace Altavant.Fusion.Graphics
{
using System;
using Fusion.Utils;
// Determines how Matrix.Multiply combines two matrices (row-vector
// convention): Append yields this * other (other applied after this
// transform); Prepend yields other * this (other applied before it).
public enum MatrixOrder : byte
{
    Append,
    Prepend
}
/// <summary>
/// Mutable 2-D affine transformation matrix using the row-vector convention:
///   [x' y' 1] = [x y 1] * | M11     M12     0 |
///                         | M21     M22     0 |
///                         | OffsetX OffsetY 1 |
/// Converts to and from the native Android.Graphics.Matrix representation.
/// </summary>
public class Matrix
{
    // The six affine components (the implicit third column is [0 0 1]).
    private float _m11;
    private float _m12;
    private float _m21;
    private float _m22;
    private float _offsetX;
    private float _offsetY;
    // Scratch buffer reused by ToNative(Android.Graphics.Matrix) so repeated
    // conversions do not allocate a new float[9] each time.
    private float[] _values;

    /// <summary>Creates an identity matrix.</summary>
    public Matrix()
        : this(1F, 0F, 0F, 1F, 0F, 0F)
    {
    }

    /// <summary>Creates a matrix from its six affine components.</summary>
    public Matrix(float m11, float m12, float m21, float m22, float offsetX, float offsetY)
    {
        _m11 = m11;
        _m12 = m12;
        _m21 = m21;
        _m22 = m22;
        _offsetX = offsetX;
        _offsetY = offsetY;
    }

    /// <summary>
    /// Copies the affine components out of a native Android matrix.
    /// The perspective row (Mpersp0..2) is ignored.
    /// </summary>
    public Matrix(Android.Graphics.Matrix matrix)
    {
        float[] values = new float[9];
        matrix.GetValues(values);
        _m11 = values[Android.Graphics.Matrix.MscaleX];
        _m21 = values[Android.Graphics.Matrix.MskewX];
        _offsetX = values[Android.Graphics.Matrix.MtransX];
        _m12 = values[Android.Graphics.Matrix.MskewY];
        _m22 = values[Android.Graphics.Matrix.MscaleY];
        _offsetY = values[Android.Graphics.Matrix.MtransY];
    }

    public float M11
    {
        get
        {
            return _m11;
        }
        set
        {
            _m11 = value;
        }
    }

    public float M12
    {
        get
        {
            return _m12;
        }
        set
        {
            _m12 = value;
        }
    }

    public float M21
    {
        get
        {
            return _m21;
        }
        set
        {
            _m21 = value;
        }
    }

    public float M22
    {
        get
        {
            return _m22;
        }
        set
        {
            _m22 = value;
        }
    }

    public float OffsetX
    {
        get
        {
            return _offsetX;
        }
        set
        {
            _offsetX = value;
        }
    }

    public float OffsetY
    {
        get
        {
            return _offsetY;
        }
        set
        {
            _offsetY = value;
        }
    }

    /// <summary>Returns a NEW identity matrix on every access.</summary>
    public static Matrix Identity
    {
        get
        {
            return new Matrix(1F, 0F, 0F, 1F, 0F, 0F);
        }
    }

    /// <summary>True when all components exactly equal the identity's.</summary>
    public bool IsIdentity
    {
        get
        {
            if ((_m11 == 1F) && (_m12 == 0F) && (_m21 == 0F) && (_m22 == 1F) && (_offsetX == 0F) && (_offsetY == 0F))
                return true;
            return false;
        }
    }

    /// <summary>Overwrites all six components at once.</summary>
    public void SetValues(float m11, float m12, float m21, float m22, float offsetX, float offsetY)
    {
        _m11 = m11;
        _m12 = m12;
        _m21 = m21;
        _m22 = m22;
        _offsetX = offsetX;
        _offsetY = offsetY;
    }

    /// <summary>Resets this matrix to the identity.</summary>
    public void Reset()
    {
        _m11 = 1F;
        _m12 = 0F;
        _m21 = 0F;
        _m22 = 1F;
        _offsetX = 0F;
        _offsetY = 0F;
    }

    public void Multiply(Matrix matrix)
    {
        Multiply(matrix, MatrixOrder.Prepend);
    }

    /// <summary>
    /// Multiplies this matrix in place: Append computes this = this * matrix,
    /// Prepend computes this = matrix * this (row-vector convention).
    /// </summary>
    public void Multiply(Matrix matrix, MatrixOrder order)
    {
        if (matrix == null)
            throw new ArgumentNullException(nameof(matrix));
        float a11, a12, a21, a22, aX, aY;
        float b11, b12, b21, b22, bX, bY;
        if (order == MatrixOrder.Append)
        {
            a11 = _m11;
            a12 = _m12;
            a21 = _m21;
            a22 = _m22;
            aX = _offsetX;
            aY = _offsetY;
            b11 = matrix.M11;
            b12 = matrix.M12;
            b21 = matrix.M21;
            b22 = matrix.M22;
            bX = matrix.OffsetX;
            bY = matrix.OffsetY;
        }
        else
        {
            a11 = matrix.M11;
            a12 = matrix.M12;
            a21 = matrix.M21;
            a22 = matrix.M22;
            aX = matrix.OffsetX;
            aY = matrix.OffsetY;
            b11 = _m11;
            b12 = _m12;
            b21 = _m21;
            b22 = _m22;
            bX = _offsetX;
            bY = _offsetY;
        }
        // result = a * b; the translation row picks up b's offset.
        _m11 = a11 * b11 + a12 * b21;
        _m12 = a11 * b12 + a12 * b22;
        _m21 = a21 * b11 + a22 * b21;
        _m22 = a21 * b12 + a22 * b22;
        _offsetX = aX * b11 + aY * b21 + bX;
        _offsetY = aX * b12 + aY * b22 + bY;
    }

    public void Translate(float offsetX, float offsetY)
    {
        Translate(offsetX, offsetY, MatrixOrder.Prepend);
    }

    public void Translate(float offsetX, float offsetY, MatrixOrder order)
    {
        Matrix matrix = Translation(offsetX, offsetY);
        Multiply(matrix, order);
    }

    public void Rotate(float angle)
    {
        Rotate(angle, MatrixOrder.Prepend);
    }

    /// <summary>Applies a rotation of <paramref name="angle"/> degrees about the origin.</summary>
    public void Rotate(float angle, MatrixOrder order)
    {
        Matrix matrix = Rotation(angle);
        Multiply(matrix, order);
    }

    public void RotateAt(float angle, Point center)
    {
        RotateAt(angle, center.X, center.Y, MatrixOrder.Prepend);
    }

    public void RotateAt(float angle, float centerX, float centerY)
    {
        RotateAt(angle, centerX, centerY, MatrixOrder.Prepend);
    }

    public void RotateAt(float angle, Point center, MatrixOrder order)
    {
        if (center == null)
            throw new ArgumentNullException(nameof(center));
        RotateAt(angle, center.X, center.Y, order);
    }

    /// <summary>Applies a rotation (degrees) about an arbitrary center point.</summary>
    public void RotateAt(float angle, float centerX, float centerY, MatrixOrder order)
    {
        Matrix matrix = RotationAt(angle, centerX, centerY);
        Multiply(matrix, order);
    }

    public void Scale(float scaleX, float scaleY)
    {
        Scale(scaleX, scaleY, MatrixOrder.Prepend);
    }

    public void Scale(float scaleX, float scaleY, MatrixOrder order)
    {
        Matrix matrix = Scaling(scaleX, scaleY);
        Multiply(matrix, order);
    }

    public void ScaleAt(float scaleX, float scaleY, Point center)
    {
        ScaleAt(scaleX, scaleY, center.X, center.Y, MatrixOrder.Prepend);
    }

    public void ScaleAt(float scaleX, float scaleY, float centerX, float centerY)
    {
        ScaleAt(scaleX, scaleY, centerX, centerY, MatrixOrder.Prepend);
    }

    public void ScaleAt(float scaleX, float scaleY, Point center, MatrixOrder order)
    {
        if (center == null)
            throw new ArgumentNullException(nameof(center));
        ScaleAt(scaleX, scaleY, center.X, center.Y, order);
    }

    /// <summary>Applies a scale about an arbitrary center point.</summary>
    public void ScaleAt(float scaleX, float scaleY, float centerX, float centerY, MatrixOrder order)
    {
        Matrix matrix = ScalingAt(scaleX, scaleY, centerX, centerY);
        Multiply(matrix, order);
    }

    public void Shear(float shearX, float shearY)
    {
        Shear(shearX, shearY, MatrixOrder.Prepend);
    }

    /// <summary>Applies a shear by raw factors (unlike Skew, which takes angles in degrees).</summary>
    public void Shear(float shearX, float shearY, MatrixOrder order)
    {
        Matrix matrix = Shearing(shearX, shearY);
        Multiply(matrix, order);
    }

    public void ShearAt(float shearX, float shearY, Point center)
    {
        ShearAt(shearX, shearY, center.X, center.Y, MatrixOrder.Prepend);
    }

    public void ShearAt(float shearX, float shearY, float centerX, float centerY)
    {
        ShearAt(shearX, shearY, centerX, centerY, MatrixOrder.Prepend);
    }

    public void ShearAt(float shearX, float shearY, Point center, MatrixOrder order)
    {
        if (center == null)
            throw new ArgumentNullException(nameof(center));
        ShearAt(shearX, shearY, center.X, center.Y, order);
    }

    public void ShearAt(float shearX, float shearY, float centerX, float centerY, MatrixOrder order)
    {
        Matrix matrix = ShearingAt(shearX, shearY, centerX, centerY);
        Multiply(matrix, order);
    }

    public void Skew(float skewX, float skewY)
    {
        Skew(skewX, skewY, MatrixOrder.Prepend);
    }

    /// <summary>Applies a skew given angles in degrees (shear factors are tan of the angles).</summary>
    public void Skew(float skewX, float skewY, MatrixOrder order)
    {
        Matrix matrix = Skewing(skewX, skewY);
        Multiply(matrix, order);
    }

    public void SkewAt(float skewX, float skewY, Point center)
    {
        SkewAt(skewX, skewY, center.X, center.Y, MatrixOrder.Prepend);
    }

    public void SkewAt(float skewX, float skewY, float centerX, float centerY)
    {
        SkewAt(skewX, skewY, centerX, centerY, MatrixOrder.Prepend);
    }

    public void SkewAt(float skewX, float skewY, Point center, MatrixOrder order)
    {
        if (center == null)
            throw new ArgumentNullException(nameof(center));
        SkewAt(skewX, skewY, center.X, center.Y, order);
    }

    public void SkewAt(float skewX, float skewY, float centerX, float centerY, MatrixOrder order)
    {
        Matrix matrix = SkewingAt(skewX, skewY, centerX, centerY);
        Multiply(matrix, order);
    }

    public Point TransformPoint(Point pt)
    {
        return TransformPoint(pt.X, pt.Y);
    }

    /// <summary>Transforms a point: [x' y'] = [x y 1] * M (row-vector convention).</summary>
    public Point TransformPoint(float x, float y)
    {
        float x1 = x * _m11 + y * _m21 + _offsetX;
        float y1 = x * _m12 + y * _m22 + _offsetY;
        return new Point(x1, y1);
    }

    /// <summary>Returns an independent copy of this matrix.</summary>
    public Matrix Clone()
    {
        return new Matrix(_m11, _m12, _m21, _m22, _offsetX, _offsetY);
    }

    public override int GetHashCode()
    {
        return _m11.GetHashCode() ^ _m12.GetHashCode() ^ _m21.GetHashCode() ^ _m22.GetHashCode() ^ _offsetX.GetHashCode() ^ _offsetY.GetHashCode();
    }

    /// <summary>Value equality: all six components must match exactly.</summary>
    public override bool Equals(object obj)
    {
        if (obj == null)
            return false;
        if (obj == this)
            return true;
        if (obj is Matrix)
        {
            Matrix matrix = (Matrix)obj;
            if ((_m11 == matrix.M11) && (_m12 == matrix.M12) && (_m21 == matrix.M21) && (_m22 == matrix.M22) && (_offsetX == matrix.OffsetX) && (_offsetY == matrix.OffsetY))
                return true;
        }
        return false;
    }

    /// <summary>Six comma-separated components, three decimals each; the inverse of Parse.</summary>
    public override string ToString()
    {
        CharBuffer cb = new CharBuffer();
        cb.Add(Converter.ToString(_m11, 3));
        cb.Add(", ");
        cb.Add(Converter.ToString(_m12, 3));
        cb.Add(", ");
        cb.Add(Converter.ToString(_m21, 3));
        cb.Add(", ");
        cb.Add(Converter.ToString(_m22, 3));
        cb.Add(", ");
        cb.Add(Converter.ToString(_offsetX, 3));
        cb.Add(", ");
        cb.Add(Converter.ToString(_offsetY, 3));
        return cb.ToString();
    }

    /// <summary>
    /// Parses six components in the same order ToString emits them.
    /// NOTE(review): nameof(s) is passed as the exception MESSAGE here, so the
    /// message is just "s"; consider ArgumentException(message, nameof(s)).
    /// </summary>
    public static Matrix Parse(string s)
    {
        if (string.IsNullOrEmpty(s))
            throw new ArgumentException(nameof(s));
        string[] vals = Utils.Split(s);
        float m11 = Convert.ToSingle(vals[0], Converter.NumberFormatInfo);
        float m12 = Convert.ToSingle(vals[1], Converter.NumberFormatInfo);
        float m21 = Convert.ToSingle(vals[2], Converter.NumberFormatInfo);
        float m22 = Convert.ToSingle(vals[3], Converter.NumberFormatInfo);
        float offsetX = Convert.ToSingle(vals[4], Converter.NumberFormatInfo);
        float offsetY = Convert.ToSingle(vals[5], Converter.NumberFormatInfo);
        return new Matrix(m11, m12, m21, m22, offsetX, offsetY);
    }

    public static Matrix Translation(Point offset)
    {
        return new Matrix(1F, 0F, 0F, 1F, offset.X, offset.Y);
    }

    public static Matrix Translation(float offsetX, float offsetY)
    {
        return new Matrix(1F, 0F, 0F, 1F, offsetX, offsetY);
    }

    /// <summary>Rotation about the origin; angle in degrees.</summary>
    public static Matrix Rotation(float angle)
    {
        double radianAngle = angle * Math.PI / 180F;
        float cosAngle = (float)Math.Cos(radianAngle);
        float sinAngle = (float)Math.Sin(radianAngle);
        return new Matrix(cosAngle, sinAngle, -sinAngle, cosAngle, 0F, 0F);
    }

    public static Matrix RotationAt(float angle, Point center)
    {
        return RotationAt(angle, center.X, center.Y);
    }

    /// <summary>Rotation about (centerX, centerY): translate to origin, rotate, translate back.</summary>
    public static Matrix RotationAt(float angle, float centerX, float centerY)
    {
        double radianAngle = angle * Math.PI / 180F;
        float cosAngle = (float)Math.Cos(radianAngle);
        float sinAngle = (float)Math.Sin(radianAngle);
        Matrix m1 = new Matrix(1F, 0F, 0F, 1F, -centerX, -centerY);
        Matrix m2 = new Matrix(cosAngle, sinAngle, -sinAngle, cosAngle, 0F, 0F);
        Matrix m3 = new Matrix(1F, 0F, 0F, 1F, centerX, centerY);
        m1.Multiply(m2, MatrixOrder.Append);
        m1.Multiply(m3, MatrixOrder.Append);
        return m1;
    }

    public static Matrix Scaling(float scale)
    {
        return Scaling(scale, scale);
    }

    public static Matrix Scaling(float scaleX, float scaleY)
    {
        return new Matrix(scaleX, 0F, 0F, scaleY, 0F, 0F);
    }

    public static Matrix ScalingAt(float scale, Point center)
    {
        return ScalingAt(scale, scale, center.X, center.Y);
    }

    public static Matrix ScalingAt(float scaleX, float scaleY, Point center)
    {
        return ScalingAt(scaleX, scaleY, center.X, center.Y);
    }

    public static Matrix ScalingAt(float scale, float centerX, float centerY)
    {
        return ScalingAt(scale, scale, centerX, centerY);
    }

    /// <summary>Scale about (centerX, centerY): translate to origin, scale, translate back.</summary>
    public static Matrix ScalingAt(float scaleX, float scaleY, float centerX, float centerY)
    {
        Matrix m1 = new Matrix(1F, 0F, 0F, 1F, -centerX, -centerY);
        Matrix m2 = new Matrix(scaleX, 0F, 0F, scaleY, 0F, 0F);
        Matrix m3 = new Matrix(1F, 0F, 0F, 1F, centerX, centerY);
        m1.Multiply(m2, MatrixOrder.Append);
        m1.Multiply(m3, MatrixOrder.Append);
        return m1;
    }

    /// <summary>Shear by raw factors (compare Skewing, which takes angles in degrees).</summary>
    public static Matrix Shearing(float shearX, float shearY)
    {
        return new Matrix(1F, shearY, shearX, 1F, 0F, 0F);
    }

    public static Matrix ShearingAt(float shearX, float shearY, Point center)
    {
        return ShearingAt(shearX, shearY, center.X, center.Y);
    }

    public static Matrix ShearingAt(float shearX, float shearY, float centerX, float centerY)
    {
        Matrix m1 = new Matrix(1F, 0F, 0F, 1F, -centerX, -centerY);
        Matrix m2 = new Matrix(1F, shearY, shearX, 1F, 0F, 0F);
        Matrix m3 = new Matrix(1F, 0F, 0F, 1F, centerX, centerY);
        m1.Multiply(m2, MatrixOrder.Append);
        m1.Multiply(m3, MatrixOrder.Append);
        return m1;
    }

    /// <summary>Skew given angles in degrees; the shear factors are tan of the angles.</summary>
    public static Matrix Skewing(float skewX, float skewY)
    {
        double angleAlpha = skewX * Math.PI / 180F;
        double angleBeta = skewY * Math.PI / 180F;
        float tanAlpha = (float)Math.Tan(angleAlpha);
        float tanBeta = (float)Math.Tan(angleBeta);
        return new Matrix(1F, tanBeta, tanAlpha, 1F, 0F, 0F);
    }

    public static Matrix SkewingAt(float skewX, float skewY, Point center)
    {
        return SkewingAt(skewX, skewY, center.X, center.Y);
    }

    public static Matrix SkewingAt(float skewX, float skewY, float centerX, float centerY)
    {
        double angleAlpha = skewX * Math.PI / 180F;
        double angleBeta = skewY * Math.PI / 180F;
        float tanAlpha = (float)Math.Tan(angleAlpha);
        float tanBeta = (float)Math.Tan(angleBeta);
        Matrix m1 = new Matrix(1F, 0F, 0F, 1F, -centerX, -centerY);
        Matrix m2 = new Matrix(1F, tanBeta, tanAlpha, 1F, 0F, 0F);
        Matrix m3 = new Matrix(1F, 0F, 0F, 1F, centerX, centerY);
        m1.Multiply(m2, MatrixOrder.Append);
        m1.Multiply(m3, MatrixOrder.Append);
        return m1;
    }

    /// <summary>Converts to a freshly allocated native Android matrix.</summary>
    public Android.Graphics.Matrix ToNative()
    {
        Android.Graphics.Matrix m = new Android.Graphics.Matrix();
        ToNative(m);
        return m;
    }

    /// <summary>
    /// Writes this matrix into an existing native matrix. The cached buffer's
    /// perspective entries stay zero except Mpersp2, which must be 1.
    /// </summary>
    public void ToNative(Android.Graphics.Matrix m)
    {
        if (_values == null)
            _values = new float[9];
        _values[Android.Graphics.Matrix.MscaleX] = _m11;
        _values[Android.Graphics.Matrix.MskewX] = _m21;
        _values[Android.Graphics.Matrix.MtransX] = _offsetX;
        _values[Android.Graphics.Matrix.MskewY] = _m12;
        _values[Android.Graphics.Matrix.MscaleY] = _m22;
        _values[Android.Graphics.Matrix.MtransY] = _offsetY;
        _values[Android.Graphics.Matrix.Mpersp2] = 1F;
        m.SetValues(_values);
    }
}
} | altavant/Fusion | Android/Source/Graphics/Matrix.cs | C# | apache-2.0 | 14,618 |
/*
* Copyright (c) 2008-2013, Hazelcast, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.hazelcast.core;
import junit.framework.TestCase;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.runner.RunWith;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
@RunWith(com.hazelcast.util.RandomBlockJUnit4ClassRunner.class)
public class IMapAsyncTest {
    // Shared fixture data used across the async map tests.
    private final String key = "key";
    private final String value1 = "value1";
    private final String value2 = "value2";

    /**
     * Shuts down every Hazelcast instance both before the first test and after
     * the last one, so each run of this suite starts from a clean cluster.
     */
    @BeforeClass
    @AfterClass
    public static void init() throws Exception {
        Hazelcast.shutdownAll();
    }

    /** getAsync should resolve to the value previously stored under the key. */
    @Test
    public void testGetAsync() throws Exception {
        IMap<String, String> map = Hazelcast.getMap("map:test:getAsync");
        map.put(key, value1);
        Future<String> f1 = map.getAsync(key);
        TestCase.assertEquals(value1, f1.get());
    }

    /** putAsync resolves to the previous value: null on first put, then value1. */
    @Test
    public void testPutAsync() throws Exception {
        IMap<String, String> map = Hazelcast.getMap("map:test:putAsync");
        Future<String> f1 = map.putAsync(key, value1);
        String f1Val = f1.get();
        TestCase.assertNull(f1Val);
        Future<String> f2 = map.putAsync(key, value2);
        String f2Val = f2.get();
        TestCase.assertEquals(value1, f2Val);
    }

    /** removeAsync should resolve to the removed value. */
    @Test
    public void testRemoveAsync() throws Exception {
        IMap<String, String> map = Hazelcast.getMap("map:test:removeAsync");
        // populate map
        map.put(key, value1);
        Future<String> f1 = map.removeAsync(key);
        TestCase.assertEquals(value1, f1.get());
    }

    /**
     * Locks the key from another thread so removeAsync cannot complete, then
     * expects Future.get with a zero timeout to throw TimeoutException.
     */
    @Test
    public void testRemoveAsyncWithImmediateTimeout() throws Exception {
        final IMap<String, String> map = Hazelcast.getMap("map:test:removeAsync:timeout");
        // populate map
        map.put(key, value1);
        // Latch guarantees the lock is held before we attempt the remove.
        final CountDownLatch latch = new CountDownLatch(1);
        new Thread(new Runnable() {
            public void run() {
                map.lock(key);
                latch.countDown();
            }
        }).start();
        assertTrue(latch.await(20, TimeUnit.SECONDS));
        Future<String> f1 = map.removeAsync(key);
        try {
            assertEquals(value1, f1.get(0L, TimeUnit.SECONDS));
        } catch (TimeoutException e) {
            // expected
            return;
        }
        TestCase.fail("Failed to throw TimeoutException with zero timeout");
    }

    /** removeAsync on an absent key resolves to null. */
    @Test
    public void testRemoveAsyncWithNonExistantKey() throws Exception {
        IMap<String, String> map = Hazelcast.getMap("map:test:removeAsync:nonexistant");
        Future<String> f1 = map.removeAsync(key);
        TestCase.assertNull(f1.get());
    }
}
| health-and-care-developer-network/health-and-care-developer-network | library/hazelcast/2.5/hazelcast-2.5-source/hazelcast/src/test/java/com/hazelcast/core/IMapAsyncTest.java | Java | apache-2.0 | 3,450 |
/*
* Licensed to ElasticSearch and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. ElasticSearch licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.query;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
/**
 * Builder for the {@code span_first} query: matches spans of the wrapped
 * span query that end no later than position {@code end} within the field.
 */
public class SpanFirstQueryBuilder extends BaseQueryBuilder implements SpanQueryBuilder, BoostableQueryBuilder<SpanFirstQueryBuilder> {

    public static final String NAME = "span_first";

    // Inner span query whose matches are restricted to the start of the field.
    private final SpanQueryBuilder matchBuilder;

    // Maximum allowed end position of matching spans.
    private final int end;

    // -1 is a sentinel meaning "no explicit boost"; it is then omitted from
    // the generated XContent.
    private float boost = -1;

    /**
     * @param matchBuilder the span query to wrap
     * @param end maximum end position of matching spans
     */
    public SpanFirstQueryBuilder(SpanQueryBuilder matchBuilder, int end) {
        this.matchBuilder = matchBuilder;
        this.end = end;
    }

    /** Sets the query boost and returns this builder for chaining. */
    public SpanFirstQueryBuilder boost(float boost) {
        this.boost = boost;
        return this;
    }

    /** Serializes this builder as a {@code span_first} XContent object. */
    @Override
    protected void doXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject(NAME);
        builder.field("match");
        matchBuilder.toXContent(builder, params);
        builder.field("end", end);
        if (boost != -1) {
            builder.field("boost", boost);
        }
        builder.endObject();
    }
}
| jprante/elasticsearch-client | elasticsearch-client-search/src/main/java/org/elasticsearch/index/query/SpanFirstQueryBuilder.java | Java | apache-2.0 | 1,877 |
[assembly: Xamarin.Forms.Platform.WPF.ExportRenderer(typeof(Xamarin.Forms.Entry), typeof(Xamarin.Forms.Platform.WPF.Controls.EntryRenderer))]
namespace Xamarin.Forms.Platform.WPF.Controls
{
using System.Windows;
using System.Windows.Controls;
public partial class EntryRenderer : Xamarin.Forms.Platform.WPF.Rendereres.ViewRenderer
{
bool ignoreTextChange;
public new Entry Model
{
get { return (Entry)base.Model; }
set { base.Model = value; }
}
public EntryRenderer()
{
InitializeComponent();
}
protected override void LoadModel(View model)
{
((Entry)model).TextChanged += EntryModel_TextChanged;
base.LoadModel(model);
}
protected override void UnloadModel(View model)
{
((Entry)model).TextChanged -= EntryModel_TextChanged;
base.UnloadModel(model);
}
void EntryModel_TextChanged(object sender, Xamarin.Forms.TextChangedEventArgs e)
{
if (ignoreTextChange)
return;
ignoreTextChange = true;
TextBox.Text = e.NewTextValue;
PasswordBox.Password = e.NewTextValue;
ignoreTextChange = false;
}
void TextBox_TextChanged(object sender, TextChangedEventArgs e)
{
if (ignoreTextChange)
return;
ignoreTextChange = true;
Model.Text = TextBox.Text;
PasswordBox.Password = TextBox.Text;
ignoreTextChange = false;
}
void PasswordBox_PasswordChanged(object sender, RoutedEventArgs e)
{
if (ignoreTextChange)
return;
ignoreTextChange = true;
Model.Text = PasswordBox.Password;
TextBox.Text = PasswordBox.Password;
ignoreTextChange = false;
}
private void Entry_LostFocus(object sender, RoutedEventArgs e)
{
if (Model != null)
Model.SendCompleted();
}
private void Entry_PreviewKeyDown(object sender, System.Windows.Input.KeyEventArgs e)
{
if (Model != null && e.Key == System.Windows.Input.Key.Return)
{
Model.SendCompleted();
e.Handled = true;
}
}
protected override bool Handle_BackgroundColorProperty(BindableProperty property)
{
// Background color is set directly to the TextBox/PasswordBox with bindings.
return true;
}
}
}
| jvlppm/xamarin-forms-wpf | src/Xamarin.Forms.Platforms/Xamarin.Forms.Platform.WPF/Platform.WPF/Rendereres/EntryRenderer.xaml.cs | C# | apache-2.0 | 2,632 |
/**
* @license
* Copyright The Closure Library Authors.
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @fileoverview Displays and edits the value of a cookie.
* Intended only for debugging.
*/
goog.provide('goog.ui.CookieEditor');
goog.require('goog.asserts');
goog.require('goog.dom');
goog.require('goog.dom.TagName');
goog.require('goog.events.EventType');
goog.require('goog.net.Cookies');
goog.require('goog.string');
goog.require('goog.style');
goog.require('goog.ui.Component');
goog.requireType('goog.events.Event');
/**
 * Displays and edits the value of a cookie.
 * Intended only for debugging.
 * @final
 * @unrestricted
 */
goog.ui.CookieEditor = class extends goog.ui.Component {
  /**
   * @param {goog.dom.DomHelper=} opt_domHelper Optional DOM helper.
   */
  constructor(opt_domHelper) {
    'use strict';
    super(opt_domHelper);
  }

  /**
   * Sets the cookie which this component will edit. If the component has
   * already rendered its text area, the area is refreshed with the cookie's
   * current value (empty string when the cookie is absent).
   * @param {string} cookieKey Cookie key.
   */
  selectCookie(cookieKey) {
    'use strict';
    goog.asserts.assert(goog.net.Cookies.getInstance().isValidName(cookieKey));
    this.cookieKey_ = cookieKey;
    if (this.textAreaElem_) {
      this.textAreaElem_.value =
          goog.net.Cookies.getInstance().get(cookieKey) || '';
    }
  }

  /** @override */
  canDecorate() {
    'use strict';
    return false;
  }

  /** @override */
  createDom() {
    'use strict';
    // Debug-only, so we don't need i18n.
    this.clearButtonElem_ = goog.dom.createDom(
        goog.dom.TagName.BUTTON, /* attributes */ null, 'Clear');
    this.updateButtonElem_ = goog.dom.createDom(
        goog.dom.TagName.BUTTON, /* attributes */ null, 'Update');
    var value =
        this.cookieKey_ && goog.net.Cookies.getInstance().get(this.cookieKey_);
    this.textAreaElem_ = goog.dom.createDom(
        goog.dom.TagName.TEXTAREA, /* attibutes */ null, value || '');
    this.valueWarningElem_ = goog.dom.createDom(
        goog.dom.TagName.SPAN,
        /* attibutes */ {'style': 'display:none;color:red'},
        'Invalid cookie value.');
    this.setElementInternal(goog.dom.createDom(
        goog.dom.TagName.DIV,
        /* attibutes */ null, this.valueWarningElem_,
        goog.dom.createDom(goog.dom.TagName.BR), this.textAreaElem_,
        goog.dom.createDom(goog.dom.TagName.BR), this.clearButtonElem_,
        this.updateButtonElem_));
  }

  /** @override */
  enterDocument() {
    'use strict';
    super.enterDocument();
    this.getHandler().listen(
        this.clearButtonElem_, goog.events.EventType.CLICK, this.handleClear_);
    this.getHandler().listen(
        this.updateButtonElem_, goog.events.EventType.CLICK,
        this.handleUpdate_);
  }

  /**
   * Handles user clicking clear button: removes the selected cookie (if any)
   * and blanks the text area.
   * @param {!goog.events.Event} e The click event.
   * @private
   */
  handleClear_(e) {
    'use strict';
    if (this.cookieKey_) {
      goog.net.Cookies.getInstance().remove(this.cookieKey_);
    }
    this.textAreaElem_.value = '';
  }

  /**
   * Handles user clicking update button: writes the (newline-stripped) text
   * area contents back to the cookie, or shows the warning when the value is
   * not a valid cookie value.
   * @param {!goog.events.Event} e The click event.
   * @private
   */
  handleUpdate_(e) {
    'use strict';
    if (this.cookieKey_) {
      var value = this.textAreaElem_.value;
      if (value) {
        // Strip line breaks.
        value = goog.string.stripNewlines(value);
      }
      if (goog.net.Cookies.getInstance().isValidValue(value)) {
        goog.net.Cookies.getInstance().set(this.cookieKey_, value);
        goog.style.setElementShown(this.valueWarningElem_, false);
      } else {
        goog.style.setElementShown(this.valueWarningElem_, true);
      }
    }
  }

  /** @override */
  disposeInternal() {
    'use strict';
    // Let goog.ui.Component run its own teardown (exit the document, dispose
    // the event handler and element references) before we drop ours. The
    // previous version omitted this call, which leaks the component's
    // EventHandler and DOM references.
    super.disposeInternal();
    this.clearButtonElem_ = null;
    this.cookieKey_ = null;
    this.textAreaElem_ = null;
    this.updateButtonElem_ = null;
    this.valueWarningElem_ = null;
  }
};
/**
 * Key of the cookie currently being edited, or null/undefined when no cookie
 * has been selected via selectCookie().
 * @type {?string}
 * @private
 */
goog.ui.CookieEditor.prototype.cookieKey_;
/**
 * Text area showing (and accepting edits to) the cookie's value; created in
 * createDom().
 * @type {HTMLTextAreaElement}
 * @private
 */
goog.ui.CookieEditor.prototype.textAreaElem_;
/**
 * Button that removes the selected cookie; created in createDom().
 * @type {HTMLButtonElement}
 * @private
 */
goog.ui.CookieEditor.prototype.clearButtonElem_;
/**
 * Warning shown when the entered value fails cookie-value validation; hidden
 * by default.
 * @type {HTMLSpanElement}
 * @private
 */
goog.ui.CookieEditor.prototype.valueWarningElem_;
/**
 * Button that writes the edited value back to the cookie; created in
 * createDom().
 * @type {HTMLButtonElement}
 * @private
 */
goog.ui.CookieEditor.prototype.updateButtonElem_;
// TODO(user): add combobox for user to select different cookies
| google/closure-library | closure/goog/ui/cookieeditor.js | JavaScript | apache-2.0 | 4,463 |
/*
* Forge SDK
*
* The Forge Platform contains an expanding collection of web service components that can be used with Autodesk cloud-based products or your own technologies. Take advantage of Autodesk’s expertise in design and engineering.
*
* OpenAPI spec version: 0.1.0
* Contact: forge.help@autodesk.com
* Generated by: https://github.com/swagger-api/swagger-codegen.git
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
using System;
using System.Linq;
using System.IO;
using System.Text;
using System.Collections;
using System.Collections.Generic;
using System.Collections.ObjectModel;
using System.Runtime.Serialization;
using Newtonsoft.Json;
using Newtonsoft.Json.Converters;
namespace Autodesk.Forge.Model
{
/// <summary>
/// RelRefMeta
/// </summary>
[DataContract]
public partial class RelRefMeta : IEquatable<RelRefMeta>
{
/// <summary>
/// Gets or Sets RefType
/// </summary>
[JsonConverter(typeof(StringEnumConverter))]
public enum RefTypeEnum
{
/// <summary>
/// Enum Derived for "derived"
/// </summary>
[EnumMember(Value = "derived")]
Derived,
/// <summary>
/// Enum Dependencies for "dependencies"
/// </summary>
[EnumMember(Value = "dependencies")]
Dependencies,
/// <summary>
/// Enum Auxiliary for "auxiliary"
/// </summary>
[EnumMember(Value = "auxiliary")]
Auxiliary,
/// <summary>
/// Enum Xrefs for "xrefs"
/// </summary>
[EnumMember(Value = "xrefs")]
Xrefs
}
/// <summary>
/// describes the direction of the reference relative to the resource the refs are queried for
/// </summary>
/// <value>describes the direction of the reference relative to the resource the refs are queried for</value>
[JsonConverter(typeof(StringEnumConverter))]
public enum DirectionEnum
{
/// <summary>
/// Enum From for "from"
/// </summary>
[EnumMember(Value = "from")]
From,
/// <summary>
/// Enum To for "to"
/// </summary>
[EnumMember(Value = "to")]
To
}
/// <summary>
/// Gets or Sets FromType
/// </summary>
[JsonConverter(typeof(StringEnumConverter))]
public enum FromTypeEnum
{
/// <summary>
/// Enum Folders for "folders"
/// </summary>
[EnumMember(Value = "folders")]
Folders,
/// <summary>
/// Enum Items for "items"
/// </summary>
[EnumMember(Value = "items")]
Items,
/// <summary>
/// Enum Versions for "versions"
/// </summary>
[EnumMember(Value = "versions")]
Versions
}
/// <summary>
/// Gets or Sets ToType
/// </summary>
[JsonConverter(typeof(StringEnumConverter))]
public enum ToTypeEnum
{
/// <summary>
/// Enum Folders for "folders"
/// </summary>
[EnumMember(Value = "folders")]
Folders,
/// <summary>
/// Enum Items for "items"
/// </summary>
[EnumMember(Value = "items")]
Items,
/// <summary>
/// Enum Versions for "versions"
/// </summary>
[EnumMember(Value = "versions")]
Versions
}
/// <summary>
/// Gets or Sets RefType
/// </summary>
[DataMember(Name="refType", EmitDefaultValue=false)]
public RefTypeEnum? RefType { get; set; }
/// <summary>
/// describes the direction of the reference relative to the resource the refs are queried for
/// </summary>
/// <value>describes the direction of the reference relative to the resource the refs are queried for</value>
[DataMember(Name="direction", EmitDefaultValue=false)]
public DirectionEnum? Direction { get; set; }
/// <summary>
/// Gets or Sets FromType
/// </summary>
[DataMember(Name="fromType", EmitDefaultValue=false)]
public FromTypeEnum? FromType { get; set; }
/// <summary>
/// Gets or Sets ToType
/// </summary>
[DataMember(Name="toType", EmitDefaultValue=false)]
public ToTypeEnum? ToType { get; set; }
/// <summary>
/// Initializes a new instance of the <see cref="RelRefMeta" /> class.
/// </summary>
[JsonConstructorAttribute]
protected RelRefMeta() { }
/// <summary>
/// Initializes a new instance of the <see cref="RelRefMeta" /> class.
/// </summary>
/// <param name="RefType">RefType (required).</param>
/// <param name="Direction">describes the direction of the reference relative to the resource the refs are queried for (required).</param>
/// <param name="FromId">FromId (required).</param>
/// <param name="FromType">FromType (required).</param>
/// <param name="ToId">ToId (required).</param>
/// <param name="ToType">ToType (required).</param>
/// <param name="Extension">Extension (required).</param>
public RelRefMeta(RefTypeEnum? RefType = null, DirectionEnum? Direction = null, string FromId = null, FromTypeEnum? FromType = null, string ToId = null, ToTypeEnum? ToType = null, BaseAttributesExtensionObject Extension = null)
{
// to ensure "RefType" is required (not null)
if (RefType == null)
{
throw new InvalidDataException("RefType is a required property for RelRefMeta and cannot be null");
}
else
{
this.RefType = RefType;
}
// to ensure "Direction" is required (not null)
if (Direction == null)
{
throw new InvalidDataException("Direction is a required property for RelRefMeta and cannot be null");
}
else
{
this.Direction = Direction;
}
// to ensure "FromId" is required (not null)
if (FromId == null)
{
throw new InvalidDataException("FromId is a required property for RelRefMeta and cannot be null");
}
else
{
this.FromId = FromId;
}
// to ensure "FromType" is required (not null)
if (FromType == null)
{
throw new InvalidDataException("FromType is a required property for RelRefMeta and cannot be null");
}
else
{
this.FromType = FromType;
}
// to ensure "ToId" is required (not null)
if (ToId == null)
{
throw new InvalidDataException("ToId is a required property for RelRefMeta and cannot be null");
}
else
{
this.ToId = ToId;
}
// to ensure "ToType" is required (not null)
if (ToType == null)
{
throw new InvalidDataException("ToType is a required property for RelRefMeta and cannot be null");
}
else
{
this.ToType = ToType;
}
// to ensure "Extension" is required (not null)
if (Extension == null)
{
throw new InvalidDataException("Extension is a required property for RelRefMeta and cannot be null");
}
else
{
this.Extension = Extension;
}
}
/// <summary>
/// Gets or Sets FromId
/// </summary>
[DataMember(Name="fromId", EmitDefaultValue=false)]
public string FromId { get; set; }
/// <summary>
/// Gets or Sets ToId
/// </summary>
[DataMember(Name="toId", EmitDefaultValue=false)]
public string ToId { get; set; }
/// <summary>
/// Gets or Sets Extension
/// </summary>
[DataMember(Name="extension", EmitDefaultValue=false)]
public BaseAttributesExtensionObject Extension { get; set; }
/// <summary>
/// Returns the string presentation of the object
/// </summary>
/// <returns>String presentation of the object</returns>
public override string ToString()
{
var sb = new StringBuilder();
sb.Append("class RelRefMeta {\n");
sb.Append(" RefType: ").Append(RefType).Append("\n");
sb.Append(" Direction: ").Append(Direction).Append("\n");
sb.Append(" FromId: ").Append(FromId).Append("\n");
sb.Append(" FromType: ").Append(FromType).Append("\n");
sb.Append(" ToId: ").Append(ToId).Append("\n");
sb.Append(" ToType: ").Append(ToType).Append("\n");
sb.Append(" Extension: ").Append(Extension).Append("\n");
sb.Append("}\n");
return sb.ToString();
}
/// <summary>
/// Returns the JSON string presentation of the object
/// </summary>
/// <returns>JSON string presentation of the object</returns>
public string ToJson()
{
return JsonConvert.SerializeObject(this, Formatting.Indented);
}
/// <summary>
/// Returns true if objects are equal
/// </summary>
/// <param name="obj">Object to be compared</param>
/// <returns>Boolean</returns>
public override bool Equals(object obj)
{
// credit: http://stackoverflow.com/a/10454552/677735
return this.Equals(obj as RelRefMeta);
}
/// <summary>
/// Returns true if RelRefMeta instances are equal
/// </summary>
/// <param name="other">Instance of RelRefMeta to be compared</param>
/// <returns>Boolean</returns>
public bool Equals(RelRefMeta other)
{
// credit: http://stackoverflow.com/a/10454552/677735
if (other == null)
return false;
return
(
this.RefType == other.RefType ||
this.RefType != null &&
this.RefType.Equals(other.RefType)
) &&
(
this.Direction == other.Direction ||
this.Direction != null &&
this.Direction.Equals(other.Direction)
) &&
(
this.FromId == other.FromId ||
this.FromId != null &&
this.FromId.Equals(other.FromId)
) &&
(
this.FromType == other.FromType ||
this.FromType != null &&
this.FromType.Equals(other.FromType)
) &&
(
this.ToId == other.ToId ||
this.ToId != null &&
this.ToId.Equals(other.ToId)
) &&
(
this.ToType == other.ToType ||
this.ToType != null &&
this.ToType.Equals(other.ToType)
) &&
(
this.Extension == other.Extension ||
this.Extension != null &&
this.Extension.Equals(other.Extension)
);
}
/// <summary>
/// Gets the hash code
/// </summary>
/// <returns>Hash code</returns>
public override int GetHashCode()
{
// credit: http://stackoverflow.com/a/263416/677735
unchecked // Overflow is fine, just wrap
{
int hash = 41;
// Suitable nullity checks etc, of course :)
if (this.RefType != null)
hash = hash * 59 + this.RefType.GetHashCode();
if (this.Direction != null)
hash = hash * 59 + this.Direction.GetHashCode();
if (this.FromId != null)
hash = hash * 59 + this.FromId.GetHashCode();
if (this.FromType != null)
hash = hash * 59 + this.FromType.GetHashCode();
if (this.ToId != null)
hash = hash * 59 + this.ToId.GetHashCode();
if (this.ToType != null)
hash = hash * 59 + this.ToType.GetHashCode();
if (this.Extension != null)
hash = hash * 59 + this.Extension.GetHashCode();
return hash;
}
}
}
}
| Autodesk-Forge/forge-api-dotnet-client | src/Autodesk.Forge/Model/RelRefMeta.cs | C# | apache-2.0 | 13,760 |
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package db contains functions related to database.
* db.go includes conneting to db, query and insertion.
* dbschema.go contains definitions of struct for db tables.
Currently it only contains Module struct.
*/
package db
import (
"database/sql"
"fmt"
"os"
"strconv"
"github.com/golang/glog"
// Go postgres driver for Go's database/sql package
_ "github.com/lib/pq"
)
// These are SQL stataments used in this package.
// Query statements can be appended based on its query parameters.
const (
// $4 and $5 should be assigned with the same value (the JSON data of module).
insertModule = `INSERT INTO modules (orgName, name, version, data) VALUES($1, $2, $3, $4) on conflict (orgName, name, version) do update set data=$5`
selectModules = `select * from modules`
// We want to ensure that user has to provide all three inputs,
// instead of deleting too many modules by mistake with some fields missing.
deleteModule = `delete from modules where orgName = $1 and name = $2 and version = $3`
selectFeatureBundles = `select * from featureBundles`
// $4 and $5 should be assigned with the same value (the JSON data of feature-bundle).
insertFeatureBundle = `INSERT INTO featureBundles (orgName, name, version, data) VALUES($1, $2, $3, $4) on conflict (orgName, name, version) do update set data=$5`
deleteFeatureBundle = `delete from featurebundles where orgName = $1 and name = $2 and version = $3`
)
// db is the global variable of connection to database.
// It would be assigned value when *ConnectDB* function is called.
//
// We choose to use this global variable due to that *gqlgen* automatically generates many server side codes.
// Resolver functions generated by *gqlgen* are handler functions for graphQL queries and are methods of *resolver* struct.
// If we define a struct with field of db connection instead of using the global variable
// and change *Query* functions to methods of that struct,
// we need to initialize db connection while initializing *resolver* object in server codes
// such that *resolver* functions can call these *Query* functions.
// However, initialization function of *resolver* struct is automatically generated and overwritten every time.
// Thus, we cannot initialize a db connection inside the *resolver* objects.
//
// Another option is to establish a new db connection for each *Query* function and close it after query finishes.
// However, that would be too expensive to connect to db every time server receives a new query.
var db *sql.DB
// ConnectDB establishes connection to database, *db* variable is assigned when opening database.
// This should only be called once before any other database function is called.
//
// Users need set environment variables for connection, including
// * DB_HOST: host address of target db instances, by default: localhost.
// * DB_PORT: port number of postgres db, by default: 5432.
// * DB_USERNAME: username of database, error would be returned if not set.
// * DB_PWD: password of target database, error would be returned if not set.
// * DB_NAME: name of database for connection, error would be returned if not set.
// * DB_SOCKET_DIR: directory of Unix socket in Cloud Run which serves as Cloud SQL
// Auth proxy to connect to postgres database.
// If service is deployed on Cloud Run, just use the default value.
// By default, it is set to `/cloudsql`.
func ConnectDB() error {
	// read db config from env
	// port number of target database
	port := 5432
	if portStr, ok := os.LookupEnv("DB_PORT"); !ok {
		glog.Infof("DB_PORT not set, setting port to %d", port)
	} else {
		var err error
		if port, err = strconv.Atoi(portStr); err != nil {
			return fmt.Errorf("DB_PORT in incorrect format: %v", err)
		}
	}

	// username of target database
	user, ok := os.LookupEnv("DB_USERNAME")
	if !ok {
		return fmt.Errorf("DB_USERNAME not set")
	}

	// password of target database
	password, ok := os.LookupEnv("DB_PWD")
	if !ok {
		return fmt.Errorf("DB_PWD not set")
	}

	// name of target database
	dbname, ok := os.LookupEnv("DB_NAME")
	if !ok {
		return fmt.Errorf("DB_NAME not set")
	}

	// (Cloud Run only) Directory of Unix socket
	socketDir, ok := os.LookupEnv("DB_SOCKET_DIR")
	if !ok {
		socketDir = "/cloudsql"
	}

	var psqlconn string // connection string used to connect to traget database
	// host address of target database
	host, ok := os.LookupEnv("DB_HOST")
	switch {
	case !ok:
		glog.Infoln("DB_HOST not set, setting host to localhost")
		host = "localhost"
		fallthrough
	case host == "localhost":
		// This connection string is used if service is not deployed on Cloud Run,
		// instead connection is made from localhost via Cloud SQL proxy.
		psqlconn = fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s sslmode=disable", host, port, user, password, dbname)
	default:
		psqlconn = fmt.Sprintf("host=%s/%s port=%d user=%s password=%s dbname=%s", socketDir, host, port, user, password, dbname)
	}

	// open database
	var err error
	db, err = sql.Open("postgres", psqlconn)
	if err != nil {
		return fmt.Errorf("open database failed: %v", err)
	}

	// see if connection is established successfully
	if err := db.Ping(); err != nil {
		// Close the handle on failure so a failed connection attempt does not
		// leak pool resources; previously the opened *sql.DB was abandoned.
		db.Close()
		return fmt.Errorf("ping database failed: %v", err)
	}
	return nil
}
// Close releases the global database connection opened by ConnectDB.
// It returns whatever error the underlying driver reports; it must only be
// called after a successful ConnectDB.
func Close() error {
	return db.Close()
}
// InsertModule upserts one row of the modules table keyed by
// (orgName, name, version); on conflict the existing row's data column is
// overwritten. A non-nil error indicates the statement failed.
func InsertModule(orgName string, name string, version string, data string) error {
	// data is bound twice: once for the INSERT values, once for the
	// ON CONFLICT ... DO UPDATE clause of the prepared statement.
	_, err := db.Exec(insertModule, orgName, name, version, data, data)
	if err != nil {
		return fmt.Errorf("insert/update module into db failed: %v", err)
	}
	return nil
}
// ReadModulesByRow scans from queried modules from rows one by one, rows are closed inside.
// Return slice of db Module struct each field of which corresponds to one column in db.
// Error is returned when scanning a row or iterating over rows failed.
func ReadModulesByRow(rows *sql.Rows) ([]Module, error) {
	var modules []Module
	defer rows.Close()
	for rows.Next() {
		var module Module
		if err := rows.Scan(&module.OrgName, &module.Name, &module.Version, &module.Data); err != nil {
			return nil, fmt.Errorf("scan db rows failure, %v", err)
		}
		modules = append(modules, module)
	}
	// rows.Next returns false both at end-of-data and on iteration error;
	// check Err so a failure is not silently returned as a complete result.
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("row iteration failure, %v", err)
	}
	return modules, nil
}
// FormatQueryStr appends a WHERE clause to baseQuery for every entry of
// parmNames, producing numbered placeholders ($1, $2, ...) in order:
// the first column is joined with " where", subsequent ones with " and".
// With an empty parmNames the baseQuery is returned unchanged.
func FormatQueryStr(parmNames []string, baseQuery string) string {
	query := baseQuery
	for i, col := range parmNames {
		sep := " and"
		if i == 0 {
			sep = " where"
		}
		query += fmt.Sprintf("%s %s=$%d", sep, col, i+1)
	}
	return query
}
// QueryModulesByOrgName returns every module stored for the organization
// *orgName*; a nil orgName matches all organizations.
// Each returned db Module mirrors one row of the modules table.
// A non-nil error indicates the query or row scanning failed.
func QueryModulesByOrgName(orgName *string) ([]Module, error) {
	args := []interface{}{} // values of the non-nil query parameters
	cols := []string{}      // column names of the non-nil query parameters
	if orgName != nil {
		args = append(args, *orgName)
		cols = append(cols, "orgName")
	}
	// Build the statement from whichever filters were supplied.
	stmt := FormatQueryStr(cols, selectModules)
	rows, err := db.Query(stmt, args...)
	if err != nil {
		return nil, fmt.Errorf("QueryModulesByOrgName failed: %v", err)
	}
	defer rows.Close()
	return ReadModulesByRow(rows)
}
// QueryModulesByKey queries modules by its key (name, version), it is possible that parameters are null.
// If both parameters are null, this equals query for all modules.
// Return slice of db Module struct each field of which corresponds to one column in db.
// Error is returned when query or reading data failed.
func QueryModulesByKey(name *string, version *string) ([]Module, error) {
	var parms []interface{} // parms is used to store value of non-nil query parameters
	parmNames := []string{} // parmNames is used to store name of non-nil query parameters
	if name != nil {
		parms = append(parms, *name)
		parmNames = append(parmNames, "name")
	}
	if version != nil {
		parms = append(parms, *version)
		parmNames = append(parmNames, "version")
	}
	// Format query statement string based on non-nil query parameters
	queryStmt := FormatQueryStr(parmNames, selectModules)
	rows, err := db.Query(queryStmt, parms...)
	if err != nil {
		// Previously reported "QueryModulesByOrgName failed" (copy-paste);
		// name the actual failing function.
		return nil, fmt.Errorf("QueryModulesByKey failed: %v", err)
	}
	defer rows.Close()
	return ReadModulesByRow(rows)
}
// DeleteModule removes the single module identified by the key
// (orgName, name, version) from the modules table.
// An error is returned when the statement fails, or when the number of rows
// affected by the deletion is not exactly one.
func DeleteModule(orgName string, name string, version string) error {
	res, err := db.Exec(deleteModule, orgName, name, version)
	if err != nil {
		return fmt.Errorf("DeleteModule failed: %v", err)
	}
	affected, err := res.RowsAffected()
	if err != nil {
		return fmt.Errorf("DeleteModule, access rows affected in result failed: %v", err)
	}
	// Deleting by full primary key must touch exactly one row.
	if affected != 1 {
		return fmt.Errorf("DeleteModule: affected row is not one, it affects %d rows", affected)
	}
	return nil
}
// ReadFeatureBundlesByRow scans from queried FeatureBundles from rows one by one, rows are closed inside.
// Return slice of db FeatureBundle struct each field of which corresponds to one column in db.
// Error is returned when scanning a row or iterating over rows failed.
func ReadFeatureBundlesByRow(rows *sql.Rows) ([]FeatureBundle, error) {
	var featureBundles []FeatureBundle
	defer rows.Close()
	for rows.Next() {
		var featureBundle FeatureBundle
		if err := rows.Scan(&featureBundle.OrgName, &featureBundle.Name, &featureBundle.Version, &featureBundle.Data); err != nil {
			return nil, fmt.Errorf("ReadFeatureBundlesByRow: scan db rows failure, %v", err)
		}
		featureBundles = append(featureBundles, featureBundle)
	}
	// rows.Next returns false both at end-of-data and on iteration error;
	// check Err so a failure is not silently returned as a complete result.
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("ReadFeatureBundlesByRow: row iteration failure, %v", err)
	}
	return featureBundles, nil
}
// QueryFeatureBundlesByOrgName returns the feature-bundles belonging to the
// organization *orgName*; a nil orgName matches every organization.
// Each returned db FeatureBundle mirrors one row of the featureBundles table.
// A non-nil error indicates the query or row scanning failed.
func QueryFeatureBundlesByOrgName(orgName *string) ([]FeatureBundle, error) {
	args := []interface{}{} // values of the non-nil query parameters
	cols := []string{}      // column names of the non-nil query parameters
	if orgName != nil {
		args = append(args, *orgName)
		cols = append(cols, "orgName")
	}
	// Build the statement from whichever filters were supplied.
	stmt := FormatQueryStr(cols, selectFeatureBundles)
	rows, err := db.Query(stmt, args...)
	if err != nil {
		return nil, fmt.Errorf("QueryFeatureBundlesByOrgName failed: %v", err)
	}
	return ReadFeatureBundlesByRow(rows)
}
// InsertFeatureBundle upserts one row of the featureBundles table keyed by
// (orgName, name, version); on conflict the existing row's data column is
// overwritten. A non-nil error indicates the statement failed.
func InsertFeatureBundle(orgName string, name string, version string, data string) error {
	// data is bound twice: once for the INSERT values, once for the
	// ON CONFLICT ... DO UPDATE clause of the prepared statement.
	_, err := db.Exec(insertFeatureBundle, orgName, name, version, data, data)
	if err != nil {
		return fmt.Errorf("insert/update FeatureBundle into db failed: %v", err)
	}
	return nil
}
// DeleteFeatureBundle removes the single FeatureBundle identified by the key
// (orgName, name, version) from the featureBundles table.
// An error is returned when the statement fails, or when the number of rows
// affected by the deletion is not exactly one.
func DeleteFeatureBundle(orgName string, name string, version string) error {
	res, err := db.Exec(deleteFeatureBundle, orgName, name, version)
	if err != nil {
		return fmt.Errorf("DeleteFeatureBundle failed: %v", err)
	}
	affected, err := res.RowsAffected()
	if err != nil {
		return fmt.Errorf("DeleteFeatureBundle, access rows affected in result failed: %v", err)
	}
	// Deleting by full primary key must touch exactly one row.
	if affected != 1 {
		return fmt.Errorf("DeleteFeatureBundle: affected row is not one, it affects %d rows", affected)
	}
	return nil
}
// QueryFeatureBundlesByKey queries feature-bundles by key (name, version);
// either parameter may be nil, and when both are nil every feature-bundle is
// returned. Each returned db FeatureBundle mirrors one row of the
// featureBundles table. A non-nil error indicates the query or scan failed.
func QueryFeatureBundlesByKey(name *string, version *string) ([]FeatureBundle, error) {
	args := []interface{}{} // values of the non-nil query parameters
	cols := []string{}      // column names of the non-nil query parameters
	if name != nil {
		args = append(args, *name)
		cols = append(cols, "name")
	}
	if version != nil {
		args = append(args, *version)
		cols = append(cols, "version")
	}
	// Build the statement from whichever filters were supplied.
	stmt := FormatQueryStr(cols, selectFeatureBundles)
	rows, err := db.Query(stmt, args...)
	if err != nil {
		return nil, fmt.Errorf("QueryFeatureBundlesByKey failed: %v", err)
	}
	return ReadFeatureBundlesByRow(rows)
}
| openconfig/catalog-server | pkg/db/db.go | GO | apache-2.0 | 14,564 |
// LUCENENET specific - excluding this class in favor of DirectoryNotFoundException,
// although that means we need to catch DirectoryNotFoundException everywhere that
// FileNotFoundException is being caught (because it is a superclass) to be sure we have the same behavior.
//using System;
//using System.IO;
//namespace YAF.Lucene.Net.Store
//{
// /*
// * Licensed to the Apache Software Foundation (ASF) under one or more
// * contributor license agreements. See the NOTICE file distributed with
// * this work for additional information regarding copyright ownership.
// * The ASF licenses this file to You under the Apache License, Version 2.0
// * (the "License"); you may not use this file except in compliance with
// * the License. You may obtain a copy of the License at
// *
// * http://www.apache.org/licenses/LICENSE-2.0
// *
// * Unless required by applicable law or agreed to in writing, software
// * distributed under the License is distributed on an "AS IS" BASIS,
// * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// * See the License for the specific language governing permissions and
// * limitations under the License.
// */
// /// <summary>
// /// This exception is thrown when you try to list a
// /// non-existent directory.
// /// </summary>
// // LUCENENET: It is no longer good practice to use binary serialization.
// // See: https://github.com/dotnet/corefx/issues/23584#issuecomment-325724568
//#if FEATURE_SERIALIZABLE_EXCEPTIONS
// [Serializable]
//#endif
// public class NoSuchDirectoryException : FileNotFoundException
// {
// public NoSuchDirectoryException(string message)
// : base(message)
// {
// }
// }
//} | YAFNET/YAFNET | yafsrc/Lucene.Net/Lucene.Net/Store/NoSuchDirectoryException.cs | C# | apache-2.0 | 1,813 |
//------------------------------------------------------------------------------
// <自动生成>
// 此代码由工具生成。
//
// 对此文件的更改可能导致不正确的行为,如果
// 重新生成代码,则所做更改将丢失。
// </自动生成>
//------------------------------------------------------------------------------
namespace News {
    public partial class NewsEdit {
        /// <summary>
        /// tbTitle control (news title text box).
        /// </summary>
        /// <remarks>
        /// Auto-generated field.
        /// To modify, move the field declaration from the designer file to the code-behind file.
        /// </remarks>
        protected global::System.Web.UI.WebControls.TextBox tbTitle;
        /// <summary>
        /// ddlSort control (news category drop-down).
        /// </summary>
        /// <remarks>
        /// Auto-generated field.
        /// To modify, move the field declaration from the designer file to the code-behind file.
        /// </remarks>
        protected global::System.Web.UI.WebControls.DropDownList ddlSort;
        /// <summary>
        /// CKEditorControl1 control (rich-text body editor).
        /// </summary>
        /// <remarks>
        /// Auto-generated field.
        /// To modify, move the field declaration from the designer file to the code-behind file.
        /// </remarks>
        protected global::CKEditor.NET.CKEditorControl CKEditorControl1;
        /// <summary>
        /// btnSave control.
        /// </summary>
        /// <remarks>
        /// Auto-generated field.
        /// To modify, move the field declaration from the designer file to the code-behind file.
        /// </remarks>
        protected global::System.Web.UI.WebControls.Button btnSave;
        /// <summary>
        /// btnBack control.
        /// </summary>
        /// <remarks>
        /// Auto-generated field.
        /// To modify, move the field declaration from the designer file to the code-behind file.
        /// </remarks>
        protected global::System.Web.UI.WebControls.Button btnBack;
    }
}
| Rangowj/NewsSystem | News/News/NewsEdit.aspx.designer.cs | C# | apache-2.0 | 2,172 |
/*
* Copyright 2010 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.android.apps.iosched.provider;
import com.google.android.apps.iosched.provider.ScheduleContract.Blocks;
import com.google.android.apps.iosched.provider.ScheduleContract.Notes;
import com.google.android.apps.iosched.provider.ScheduleContract.Rooms;
import com.google.android.apps.iosched.provider.ScheduleContract.SearchSuggest;
import com.google.android.apps.iosched.provider.ScheduleContract.Sessions;
import com.google.android.apps.iosched.provider.ScheduleContract.Speakers;
import com.google.android.apps.iosched.provider.ScheduleContract.Tracks;
import com.google.android.apps.iosched.provider.ScheduleContract.Vendors;
import com.google.android.apps.iosched.provider.ScheduleDatabase.SessionsSearchColumns;
import com.google.android.apps.iosched.provider.ScheduleDatabase.SessionsSpeakers;
import com.google.android.apps.iosched.provider.ScheduleDatabase.SessionsTracks;
import com.google.android.apps.iosched.provider.ScheduleDatabase.Tables;
import com.google.android.apps.iosched.provider.ScheduleDatabase.VendorsSearchColumns;
import com.google.android.apps.iosched.util.NotesExporter;
import com.google.android.apps.iosched.util.SelectionBuilder;
import android.app.Activity;
import android.app.SearchManager;
import android.content.ContentProvider;
import android.content.ContentProviderOperation;
import android.content.ContentProviderResult;
import android.content.ContentUris;
import android.content.ContentValues;
import android.content.Context;
import android.content.OperationApplicationException;
import android.content.UriMatcher;
import android.database.Cursor;
import android.database.MatrixCursor;
import android.database.sqlite.SQLiteDatabase;
import android.net.Uri;
import android.os.ParcelFileDescriptor;
import android.provider.BaseColumns;
import android.provider.OpenableColumns;
import android.util.Log;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import net.bespokesystems.android.apps.wicsa2011.de.service.SyncService;
/**
* Provider that stores {@link ScheduleContract} data. Data is usually inserted
* by {@link SyncService}, and queried by various {@link Activity} instances.
*/
public class ScheduleProvider extends ContentProvider {
    private static final String TAG = "ScheduleProvider";
    private static final boolean LOGV = Log.isLoggable(TAG, Log.VERBOSE);

    // Database helper; created once in onCreate() and shared by all operations.
    private ScheduleDatabase mOpenHelper;

    // Routes incoming content Uris to the integer codes declared below.
    private static final UriMatcher sUriMatcher = buildUriMatcher();

    private static final int BLOCKS = 100;
    private static final int BLOCKS_BETWEEN = 101;
    private static final int BLOCKS_ID = 102;
    private static final int BLOCKS_ID_SESSIONS = 103;
    private static final int TRACKS = 200;
    private static final int TRACKS_ID = 201;
    private static final int TRACKS_ID_SESSIONS = 202;
    private static final int TRACKS_ID_VENDORS = 203;
    private static final int ROOMS = 300;
    private static final int ROOMS_ID = 301;
    private static final int ROOMS_ID_SESSIONS = 302;
    private static final int SESSIONS = 400;
    private static final int SESSIONS_STARRED = 401;
    private static final int SESSIONS_SEARCH = 402;
    private static final int SESSIONS_AT = 403;
    private static final int SESSIONS_ID = 404;
    private static final int SESSIONS_ID_SPEAKERS = 405;
    private static final int SESSIONS_ID_TRACKS = 406;
    private static final int SESSIONS_ID_NOTES = 407;
    private static final int SPEAKERS = 500;
    private static final int SPEAKERS_ID = 501;
    private static final int SPEAKERS_ID_SESSIONS = 502;
    private static final int VENDORS = 600;
    private static final int VENDORS_STARRED = 601;
    private static final int VENDORS_SEARCH = 603;
    private static final int VENDORS_ID = 604;
    private static final int NOTES = 700;
    private static final int NOTES_EXPORT = 701;
    private static final int NOTES_ID = 702;
    private static final int SEARCH_SUGGEST = 800;

    private static final String MIME_XML = "text/xml";

    /**
     * Build and return a {@link UriMatcher} that catches all {@link Uri}
     * variations supported by this {@link ContentProvider}.
     */
    private static UriMatcher buildUriMatcher() {
        final UriMatcher matcher = new UriMatcher(UriMatcher.NO_MATCH);
        final String authority = ScheduleContract.CONTENT_AUTHORITY;
        matcher.addURI(authority, "blocks", BLOCKS);
        // Registration order matters: literal paths such as "blocks/between/*/*"
        // are added before the "blocks/*" wildcard so the wildcard does not
        // capture them first.
        matcher.addURI(authority, "blocks/between/*/*", BLOCKS_BETWEEN);
        matcher.addURI(authority, "blocks/*", BLOCKS_ID);
        matcher.addURI(authority, "blocks/*/sessions", BLOCKS_ID_SESSIONS);
        matcher.addURI(authority, "tracks", TRACKS);
        matcher.addURI(authority, "tracks/*", TRACKS_ID);
        matcher.addURI(authority, "tracks/*/sessions", TRACKS_ID_SESSIONS);
        matcher.addURI(authority, "tracks/*/vendors", TRACKS_ID_VENDORS);
        matcher.addURI(authority, "rooms", ROOMS);
        matcher.addURI(authority, "rooms/*", ROOMS_ID);
        matcher.addURI(authority, "rooms/*/sessions", ROOMS_ID_SESSIONS);
        matcher.addURI(authority, "sessions", SESSIONS);
        // Literal "starred"/"search"/"at" paths must precede "sessions/*".
        matcher.addURI(authority, "sessions/starred", SESSIONS_STARRED);
        matcher.addURI(authority, "sessions/search/*", SESSIONS_SEARCH);
        matcher.addURI(authority, "sessions/at/*", SESSIONS_AT);
        matcher.addURI(authority, "sessions/*", SESSIONS_ID);
        matcher.addURI(authority, "sessions/*/speakers", SESSIONS_ID_SPEAKERS);
        matcher.addURI(authority, "sessions/*/tracks", SESSIONS_ID_TRACKS);
        matcher.addURI(authority, "sessions/*/notes", SESSIONS_ID_NOTES);
        matcher.addURI(authority, "speakers", SPEAKERS);
        matcher.addURI(authority, "speakers/*", SPEAKERS_ID);
        matcher.addURI(authority, "speakers/*/sessions", SPEAKERS_ID_SESSIONS);
        matcher.addURI(authority, "vendors", VENDORS);
        matcher.addURI(authority, "vendors/starred", VENDORS_STARRED);
        matcher.addURI(authority, "vendors/search/*", VENDORS_SEARCH);
        matcher.addURI(authority, "vendors/*", VENDORS_ID);
        matcher.addURI(authority, "notes", NOTES);
        matcher.addURI(authority, "notes/export", NOTES_EXPORT);
        matcher.addURI(authority, "notes/*", NOTES_ID);
        matcher.addURI(authority, "search_suggest_query", SEARCH_SUGGEST);
        return matcher;
    }

    @Override
    public boolean onCreate() {
        final Context context = getContext();
        mOpenHelper = new ScheduleDatabase(context);
        return true;
    }

    /** {@inheritDoc} */
    @Override
    public String getType(Uri uri) {
        final int match = sUriMatcher.match(uri);
        switch (match) {
            case BLOCKS:
                return Blocks.CONTENT_TYPE;
            case BLOCKS_BETWEEN:
                return Blocks.CONTENT_TYPE;
            case BLOCKS_ID:
                return Blocks.CONTENT_ITEM_TYPE;
            case BLOCKS_ID_SESSIONS:
                return Sessions.CONTENT_TYPE;
            case TRACKS:
                return Tracks.CONTENT_TYPE;
            case TRACKS_ID:
                return Tracks.CONTENT_ITEM_TYPE;
            case TRACKS_ID_SESSIONS:
                return Sessions.CONTENT_TYPE;
            case TRACKS_ID_VENDORS:
                return Vendors.CONTENT_TYPE;
            case ROOMS:
                return Rooms.CONTENT_TYPE;
            case ROOMS_ID:
                return Rooms.CONTENT_ITEM_TYPE;
            case ROOMS_ID_SESSIONS:
                return Sessions.CONTENT_TYPE;
            case SESSIONS:
                return Sessions.CONTENT_TYPE;
            case SESSIONS_STARRED:
                return Sessions.CONTENT_TYPE;
            case SESSIONS_SEARCH:
                return Sessions.CONTENT_TYPE;
            case SESSIONS_AT:
                return Sessions.CONTENT_TYPE;
            case SESSIONS_ID:
                return Sessions.CONTENT_ITEM_TYPE;
            case SESSIONS_ID_SPEAKERS:
                return Speakers.CONTENT_TYPE;
            case SESSIONS_ID_TRACKS:
                return Tracks.CONTENT_TYPE;
            case SESSIONS_ID_NOTES:
                return Notes.CONTENT_TYPE;
            case SPEAKERS:
                return Speakers.CONTENT_TYPE;
            case SPEAKERS_ID:
                return Speakers.CONTENT_ITEM_TYPE;
            case SPEAKERS_ID_SESSIONS:
                return Sessions.CONTENT_TYPE;
            case VENDORS:
                return Vendors.CONTENT_TYPE;
            case VENDORS_STARRED:
                return Vendors.CONTENT_TYPE;
            case VENDORS_SEARCH:
                return Vendors.CONTENT_TYPE;
            case VENDORS_ID:
                return Vendors.CONTENT_ITEM_TYPE;
            case NOTES:
                return Notes.CONTENT_TYPE;
            case NOTES_EXPORT:
                // Notes export is served as a file attachment, not a cursor.
                return MIME_XML;
            case NOTES_ID:
                return Notes.CONTENT_ITEM_TYPE;
            default:
                throw new UnsupportedOperationException("Unknown uri: " + uri);
        }
    }

    /** {@inheritDoc} */
    @Override
    public Cursor query(Uri uri, String[] projection, String selection, String[] selectionArgs,
            String sortOrder) {
        if (LOGV) Log.v(TAG, "query(uri=" + uri + ", proj=" + Arrays.toString(projection) + ")");
        final SQLiteDatabase db = mOpenHelper.getReadableDatabase();
        final int match = sUriMatcher.match(uri);
        switch (match) {
            default: {
                // Most cases are handled with simple SelectionBuilder
                final SelectionBuilder builder = buildExpandedSelection(uri, match);
                return builder.where(selection, selectionArgs).query(db, projection, sortOrder);
            }
            case NOTES_EXPORT: {
                // Provide query values for file attachments
                // (DISPLAY_NAME/SIZE pair expected by openable-content clients).
                final String[] columns = { OpenableColumns.DISPLAY_NAME, OpenableColumns.SIZE };
                final MatrixCursor cursor = new MatrixCursor(columns, 1);
                cursor.addRow(new String[] { "notes.xml", null });
                return cursor;
            }
            case SEARCH_SUGGEST: {
                final SelectionBuilder builder = new SelectionBuilder();
                // Adjust incoming query to become SQL text match
                // NOTE(review): this mutates the caller's selectionArgs array and
                // throws NullPointerException/ArrayIndexOutOfBoundsException when
                // selectionArgs is null or empty -- confirm the search framework
                // always passes the typed query as the first argument.
                selectionArgs[0] = selectionArgs[0] + "%";
                builder.table(Tables.SEARCH_SUGGEST);
                builder.where(selection, selectionArgs);
                builder.map(SearchManager.SUGGEST_COLUMN_QUERY,
                        SearchManager.SUGGEST_COLUMN_TEXT_1);
                projection = new String[] { BaseColumns._ID, SearchManager.SUGGEST_COLUMN_TEXT_1,
                        SearchManager.SUGGEST_COLUMN_QUERY };
                final String limit = uri.getQueryParameter(SearchManager.SUGGEST_PARAMETER_LIMIT);
                return builder.query(db, projection, null, null, SearchSuggest.DEFAULT_SORT, limit);
            }
        }
    }

    /** {@inheritDoc} */
    @Override
    public Uri insert(Uri uri, ContentValues values) {
        if (LOGV) Log.v(TAG, "insert(uri=" + uri + ", values=" + values.toString() + ")");
        // NOTE(review): write operations never call
        // getContext().getContentResolver().notifyChange(uri, null), so
        // ContentObservers and managed cursors are not refreshed automatically
        // -- confirm this is intentional (e.g. callers requery manually).
        final SQLiteDatabase db = mOpenHelper.getWritableDatabase();
        final int match = sUriMatcher.match(uri);
        switch (match) {
            case BLOCKS: {
                db.insertOrThrow(Tables.BLOCKS, null, values);
                return Blocks.buildBlockUri(values.getAsString(Blocks.BLOCK_ID));
            }
            case TRACKS: {
                db.insertOrThrow(Tables.TRACKS, null, values);
                return Tracks.buildTrackUri(values.getAsString(Tracks.TRACK_ID));
            }
            case ROOMS: {
                db.insertOrThrow(Tables.ROOMS, null, values);
                return Rooms.buildRoomUri(values.getAsString(Rooms.ROOM_ID));
            }
            case SESSIONS: {
                db.insertOrThrow(Tables.SESSIONS, null, values);
                return Sessions.buildSessionUri(values.getAsString(Sessions.SESSION_ID));
            }
            case SESSIONS_ID_SPEAKERS: {
                db.insertOrThrow(Tables.SESSIONS_SPEAKERS, null, values);
                return Speakers.buildSpeakerUri(values.getAsString(SessionsSpeakers.SPEAKER_ID));
            }
            case SESSIONS_ID_TRACKS: {
                db.insertOrThrow(Tables.SESSIONS_TRACKS, null, values);
                return Tracks.buildTrackUri(values.getAsString(SessionsTracks.TRACK_ID));
            }
            case SESSIONS_ID_NOTES: {
                // Notes are keyed by the session referenced in the Uri path.
                final String sessionId = Sessions.getSessionId(uri);
                values.put(Notes.SESSION_ID, sessionId);
                final long noteId = db.insertOrThrow(Tables.NOTES, null, values);
                return ContentUris.withAppendedId(Notes.CONTENT_URI, noteId);
            }
            case SPEAKERS: {
                db.insertOrThrow(Tables.SPEAKERS, null, values);
                return Speakers.buildSpeakerUri(values.getAsString(Speakers.SPEAKER_ID));
            }
            case VENDORS: {
                db.insertOrThrow(Tables.VENDORS, null, values);
                return Vendors.buildVendorUri(values.getAsString(Vendors.VENDOR_ID));
            }
            case NOTES: {
                final long noteId = db.insertOrThrow(Tables.NOTES, null, values);
                return ContentUris.withAppendedId(Notes.CONTENT_URI, noteId);
            }
            case SEARCH_SUGGEST: {
                db.insertOrThrow(Tables.SEARCH_SUGGEST, null, values);
                return SearchSuggest.CONTENT_URI;
            }
            default: {
                throw new UnsupportedOperationException("Unknown uri: " + uri);
            }
        }
    }

    /** {@inheritDoc} */
    @Override
    public int update(Uri uri, ContentValues values, String selection, String[] selectionArgs) {
        if (LOGV) Log.v(TAG, "update(uri=" + uri + ", values=" + values.toString() + ")");
        // NOTE(review): as with insert(), no notifyChange() is issued after the write.
        final SQLiteDatabase db = mOpenHelper.getWritableDatabase();
        final SelectionBuilder builder = buildSimpleSelection(uri);
        return builder.where(selection, selectionArgs).update(db, values);
    }

    /** {@inheritDoc} */
    @Override
    public int delete(Uri uri, String selection, String[] selectionArgs) {
        if (LOGV) Log.v(TAG, "delete(uri=" + uri + ")");
        // NOTE(review): as with insert(), no notifyChange() is issued after the write.
        final SQLiteDatabase db = mOpenHelper.getWritableDatabase();
        final SelectionBuilder builder = buildSimpleSelection(uri);
        return builder.where(selection, selectionArgs).delete(db);
    }

    /**
     * Apply the given set of {@link ContentProviderOperation}, executing inside
     * a {@link SQLiteDatabase} transaction. All changes will be rolled back if
     * any single one fails.
     */
    @Override
    public ContentProviderResult[] applyBatch(ArrayList<ContentProviderOperation> operations)
            throws OperationApplicationException {
        final SQLiteDatabase db = mOpenHelper.getWritableDatabase();
        db.beginTransaction();
        try {
            final int numOperations = operations.size();
            final ContentProviderResult[] results = new ContentProviderResult[numOperations];
            for (int i = 0; i < numOperations; i++) {
                // Earlier results are passed along so back-references resolve.
                results[i] = operations.get(i).apply(this, results, i);
            }
            db.setTransactionSuccessful();
            return results;
        } finally {
            db.endTransaction();
        }
    }

    /**
     * Build a simple {@link SelectionBuilder} to match the requested
     * {@link Uri}. This is usually enough to support {@link #insert},
     * {@link #update}, and {@link #delete} operations.
     */
    private SelectionBuilder buildSimpleSelection(Uri uri) {
        final SelectionBuilder builder = new SelectionBuilder();
        final int match = sUriMatcher.match(uri);
        switch (match) {
            case BLOCKS: {
                return builder.table(Tables.BLOCKS);
            }
            case BLOCKS_ID: {
                final String blockId = Blocks.getBlockId(uri);
                return builder.table(Tables.BLOCKS)
                        .where(Blocks.BLOCK_ID + "=?", blockId);
            }
            case TRACKS: {
                return builder.table(Tables.TRACKS);
            }
            case TRACKS_ID: {
                final String trackId = Tracks.getTrackId(uri);
                return builder.table(Tables.TRACKS)
                        .where(Tracks.TRACK_ID + "=?", trackId);
            }
            case ROOMS: {
                return builder.table(Tables.ROOMS);
            }
            case ROOMS_ID: {
                final String roomId = Rooms.getRoomId(uri);
                return builder.table(Tables.ROOMS)
                        .where(Rooms.ROOM_ID + "=?", roomId);
            }
            case SESSIONS: {
                return builder.table(Tables.SESSIONS);
            }
            case SESSIONS_ID: {
                final String sessionId = Sessions.getSessionId(uri);
                return builder.table(Tables.SESSIONS)
                        .where(Sessions.SESSION_ID + "=?", sessionId);
            }
            case SESSIONS_ID_SPEAKERS: {
                final String sessionId = Sessions.getSessionId(uri);
                return builder.table(Tables.SESSIONS_SPEAKERS)
                        .where(Sessions.SESSION_ID + "=?", sessionId);
            }
            case SESSIONS_ID_TRACKS: {
                final String sessionId = Sessions.getSessionId(uri);
                return builder.table(Tables.SESSIONS_TRACKS)
                        .where(Sessions.SESSION_ID + "=?", sessionId);
            }
            case SPEAKERS: {
                return builder.table(Tables.SPEAKERS);
            }
            case SPEAKERS_ID: {
                final String speakerId = Speakers.getSpeakerId(uri);
                return builder.table(Tables.SPEAKERS)
                        .where(Speakers.SPEAKER_ID + "=?", speakerId);
            }
            case VENDORS: {
                return builder.table(Tables.VENDORS);
            }
            case VENDORS_ID: {
                final String vendorId = Vendors.getVendorId(uri);
                return builder.table(Tables.VENDORS)
                        .where(Vendors.VENDOR_ID + "=?", vendorId);
            }
            case NOTES: {
                return builder.table(Tables.NOTES);
            }
            case NOTES_ID: {
                final String noteId = uri.getPathSegments().get(1);
                return builder.table(Tables.NOTES)
                        .where(Notes._ID + "=?", noteId);
            }
            case SEARCH_SUGGEST: {
                return builder.table(Tables.SEARCH_SUGGEST);
            }
            default: {
                throw new UnsupportedOperationException("Unknown uri: " + uri);
            }
        }
    }

    /**
     * Build an advanced {@link SelectionBuilder} to match the requested
     * {@link Uri}. This is usually only used by {@link #query}, since it
     * performs table joins useful for {@link Cursor} data.
     */
    private SelectionBuilder buildExpandedSelection(Uri uri, int match) {
        final SelectionBuilder builder = new SelectionBuilder();
        switch (match) {
            case BLOCKS: {
                return builder.table(Tables.BLOCKS);
            }
            case BLOCKS_BETWEEN: {
                // Path is blocks/between/{startTime}/{endTime}; segments 2 and 3
                // carry the inclusive bounds.
                final List<String> segments = uri.getPathSegments();
                final String startTime = segments.get(2);
                final String endTime = segments.get(3);
                return builder.table(Tables.BLOCKS)
                        .map(Blocks.SESSIONS_COUNT, Subquery.BLOCK_SESSIONS_COUNT)
                        .map(Blocks.CONTAINS_STARRED, Subquery.BLOCK_CONTAINS_STARRED)
                        .where(Blocks.BLOCK_START + ">=?", startTime)
                        .where(Blocks.BLOCK_START + "<=?", endTime);
            }
            case BLOCKS_ID: {
                final String blockId = Blocks.getBlockId(uri);
                return builder.table(Tables.BLOCKS)
                        .map(Blocks.SESSIONS_COUNT, Subquery.BLOCK_SESSIONS_COUNT)
                        .map(Blocks.CONTAINS_STARRED, Subquery.BLOCK_CONTAINS_STARRED)
                        .where(Blocks.BLOCK_ID + "=?", blockId);
            }
            case BLOCKS_ID_SESSIONS: {
                final String blockId = Blocks.getBlockId(uri);
                return builder.table(Tables.SESSIONS_JOIN_BLOCKS_ROOMS)
                        .map(Blocks.SESSIONS_COUNT, Subquery.BLOCK_SESSIONS_COUNT)
                        .map(Blocks.CONTAINS_STARRED, Subquery.BLOCK_CONTAINS_STARRED)
                        .mapToTable(Sessions._ID, Tables.SESSIONS)
                        .mapToTable(Sessions.SESSION_ID, Tables.SESSIONS)
                        .mapToTable(Sessions.BLOCK_ID, Tables.SESSIONS)
                        .mapToTable(Sessions.ROOM_ID, Tables.SESSIONS)
                        .where(Qualified.SESSIONS_BLOCK_ID + "=?", blockId);
            }
            case TRACKS: {
                return builder.table(Tables.TRACKS)
                        .map(Tracks.SESSIONS_COUNT, Subquery.TRACK_SESSIONS_COUNT)
                        .map(Tracks.VENDORS_COUNT, Subquery.TRACK_VENDORS_COUNT);
            }
            case TRACKS_ID: {
                final String trackId = Tracks.getTrackId(uri);
                return builder.table(Tables.TRACKS)
                        .where(Tracks.TRACK_ID + "=?", trackId);
            }
            case TRACKS_ID_SESSIONS: {
                final String trackId = Tracks.getTrackId(uri);
                return builder.table(Tables.SESSIONS_TRACKS_JOIN_SESSIONS_BLOCKS_ROOMS)
                        .mapToTable(Sessions._ID, Tables.SESSIONS)
                        .mapToTable(Sessions.SESSION_ID, Tables.SESSIONS)
                        .mapToTable(Sessions.BLOCK_ID, Tables.SESSIONS)
                        .mapToTable(Sessions.ROOM_ID, Tables.SESSIONS)
                        .where(Qualified.SESSIONS_TRACKS_TRACK_ID + "=?", trackId);
            }
            case TRACKS_ID_VENDORS: {
                final String trackId = Tracks.getTrackId(uri);
                return builder.table(Tables.VENDORS_JOIN_TRACKS)
                        .mapToTable(Vendors._ID, Tables.VENDORS)
                        .mapToTable(Vendors.TRACK_ID, Tables.VENDORS)
                        .where(Qualified.VENDORS_TRACK_ID + "=?", trackId);
            }
            case ROOMS: {
                return builder.table(Tables.ROOMS);
            }
            case ROOMS_ID: {
                final String roomId = Rooms.getRoomId(uri);
                return builder.table(Tables.ROOMS)
                        .where(Rooms.ROOM_ID + "=?", roomId);
            }
            case ROOMS_ID_SESSIONS: {
                final String roomId = Rooms.getRoomId(uri);
                return builder.table(Tables.SESSIONS_JOIN_BLOCKS_ROOMS)
                        .mapToTable(Sessions._ID, Tables.SESSIONS)
                        .mapToTable(Sessions.BLOCK_ID, Tables.SESSIONS)
                        .mapToTable(Sessions.ROOM_ID, Tables.SESSIONS)
                        .where(Qualified.SESSIONS_ROOM_ID + "=?", roomId);
            }
            case SESSIONS: {
                return builder.table(Tables.SESSIONS_JOIN_BLOCKS_ROOMS)
                        .mapToTable(Sessions._ID, Tables.SESSIONS)
                        .mapToTable(Sessions.BLOCK_ID, Tables.SESSIONS)
                        .mapToTable(Sessions.ROOM_ID, Tables.SESSIONS);
            }
            case SESSIONS_STARRED: {
                return builder.table(Tables.SESSIONS_JOIN_BLOCKS_ROOMS)
                        .mapToTable(Sessions._ID, Tables.SESSIONS)
                        .mapToTable(Sessions.BLOCK_ID, Tables.SESSIONS)
                        .mapToTable(Sessions.ROOM_ID, Tables.SESSIONS)
                        .where(Sessions.STARRED + "=1");
            }
            case SESSIONS_SEARCH: {
                final String query = Sessions.getSearchQuery(uri);
                return builder.table(Tables.SESSIONS_SEARCH_JOIN_SESSIONS_BLOCKS_ROOMS)
                        .map(Sessions.SEARCH_SNIPPET, Subquery.SESSIONS_SNIPPET)
                        .mapToTable(Sessions._ID, Tables.SESSIONS)
                        .mapToTable(Sessions.SESSION_ID, Tables.SESSIONS)
                        .mapToTable(Sessions.BLOCK_ID, Tables.SESSIONS)
                        .mapToTable(Sessions.ROOM_ID, Tables.SESSIONS)
                        .where(SessionsSearchColumns.BODY + " MATCH ?", query);
            }
            case SESSIONS_AT: {
                // Path is sessions/at/{time}; segment 2 carries the timestamp.
                final List<String> segments = uri.getPathSegments();
                final String time = segments.get(2);
                return builder.table(Tables.SESSIONS_JOIN_BLOCKS_ROOMS)
                        .mapToTable(Sessions._ID, Tables.SESSIONS)
                        .mapToTable(Sessions.BLOCK_ID, Tables.SESSIONS)
                        .mapToTable(Sessions.ROOM_ID, Tables.SESSIONS)
                        .where(Sessions.BLOCK_START + "<=?", time)
                        .where(Sessions.BLOCK_END + ">=?", time);
            }
            case SESSIONS_ID: {
                final String sessionId = Sessions.getSessionId(uri);
                return builder.table(Tables.SESSIONS_JOIN_BLOCKS_ROOMS)
                        .mapToTable(Sessions._ID, Tables.SESSIONS)
                        .mapToTable(Sessions.BLOCK_ID, Tables.SESSIONS)
                        .mapToTable(Sessions.ROOM_ID, Tables.SESSIONS)
                        .where(Qualified.SESSIONS_SESSION_ID + "=?", sessionId);
            }
            case SESSIONS_ID_SPEAKERS: {
                final String sessionId = Sessions.getSessionId(uri);
                return builder.table(Tables.SESSIONS_SPEAKERS_JOIN_SPEAKERS)
                        .mapToTable(Speakers._ID, Tables.SPEAKERS)
                        .mapToTable(Speakers.SPEAKER_ID, Tables.SPEAKERS)
                        .where(Qualified.SESSIONS_SPEAKERS_SESSION_ID + "=?", sessionId);
            }
            case SESSIONS_ID_TRACKS: {
                final String sessionId = Sessions.getSessionId(uri);
                return builder.table(Tables.SESSIONS_TRACKS_JOIN_TRACKS)
                        .mapToTable(Tracks._ID, Tables.TRACKS)
                        .mapToTable(Tracks.TRACK_ID, Tables.TRACKS)
                        .where(Qualified.SESSIONS_TRACKS_SESSION_ID + "=?", sessionId);
            }
            case SESSIONS_ID_NOTES: {
                final String sessionId = Sessions.getSessionId(uri);
                return builder.table(Tables.NOTES)
                        .where(Notes.SESSION_ID + "=?", sessionId);
            }
            case SPEAKERS: {
                return builder.table(Tables.SPEAKERS);
            }
            case SPEAKERS_ID: {
                final String speakerId = Speakers.getSpeakerId(uri);
                return builder.table(Tables.SPEAKERS)
                        .where(Speakers.SPEAKER_ID + "=?", speakerId);
            }
            case SPEAKERS_ID_SESSIONS: {
                final String speakerId = Speakers.getSpeakerId(uri);
                return builder.table(Tables.SESSIONS_SPEAKERS_JOIN_SESSIONS_BLOCKS_ROOMS)
                        .mapToTable(Sessions._ID, Tables.SESSIONS)
                        .mapToTable(Sessions.SESSION_ID, Tables.SESSIONS)
                        .mapToTable(Sessions.BLOCK_ID, Tables.SESSIONS)
                        .mapToTable(Sessions.ROOM_ID, Tables.SESSIONS)
                        .where(Qualified.SESSIONS_SPEAKERS_SPEAKER_ID + "=?", speakerId);
            }
            case VENDORS: {
                return builder.table(Tables.VENDORS_JOIN_TRACKS)
                        .mapToTable(Vendors._ID, Tables.VENDORS)
                        .mapToTable(Vendors.TRACK_ID, Tables.VENDORS);
            }
            case VENDORS_STARRED: {
                return builder.table(Tables.VENDORS_JOIN_TRACKS)
                        .mapToTable(Vendors._ID, Tables.VENDORS)
                        .mapToTable(Vendors.TRACK_ID, Tables.VENDORS)
                        .where(Vendors.STARRED + "=1");
            }
            case VENDORS_SEARCH: {
                final String query = Vendors.getSearchQuery(uri);
                return builder.table(Tables.VENDORS_SEARCH_JOIN_VENDORS_TRACKS)
                        .map(Vendors.SEARCH_SNIPPET, Subquery.VENDORS_SNIPPET)
                        .mapToTable(Vendors._ID, Tables.VENDORS)
                        .mapToTable(Vendors.VENDOR_ID, Tables.VENDORS)
                        .mapToTable(Vendors.TRACK_ID, Tables.VENDORS)
                        .where(VendorsSearchColumns.BODY + " MATCH ?", query);
            }
            case VENDORS_ID: {
                final String vendorId = Vendors.getVendorId(uri);
                return builder.table(Tables.VENDORS_JOIN_TRACKS)
                        .mapToTable(Vendors._ID, Tables.VENDORS)
                        .mapToTable(Vendors.TRACK_ID, Tables.VENDORS)
                        .where(Vendors.VENDOR_ID + "=?", vendorId);
            }
            case NOTES: {
                return builder.table(Tables.NOTES);
            }
            case NOTES_ID: {
                final long noteId = Notes.getNoteId(uri);
                return builder.table(Tables.NOTES)
                        .where(Notes._ID + "=?", Long.toString(noteId));
            }
            default: {
                throw new UnsupportedOperationException("Unknown uri: " + uri);
            }
        }
    }

    @Override
    public ParcelFileDescriptor openFile(Uri uri, String mode) throws FileNotFoundException {
        final int match = sUriMatcher.match(uri);
        switch (match) {
            case NOTES_EXPORT: {
                try {
                    final File notesFile = NotesExporter.writeExportedNotes(getContext());
                    return ParcelFileDescriptor
                            .open(notesFile, ParcelFileDescriptor.MODE_READ_ONLY);
                } catch (IOException e) {
                    // FileNotFoundException(String) has no cause constructor, so
                    // the underlying failure is preserved in the message only.
                    throw new FileNotFoundException("Unable to export notes: " + e.toString());
                }
            }
            default: {
                throw new UnsupportedOperationException("Unknown uri: " + uri);
            }
        }
    }

    /**
     * Raw SQL subqueries and FTS snippet expressions used as projection map
     * targets by {@link #buildExpandedSelection}.
     */
    private interface Subquery {
        String BLOCK_SESSIONS_COUNT = "(SELECT COUNT(" + Qualified.SESSIONS_SESSION_ID + ") FROM "
                + Tables.SESSIONS + " WHERE " + Qualified.SESSIONS_BLOCK_ID + "="
                + Qualified.BLOCKS_BLOCK_ID + ")";
        String BLOCK_CONTAINS_STARRED = "(SELECT MAX(" + Qualified.SESSIONS_STARRED + ") FROM "
                + Tables.SESSIONS + " WHERE " + Qualified.SESSIONS_BLOCK_ID + "="
                + Qualified.BLOCKS_BLOCK_ID + ")";
        String TRACK_SESSIONS_COUNT = "(SELECT COUNT(" + Qualified.SESSIONS_TRACKS_SESSION_ID
                + ") FROM " + Tables.SESSIONS_TRACKS + " WHERE "
                + Qualified.SESSIONS_TRACKS_TRACK_ID + "=" + Qualified.TRACKS_TRACK_ID + ")";
        String TRACK_VENDORS_COUNT = "(SELECT COUNT(" + Qualified.VENDORS_VENDOR_ID + ") FROM "
                + Tables.VENDORS + " WHERE " + Qualified.VENDORS_TRACK_ID + "="
                + Qualified.TRACKS_TRACK_ID + ")";
        String SESSIONS_SNIPPET = "snippet(" + Tables.SESSIONS_SEARCH + ",'{','}','\u2026')";
        String VENDORS_SNIPPET = "snippet(" + Tables.VENDORS_SEARCH + ",'{','}','\u2026')";
    }

    /**
     * {@link ScheduleContract} fields that are fully qualified with a specific
     * parent {@link Tables}. Used when needed to work around SQL ambiguity.
     */
    private interface Qualified {
        String SESSIONS_SESSION_ID = Tables.SESSIONS + "." + Sessions.SESSION_ID;
        String SESSIONS_BLOCK_ID = Tables.SESSIONS + "." + Sessions.BLOCK_ID;
        String SESSIONS_ROOM_ID = Tables.SESSIONS + "." + Sessions.ROOM_ID;
        String SESSIONS_TRACKS_SESSION_ID = Tables.SESSIONS_TRACKS + "."
                + SessionsTracks.SESSION_ID;
        String SESSIONS_TRACKS_TRACK_ID = Tables.SESSIONS_TRACKS + "."
                + SessionsTracks.TRACK_ID;
        String SESSIONS_SPEAKERS_SESSION_ID = Tables.SESSIONS_SPEAKERS + "."
                + SessionsSpeakers.SESSION_ID;
        String SESSIONS_SPEAKERS_SPEAKER_ID = Tables.SESSIONS_SPEAKERS + "."
                + SessionsSpeakers.SPEAKER_ID;
        String VENDORS_VENDOR_ID = Tables.VENDORS + "." + Vendors.VENDOR_ID;
        String VENDORS_TRACK_ID = Tables.VENDORS + "." + Vendors.TRACK_ID;
        @SuppressWarnings("hiding")
        String SESSIONS_STARRED = Tables.SESSIONS + "." + Sessions.STARRED;
        String TRACKS_TRACK_ID = Tables.TRACKS + "." + Tracks.TRACK_ID;
        String BLOCKS_BLOCK_ID = Tables.BLOCKS + "." + Blocks.BLOCK_ID;
    }
}
| ghchinoy/wicsa2011 | src/com/google/android/apps/iosched/provider/ScheduleProvider.java | Java | apache-2.0 | 33,646 |
/*
* Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazonaws.services.inspector.model;
import com.amazonaws.AmazonServiceException;
/**
*
*/
public class NoSuchEntityException extends AmazonServiceException {
private static final long serialVersionUID = 1L;
/**
* Constructs a new NoSuchEntityException with the specified error message.
*
* @param message
* Describes the error encountered.
*/
public NoSuchEntityException(String message) {
super(message);
}
} | trasa/aws-sdk-java | aws-java-sdk-inspector/src/main/java/com/amazonaws/services/inspector/model/NoSuchEntityException.java | Java | apache-2.0 | 1,070 |
/*
* Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.dataexchange.model;
import java.io.Serializable;
import javax.annotation.Generated;
import com.amazonaws.AmazonWebServiceRequest;
/**
*
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/dataexchange-2017-07-25/DeleteEventAction" target="_top">AWS API
* Documentation</a>
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class DeleteEventActionRequest extends com.amazonaws.AmazonWebServiceRequest implements Serializable, Cloneable {

    /** The unique identifier for the event action. */
    private String eventActionId;

    /**
     * Sets the unique identifier for the event action.
     *
     * @param eventActionId
     *        The unique identifier for the event action.
     */
    public void setEventActionId(String eventActionId) {
        this.eventActionId = eventActionId;
    }

    /**
     * Returns the unique identifier for the event action.
     *
     * @return The unique identifier for the event action.
     */
    public String getEventActionId() {
        return eventActionId;
    }

    /**
     * Fluent variant of {@link #setEventActionId(String)}.
     *
     * @param eventActionId
     *        The unique identifier for the event action.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public DeleteEventActionRequest withEventActionId(String eventActionId) {
        setEventActionId(eventActionId);
        return this;
    }

    /**
     * Returns a string representation of this object, useful for testing and
     * debugging. Sensitive data is redacted from this string using a
     * placeholder value.
     *
     * @return A string representation of this object.
     *
     * @see java.lang.Object#toString()
     */
    @Override
    public String toString() {
        final StringBuilder text = new StringBuilder("{");
        if (getEventActionId() != null) {
            text.append("EventActionId: ").append(getEventActionId());
        }
        return text.append("}").toString();
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (!(obj instanceof DeleteEventActionRequest)) {
            return false;
        }
        final DeleteEventActionRequest that = (DeleteEventActionRequest) obj;
        final String mine = getEventActionId();
        final String theirs = that.getEventActionId();
        return mine == null ? theirs == null : mine.equals(theirs);
    }

    @Override
    public int hashCode() {
        // Same numeric result as the conventional 31 * 1 + field-hash scheme.
        return 31 + (getEventActionId() == null ? 0 : getEventActionId().hashCode());
    }

    @Override
    public DeleteEventActionRequest clone() {
        return (DeleteEventActionRequest) super.clone();
    }
}
| aws/aws-sdk-java | aws-java-sdk-dataexchange/src/main/java/com/amazonaws/services/dataexchange/model/DeleteEventActionRequest.java | Java | apache-2.0 | 3,714 |
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Drawing;
using System.Data;
using System.Linq;
using System.Text;
using System.Windows.Forms;
namespace com.huawei.ebg.esdk.ucsdk.client.control
{
public partial class DialCall : Button
{
public DialCall()
{
InitializeComponent();
}
# region 自定义字段
private string _ucAccount;
[Description("被叫UC帐号")]
public string UCAccount
{
get { return _ucAccount; }
set { _ucAccount = value; }
}
private string _calledNumber;
[Description("被叫号码")]
public string CalledNumber
{
get { return _calledNumber; }
set { _calledNumber = value; }
}
# endregion
protected override void OnClick(EventArgs e)
{
base.OnClick(e);
try
{
CtrlBusiness.DirectCall(_ucAccount, _calledNumber);
}
catch (Exception ex)
{
//throw ex;
}
}
}
}
| eSDK/esdk_uc_control_net | com.huawei.ebg.esdk.ucsdk.client.control/Client Control/DialCall.cs | C# | apache-2.0 | 1,163 |
/*
* =============================================================================
*
* Copyright (c) 2011-2016, The THYMELEAF team (http://www.thymeleaf.org)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* =============================================================================
*/
package org.thymeleaf.dialect.dialectwrapping;
import org.thymeleaf.context.ITemplateContext;
import org.thymeleaf.model.IModel;
import org.thymeleaf.processor.element.AbstractElementModelProcessor;
import org.thymeleaf.processor.element.IElementModelStructureHandler;
import org.thymeleaf.templatemode.TemplateMode;
// A no-op element *model* processor used by the dialect-wrapping tests: it matches
// elements but performs no changes, so tests can verify only that it is invoked.
public class ElementModelProcessor extends AbstractElementModelProcessor {
    public ElementModelProcessor(final String dialectPrefix) {
        // Matches prefixed <div> elements in HTML mode with precedence 100.
        // Argument order (assumed from AbstractElementModelProcessor): template mode,
        // dialect prefix, element name, prefix-applies-to-element-name, attribute
        // name, prefix-applies-to-attribute-name, precedence -- TODO confirm against
        // the Thymeleaf 3 API.
        super(TemplateMode.HTML, dialectPrefix, "div", true, null, false, 100);
    }
    @Override
    protected void doProcess(final ITemplateContext context, final IModel model, final IElementModelStructureHandler structureHandler) {
        // Intentionally empty: no model mutation is performed.
    }
}
| thymeleaf/thymeleaf-tests | src/test/java/org/thymeleaf/dialect/dialectwrapping/ElementModelProcessor.java | Java | apache-2.0 | 1,538 |
using System.Reflection;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
// Assembly identity and description (shown in file properties / package metadata).
[assembly: AssemblyTitle("Accela.WindowsStoreSDK")]
[assembly: AssemblyDescription("Accela SDK for Windows & Windows Phone")]
[assembly: AssemblyCompany("Accela Inc.")]
[assembly: AssemblyProduct("AccelaSDK")]
[assembly: AssemblyCopyright("Copyright © 2013, Accela Inc.")]
// Version stamped into both the CLR assembly metadata and the Win32 file version.
[assembly: AssemblyVersion("3.0.1.3")]
[assembly: AssemblyFileVersion("3.0.1.3")]
// Types in this assembly are not exposed to COM clients.
[assembly: ComVisible(false)]
/**
* @license Apache-2.0
*
* Copyright (c) 2020 The Stdlib Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
'use strict';
// MODULES //
var Float32Array = require( '@stdlib/array/float32' );
var addon = require( './smin.native.js' );
// MAIN //
/**
* Computes the minimum value of a single-precision floating-point strided array.
*
* @param {PositiveInteger} N - number of indexed elements
* @param {Float32Array} x - input array
* @param {integer} stride - stride length
* @param {NonNegativeInteger} offset - starting index
* @returns {number} minimum value
*
* @example
* var Float32Array = require( '@stdlib/array/float32' );
* var floor = require( '@stdlib/math/base/special/floor' );
*
* var x = new Float32Array( [ 2.0, 1.0, 2.0, -2.0, -2.0, 2.0, 3.0, 4.0 ] );
* var N = floor( x.length / 2 );
*
* var v = smin( N, x, 2, 1 );
* // returns -2.0
*/
function smin( N, x, stride, offset ) {
	var first = offset;

	// For a negative stride, iteration proceeds backwards, so the lowest-addressed
	// element visited is at offset + (N-1)*stride; the view must start there.
	if ( stride < 0 ) {
		first += ( N-1 ) * stride;
	}
	// Create a typed-array view beginning at the first accessed element and hand
	// off to the native implementation.
	var view = new Float32Array( x.buffer, x.byteOffset+(x.BYTES_PER_ELEMENT*first), x.length-first ); // eslint-disable-line max-len
	return addon( N, view, stride );
}
// EXPORTS //
module.exports = smin;
| stdlib-js/stdlib | lib/node_modules/@stdlib/stats/base/smin/lib/ndarray.native.js | JavaScript | apache-2.0 | 1,677 |
/*
* Copyright 2014-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.lightsail.model.transform;
import java.math.*;
import javax.annotation.Generated;
import com.amazonaws.services.lightsail.model.*;
import com.amazonaws.transform.SimpleTypeJsonUnmarshallers.*;
import com.amazonaws.transform.*;
import com.fasterxml.jackson.core.JsonToken;
import static com.fasterxml.jackson.core.JsonToken.*;
/**
* DetachStaticIpResult JSON Unmarshaller
*/
// Code generated by the AWS Java SDK code generator; regenerate from the service
// model rather than editing by hand.
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class DetachStaticIpResultJsonUnmarshaller implements Unmarshaller<DetachStaticIpResult, JsonUnmarshallerContext> {
    /**
     * Unmarshalls a {@code DetachStaticIpResult} from the JSON stream held by the
     * context. Only the "operations" member (one level below the starting depth)
     * is read; all other fields are skipped.
     */
    public DetachStaticIpResult unmarshall(JsonUnmarshallerContext context) throws Exception {
        DetachStaticIpResult detachStaticIpResult = new DetachStaticIpResult();
        int originalDepth = context.getCurrentDepth();
        String currentParentElement = context.getCurrentParentElement();
        int targetDepth = originalDepth + 1;
        JsonToken token = context.getCurrentToken();
        if (token == null)
            token = context.nextToken();
        // A JSON null document yields an empty result object.
        if (token == VALUE_NULL) {
            return detachStaticIpResult;
        }
        while (true) {
            if (token == null)
                break;
            if (token == FIELD_NAME || token == START_OBJECT) {
                if (context.testExpression("operations", targetDepth)) {
                    context.nextToken();
                    detachStaticIpResult.setOperations(new ListUnmarshaller<Operation>(OperationJsonUnmarshaller.getInstance()).unmarshall(context));
                }
            } else if (token == END_ARRAY || token == END_OBJECT) {
                // Stop once parsing has popped back to the depth where it began.
                if (context.getLastParsedParentElement() == null || context.getLastParsedParentElement().equals(currentParentElement)) {
                    if (context.getCurrentDepth() <= originalDepth)
                        break;
                }
            }
            token = context.nextToken();
        }
        return detachStaticIpResult;
    }
    // Lazily created singleton; not synchronized (assumed benign race -- the
    // unmarshaller is stateless, per the generated-SDK convention).
    private static DetachStaticIpResultJsonUnmarshaller instance;
    public static DetachStaticIpResultJsonUnmarshaller getInstance() {
        if (instance == null)
            instance = new DetachStaticIpResultJsonUnmarshaller();
        return instance;
    }
}
| jentfoo/aws-sdk-java | aws-java-sdk-lightsail/src/main/java/com/amazonaws/services/lightsail/model/transform/DetachStaticIpResultJsonUnmarshaller.java | Java | apache-2.0 | 2,855 |
package io.teknek.nibiru.transport.rpc;
import io.teknek.nibiru.transport.BaseResponse;
/**
 * Response envelope for a blocking RPC invocation: carries either the call's
 * result or a textual description of a remote failure.
 *
 * @param <T> type of the value returned by the remote procedure call
 */
public class BlockingRpcResponse<T> implements BaseResponse {

  // Textual description of a remote failure; presumably null on success --
  // confirm with the RPC dispatch code that populates this.
  private String exception;

  // Value produced by the remote procedure call.
  private T rpcResult;

  public BlockingRpcResponse() {
  }

  public String getException() {
    return this.exception;
  }

  public void setException(String exception) {
    this.exception = exception;
  }

  public T getRpcResult() {
    return this.rpcResult;
  }

  public void setRpcResult(T rpcResult) {
    this.rpcResult = rpcResult;
  }
}
| edwardcapriolo/nibiru | src/main/java/io/teknek/nibiru/transport/rpc/BlockingRpcResponse.java | Java | apache-2.0 | 530 |
/*
Copyright 2021 The Kubeflow Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"encoding/json"
"fmt"
"os"
"strings"
"github.com/go-openapi/spec"
"github.com/kubeflow/katib/pkg/apis/v1beta1"
"k8s.io/klog"
"k8s.io/kube-openapi/pkg/common"
)
// Generate OpenAPI spec definitions for Katib Resource.
//
// Usage: <program> <swagger-spec-version> <katib-api-version>
// The resulting Swagger 2.0 document is printed to stdout.
func main() {
	if len(os.Args) <= 2 {
		klog.Fatal("Supply Swagger version and Katib Version")
	}
	// Normalize the spec version to carry a leading "v" (e.g. "1.0" -> "v1.0").
	version := os.Args[1]
	if !strings.HasPrefix(version, "v") {
		version = "v" + version
	}
	// refCallback rewrites type references into local "#/definitions/..." JSON
	// pointers keyed by the shortened (swaggified) type name.
	refCallback := func(name string) spec.Ref {
		return spec.MustCreateRef("#/definitions/" + common.EscapeJsonPointer(swaggify(name)))
	}
	katibVersion := os.Args[2]
	oAPIDefs := make(map[string]common.OpenAPIDefinition)
	if katibVersion == "v1beta1" {
		oAPIDefs = v1beta1.GetOpenAPIDefinitions(refCallback)
	} else {
		klog.Fatalf("Katib version %v is not supported", katibVersion)
	}
	// Re-key every generated definition by its short name.
	defs := spec.Definitions{}
	for defName, val := range oAPIDefs {
		defs[swaggify(defName)] = val.Schema
	}
	swagger := spec.Swagger{
		SwaggerProps: spec.SwaggerProps{
			Swagger:     "2.0",
			Definitions: defs,
			Paths:       &spec.Paths{Paths: map[string]spec.PathItem{}},
			Info: &spec.Info{
				InfoProps: spec.InfoProps{
					Title:       "Katib",
					Description: "Swagger description for Katib",
					Version:     version,
				},
			},
		},
	}
	// Pretty-print to stdout; callers are expected to redirect into a file.
	jsonBytes, err := json.MarshalIndent(swagger, "", "  ")
	if err != nil {
		klog.Fatal(err.Error())
	}
	fmt.Println(string(jsonBytes))
}
// swaggify shortens a fully qualified Go type name produced by openapi-gen into
// the "<group>.<Kind>" form used for Swagger definition keys, e.g.
// "k8s.io/api/core/v1.Pod" -> "v1.Pod".
func swaggify(name string) string {
	name = strings.ReplaceAll(name, "github.com/kubeflow/katib/pkg/apis/controller/common/", "")
	name = strings.ReplaceAll(name, "github.com/kubeflow/katib/pkg/apis/controller/experiments/", "")
	// NOTE(review): the two prefixes below lack the trailing "/" that the ones
	// above have, so names under suggestions/ and trials/ keep their separator
	// and come out with a leading "." after the slash-to-dot rewrite (e.g.
	// ".v1beta1.Trial"). Preserved as-is -- confirm whether that is intended.
	name = strings.ReplaceAll(name, "github.com/kubeflow/katib/pkg/apis/controller/suggestions", "")
	name = strings.ReplaceAll(name, "github.com/kubeflow/katib/pkg/apis/controller/trials", "")
	name = strings.ReplaceAll(name, "k8s.io/api/core/", "")
	name = strings.ReplaceAll(name, "k8s.io/apimachinery/pkg/apis/meta/", "")
	name = strings.ReplaceAll(name, "/", ".")
	return name
}
| kubeflow/katib | hack/swagger/main.go | GO | apache-2.0 | 2,606 |
/*
* Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.config.model.transform;
import java.util.List;
import javax.annotation.Generated;
import com.amazonaws.SdkClientException;
import com.amazonaws.services.config.model.*;
import com.amazonaws.protocol.*;
import com.amazonaws.annotation.SdkInternalApi;
/**
* DescribeConfigRulesRequestMarshaller
*/
// Code generated by the AWS Java SDK code generator; regenerate from the service
// model rather than editing by hand.
@Generated("com.amazonaws:aws-java-sdk-code-generator")
@SdkInternalApi
public class DescribeConfigRulesRequestMarshaller {
    // Binding of the ConfigRuleNames list member into the JSON payload.
    private static final MarshallingInfo<List> CONFIGRULENAMES_BINDING = MarshallingInfo.builder(MarshallingType.LIST)
            .marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("ConfigRuleNames").build();
    // Binding of the NextToken pagination member into the JSON payload.
    private static final MarshallingInfo<String> NEXTTOKEN_BINDING = MarshallingInfo.builder(MarshallingType.STRING).marshallLocation(MarshallLocation.PAYLOAD)
            .marshallLocationName("NextToken").build();
    private static final DescribeConfigRulesRequestMarshaller instance = new DescribeConfigRulesRequestMarshaller();
    public static DescribeConfigRulesRequestMarshaller getInstance() {
        return instance;
    }
    /**
     * Marshall the given parameter object.
     */
    public void marshall(DescribeConfigRulesRequest describeConfigRulesRequest, ProtocolMarshaller protocolMarshaller) {
        if (describeConfigRulesRequest == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            protocolMarshaller.marshall(describeConfigRulesRequest.getConfigRuleNames(), CONFIGRULENAMES_BINDING);
            protocolMarshaller.marshall(describeConfigRulesRequest.getNextToken(), NEXTTOKEN_BINDING);
        } catch (Exception e) {
            // Wrap any marshalling failure in the SDK's client-side exception type,
            // preserving the cause.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
| aws/aws-sdk-java | aws-java-sdk-config/src/main/java/com/amazonaws/services/config/model/transform/DescribeConfigRulesRequestMarshaller.java | Java | apache-2.0 | 2,415 |
/*
* Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.iotevents.model;
import java.io.Serializable;
import javax.annotation.Generated;
import com.amazonaws.AmazonWebServiceRequest;
/**
*
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/iotevents-2018-07-27/GetDetectorModelAnalysisResults"
* target="_top">AWS API Documentation</a>
*/
// Code generated by the AWS Java SDK code generator; regenerate from the service
// model rather than editing by hand.
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class GetDetectorModelAnalysisResultsRequest extends com.amazonaws.AmazonWebServiceRequest implements Serializable, Cloneable {
    /**
     * <p>
     * The ID of the analysis result that you want to retrieve.
     * </p>
     */
    private String analysisId;
    /**
     * <p>
     * The token that you can use to return the next set of results.
     * </p>
     */
    private String nextToken;
    /**
     * <p>
     * The maximum number of results to be returned per request.
     * </p>
     */
    private Integer maxResults;
    /**
     * <p>
     * The ID of the analysis result that you want to retrieve.
     * </p>
     * 
     * @param analysisId
     *        The ID of the analysis result that you want to retrieve.
     */
    public void setAnalysisId(String analysisId) {
        this.analysisId = analysisId;
    }
    /**
     * <p>
     * The ID of the analysis result that you want to retrieve.
     * </p>
     * 
     * @return The ID of the analysis result that you want to retrieve.
     */
    public String getAnalysisId() {
        return this.analysisId;
    }
    /**
     * <p>
     * The ID of the analysis result that you want to retrieve.
     * </p>
     * 
     * @param analysisId
     *        The ID of the analysis result that you want to retrieve.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public GetDetectorModelAnalysisResultsRequest withAnalysisId(String analysisId) {
        setAnalysisId(analysisId);
        return this;
    }
    /**
     * <p>
     * The token that you can use to return the next set of results.
     * </p>
     * 
     * @param nextToken
     *        The token that you can use to return the next set of results.
     */
    public void setNextToken(String nextToken) {
        this.nextToken = nextToken;
    }
    /**
     * <p>
     * The token that you can use to return the next set of results.
     * </p>
     * 
     * @return The token that you can use to return the next set of results.
     */
    public String getNextToken() {
        return this.nextToken;
    }
    /**
     * <p>
     * The token that you can use to return the next set of results.
     * </p>
     * 
     * @param nextToken
     *        The token that you can use to return the next set of results.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public GetDetectorModelAnalysisResultsRequest withNextToken(String nextToken) {
        setNextToken(nextToken);
        return this;
    }
    /**
     * <p>
     * The maximum number of results to be returned per request.
     * </p>
     * 
     * @param maxResults
     *        The maximum number of results to be returned per request.
     */
    public void setMaxResults(Integer maxResults) {
        this.maxResults = maxResults;
    }
    /**
     * <p>
     * The maximum number of results to be returned per request.
     * </p>
     * 
     * @return The maximum number of results to be returned per request.
     */
    public Integer getMaxResults() {
        return this.maxResults;
    }
    /**
     * <p>
     * The maximum number of results to be returned per request.
     * </p>
     * 
     * @param maxResults
     *        The maximum number of results to be returned per request.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public GetDetectorModelAnalysisResultsRequest withMaxResults(Integer maxResults) {
        setMaxResults(maxResults);
        return this;
    }
    /**
     * Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
     * redacted from this string using a placeholder value.
     *
     * @return A string representation of this object.
     *
     * @see java.lang.Object#toString()
     */
    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append("{");
        if (getAnalysisId() != null)
            sb.append("AnalysisId: ").append(getAnalysisId()).append(",");
        if (getNextToken() != null)
            sb.append("NextToken: ").append(getNextToken()).append(",");
        if (getMaxResults() != null)
            sb.append("MaxResults: ").append(getMaxResults());
        sb.append("}");
        return sb.toString();
    }
    /**
     * Field-by-field equality over analysisId, nextToken and maxResults; the
     * xor (^) checks reject pairs where exactly one side is null.
     *
     * @see java.lang.Object#equals(Object)
     */
    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null)
            return false;
        if (obj instanceof GetDetectorModelAnalysisResultsRequest == false)
            return false;
        GetDetectorModelAnalysisResultsRequest other = (GetDetectorModelAnalysisResultsRequest) obj;
        if (other.getAnalysisId() == null ^ this.getAnalysisId() == null)
            return false;
        if (other.getAnalysisId() != null && other.getAnalysisId().equals(this.getAnalysisId()) == false)
            return false;
        if (other.getNextToken() == null ^ this.getNextToken() == null)
            return false;
        if (other.getNextToken() != null && other.getNextToken().equals(this.getNextToken()) == false)
            return false;
        if (other.getMaxResults() == null ^ this.getMaxResults() == null)
            return false;
        if (other.getMaxResults() != null && other.getMaxResults().equals(this.getMaxResults()) == false)
            return false;
        return true;
    }
    /**
     * Hash over the same fields used by {@link #equals(Object)}; null fields
     * contribute 0.
     *
     * @see java.lang.Object#hashCode()
     */
    @Override
    public int hashCode() {
        final int prime = 31;
        int hashCode = 1;
        hashCode = prime * hashCode + ((getAnalysisId() == null) ? 0 : getAnalysisId().hashCode());
        hashCode = prime * hashCode + ((getNextToken() == null) ? 0 : getNextToken().hashCode());
        hashCode = prime * hashCode + ((getMaxResults() == null) ? 0 : getMaxResults().hashCode());
        return hashCode;
    }
    /**
     * Shallow clone via the base request class; the String/Integer fields are
     * immutable, so sharing them between clones is safe.
     */
    @Override
    public GetDetectorModelAnalysisResultsRequest clone() {
        return (GetDetectorModelAnalysisResultsRequest) super.clone();
    }
}
| aws/aws-sdk-java | aws-java-sdk-iotevents/src/main/java/com/amazonaws/services/iotevents/model/GetDetectorModelAnalysisResultsRequest.java | Java | apache-2.0 | 7,053 |
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"bytes"
"fmt"
"io"
jsonpatch "github.com/evanphx/json-patch"
"github.com/golang/glog"
"github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/json"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/cli-runtime/pkg/genericclioptions/printers"
"k8s.io/cli-runtime/pkg/genericclioptions/resource"
"k8s.io/kubernetes/pkg/kubectl"
"k8s.io/kubernetes/pkg/kubectl/cmd/templates"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/kubectl/scheme"
"k8s.io/kubernetes/pkg/kubectl/util/i18n"
)
// AnnotateOptions have the data required to perform the annotate operation
// AnnotateOptions have the data required to perform the annotate operation
type AnnotateOptions struct {
	// Output/printing configuration; PrintObj is derived from PrintFlags in Complete().
	PrintFlags *genericclioptions.PrintFlags
	PrintObj printers.ResourcePrinterFunc
	// Filename options
	resource.FilenameOptions
	// Change-cause recording configuration; Recorder is derived in Complete().
	RecordFlags *genericclioptions.RecordFlags
	// Common user flags
	overwrite bool
	local bool
	dryrun bool
	all bool
	resourceVersion string
	selector string
	fieldSelector string
	outputFormat string
	// results of arg parsing
	resources []string
	newAnnotations map[string]string
	removeAnnotations []string
	Recorder genericclioptions.Recorder
	// Populated from the factory/kubeconfig in Complete().
	namespace string
	enforceNamespace bool
	builder *resource.Builder
	unstructuredClientForMapping func(mapping *meta.RESTMapping) (resource.RESTClient, error)
	includeUninitialized bool
	genericclioptions.IOStreams
}
var (
	// annotateLong is the extended help text shown by `kubectl annotate --help`.
	annotateLong = templates.LongDesc(`
		Update the annotations on one or more resources
		All Kubernetes objects support the ability to store additional data with the object as
		annotations. Annotations are key/value pairs that can be larger than labels and include
		arbitrary string values such as structured JSON. Tools and system extensions may use
		annotations to store their own data.
		Attempting to set an annotation that already exists will fail unless --overwrite is set.
		If --resource-version is specified and does not match the current resource version on
		the server the command will fail.`)
	// annotateExample lists the runnable examples included in the help output.
	annotateExample = templates.Examples(i18n.T(`
		# Update pod 'foo' with the annotation 'description' and the value 'my frontend'.
		# If the same annotation is set multiple times, only the last value will be applied
		kubectl annotate pods foo description='my frontend'
		# Update a pod identified by type and name in "pod.json"
		kubectl annotate -f pod.json description='my frontend'
		# Update pod 'foo' with the annotation 'description' and the value 'my frontend running nginx', overwriting any existing value.
		kubectl annotate --overwrite pods foo description='my frontend running nginx'
		# Update all pods in the namespace
		kubectl annotate pods --all description='my frontend running nginx'
		# Update pod 'foo' only if the resource is unchanged from version 1.
		kubectl annotate pods foo description='my frontend running nginx' --resource-version=1
		# Update pod 'foo' by removing an annotation named 'description' if it exists.
		# Does not require the --overwrite flag.
		kubectl annotate pods foo description-`))
)
// NewAnnotateOptions returns an AnnotateOptions with defaults wired up: a
// printer whose success verb is "annotated", change-cause record flags, and a
// no-op recorder (replaced in Complete() when recording is requested).
func NewAnnotateOptions(ioStreams genericclioptions.IOStreams) *AnnotateOptions {
	return &AnnotateOptions{
		PrintFlags: genericclioptions.NewPrintFlags("annotated").WithTypeSetter(scheme.Scheme),
		RecordFlags: genericclioptions.NewRecordFlags(),
		Recorder: genericclioptions.NoopRecorder{},
		IOStreams: ioStreams,
	}
}
// NewCmdAnnotate builds the cobra command for `kubectl annotate`, wiring all
// flags into a fresh AnnotateOptions and running the standard
// Complete/Validate/Run pipeline on invocation.
func NewCmdAnnotate(parent string, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra.Command {
	o := NewAnnotateOptions(ioStreams)
	cmd := &cobra.Command{
		Use: "annotate [--overwrite] (-f FILENAME | TYPE NAME) KEY_1=VAL_1 ... KEY_N=VAL_N [--resource-version=version]",
		DisableFlagsInUseLine: true,
		Short: i18n.T("Update the annotations on a resource"),
		Long: annotateLong + "\n\n" + cmdutil.SuggestApiResources(parent),
		Example: annotateExample,
		Run: func(cmd *cobra.Command, args []string) {
			cmdutil.CheckErr(o.Complete(f, cmd, args))
			cmdutil.CheckErr(o.Validate())
			cmdutil.CheckErr(o.RunAnnotate())
		},
	}
	// bind flag structs
	o.RecordFlags.AddFlags(cmd)
	o.PrintFlags.AddFlags(cmd)
	cmdutil.AddIncludeUninitializedFlag(cmd)
	cmd.Flags().BoolVar(&o.overwrite, "overwrite", o.overwrite, "If true, allow annotations to be overwritten, otherwise reject annotation updates that overwrite existing annotations.")
	cmd.Flags().BoolVar(&o.local, "local", o.local, "If true, annotation will NOT contact api-server but run locally.")
	cmd.Flags().StringVarP(&o.selector, "selector", "l", o.selector, "Selector (label query) to filter on, not including uninitialized ones, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2).")
	cmd.Flags().StringVar(&o.fieldSelector, "field-selector", o.fieldSelector, "Selector (field query) to filter on, supports '=', '==', and '!='.(e.g. --field-selector key1=value1,key2=value2). The server only supports a limited number of field queries per type.")
	cmd.Flags().BoolVar(&o.all, "all", o.all, "Select all resources, including uninitialized ones, in the namespace of the specified resource types.")
	cmd.Flags().StringVar(&o.resourceVersion, "resource-version", o.resourceVersion, i18n.T("If non-empty, the annotation update will only succeed if this is the current resource-version for the object. Only valid when specifying a single resource."))
	usage := "identifying the resource to update the annotation"
	cmdutil.AddFilenameOptionFlags(cmd, &o.FilenameOptions, usage)
	cmdutil.AddDryRunFlag(cmd)
	return cmd
}
// Complete adapts from the command line args and factory to the data required.
func (o *AnnotateOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error {
	var err error
	o.RecordFlags.Complete(cmd)
	o.Recorder, err = o.RecordFlags.ToRecorder()
	if err != nil {
		return err
	}
	o.outputFormat = cmdutil.GetFlagString(cmd, "output")
	o.dryrun = cmdutil.GetDryRunFlag(cmd)
	// Suffix printed output so users can tell nothing was persisted.
	if o.dryrun {
		o.PrintFlags.Complete("%s (dry run)")
	}
	printer, err := o.PrintFlags.ToPrinter()
	if err != nil {
		return err
	}
	o.PrintObj = func(obj runtime.Object, out io.Writer) error {
		return printer.PrintObj(obj, out)
	}
	// Namespace resolution comes from the kubeconfig context unless overridden.
	o.namespace, o.enforceNamespace, err = f.ToRawKubeConfigLoader().Namespace()
	if err != nil {
		return err
	}
	o.includeUninitialized = cmdutil.ShouldIncludeUninitialized(cmd, false)
	o.builder = f.NewBuilder()
	o.unstructuredClientForMapping = f.UnstructuredClientForMapping
	// retrieves resource and annotation args from args
	// also checks args to verify that all resources are specified before annotations
	resources, annotationArgs, err := cmdutil.GetResourcesAndPairs(args, "annotation")
	if err != nil {
		return err
	}
	o.resources = resources
	o.newAnnotations, o.removeAnnotations, err = parseAnnotations(annotationArgs)
	if err != nil {
		return err
	}
	return nil
}
// Validate checks the AnnotateOptions for mutually exclusive flags and for the
// minimum inputs (targets plus at least one annotation change) needed to run.
func (o AnnotateOptions) Validate() error {
	switch {
	case o.all && len(o.selector) > 0:
		return fmt.Errorf("cannot set --all and --selector at the same time")
	case o.all && len(o.fieldSelector) > 0:
		return fmt.Errorf("cannot set --all and --field-selector at the same time")
	case len(o.resources) < 1 && cmdutil.IsFilenameSliceEmpty(o.Filenames):
		return fmt.Errorf("one or more resources must be specified as <resource> <name> or <resource>/<name>")
	case len(o.newAnnotations) < 1 && len(o.removeAnnotations) < 1:
		return fmt.Errorf("at least one annotation update is required")
	}
	// Finally ensure no key is both added and removed in the same invocation.
	return validateAnnotations(o.removeAnnotations, o.newAnnotations)
}
// RunAnnotate does the work: it resolves the target objects (from files or
// type/name args), applies the annotation additions/removals, and either
// patches the live objects or, in --local/--dry-run mode, only prints the
// mutated in-memory copies.
func (o AnnotateOptions) RunAnnotate() error {
	b := o.builder.
		Unstructured().
		LocalParam(o.local).
		ContinueOnError().
		NamespaceParam(o.namespace).DefaultNamespace().
		FilenameParam(o.enforceNamespace, &o.FilenameOptions).
		IncludeUninitialized(o.includeUninitialized).
		Flatten()
	// Selectors and live lookups only apply when talking to the server.
	if !o.local {
		b = b.LabelSelectorParam(o.selector).
			FieldSelectorParam(o.fieldSelector).
			ResourceTypeOrNameArgs(o.all, o.resources...).
			Latest()
	}
	r := b.Do()
	if err := r.Err(); err != nil {
		return err
	}
	var singleItemImpliedResource bool
	r.IntoSingleItemImplied(&singleItemImpliedResource)
	// only apply resource version locking on a single resource.
	// we must perform this check after o.builder.Do() as
	// []o.resources cannot accurately return the proper number
	// of resources when they are not passed in "resource/name" format.
	if !singleItemImpliedResource && len(o.resourceVersion) > 0 {
		return fmt.Errorf("--resource-version may only be used with a single resource")
	}
	return r.Visit(func(info *resource.Info, err error) error {
		if err != nil {
			return err
		}
		var outputObj runtime.Object
		obj := info.Object
		// Dry-run / local mode: mutate the in-memory copy only.
		if o.dryrun || o.local {
			if err := o.updateAnnotations(obj); err != nil {
				return err
			}
			outputObj = obj
		} else {
			name, namespace := info.Name, info.Namespace
			// Snapshot the object before mutation so a merge patch can be computed.
			oldData, err := json.Marshal(obj)
			if err != nil {
				return err
			}
			// Recording failures are non-fatal: log and continue.
			if err := o.Recorder.Record(info.Object); err != nil {
				glog.V(4).Infof("error recording current command: %v", err)
			}
			if err := o.updateAnnotations(obj); err != nil {
				return err
			}
			newData, err := json.Marshal(obj)
			if err != nil {
				return err
			}
			patchBytes, err := jsonpatch.CreateMergePatch(oldData, newData)
			createdPatch := err == nil
			if err != nil {
				glog.V(2).Infof("couldn't compute patch: %v", err)
			}
			mapping := info.ResourceMapping()
			client, err := o.unstructuredClientForMapping(mapping)
			if err != nil {
				return err
			}
			helper := resource.NewHelper(client, mapping)
			// Prefer a merge patch; fall back to a full replace when the patch
			// could not be computed.
			if createdPatch {
				outputObj, err = helper.Patch(namespace, name, types.MergePatchType, patchBytes)
			} else {
				outputObj, err = helper.Replace(namespace, name, false, obj)
			}
			if err != nil {
				return err
			}
		}
		return o.PrintObj(outputObj, o.Out)
	})
}
// parseAnnotations retrieves new and remove annotations from annotation args:
// KEY=VAL pairs become entries in the returned map, while a trailing '-' on a
// key (KEY-) marks it for removal and lands in the returned slice.
func parseAnnotations(annotationArgs []string) (map[string]string, []string, error) {
	return cmdutil.ParsePairs(annotationArgs, "annotation", true)
}
// validateAnnotations checks the format of annotation args and checks removed
// annotations aren't in the new annotations map. It returns an error naming
// every key that is both being set (KEY=VAL) and removed (KEY-), or nil when
// the two sets are disjoint.
func validateAnnotations(removeAnnotations []string, newAnnotations map[string]string) error {
	var modifyRemoveBuf bytes.Buffer
	for _, removeAnnotation := range removeAnnotations {
		if _, found := newAnnotations[removeAnnotation]; found {
			if modifyRemoveBuf.Len() > 0 {
				modifyRemoveBuf.WriteString(", ")
			}
			// Write the key verbatim. The previous fmt.Sprintf(removeAnnotation)
			// treated user input as a format string, mangling keys containing '%'
			// (e.g. "a%d" was rendered as "a%!d(MISSING)").
			modifyRemoveBuf.WriteString(removeAnnotation)
		}
	}
	if modifyRemoveBuf.Len() > 0 {
		return fmt.Errorf("can not both modify and remove the following annotation(s) in the same command: %s", modifyRemoveBuf.String())
	}
	return nil
}
// validateNoAnnotationOverwrites validates that when overwrite is false, to-be-updated annotations don't exist in the object annotation map (yet)
func validateNoAnnotationOverwrites(accessor metav1.Object, annotations map[string]string) error {
	var buf bytes.Buffer
	// NOTE(review): map iteration order is randomized, so the order of
	// conflicting keys in the error message is nondeterministic.
	for key := range annotations {
		// change-cause annotation can always be overwritten
		if key == kubectl.ChangeCauseAnnotation {
			continue
		}
		if value, found := accessor.GetAnnotations()[key]; found {
			if buf.Len() > 0 {
				buf.WriteString("; ")
			}
			buf.WriteString(fmt.Sprintf("'%s' already has a value (%s)", key, value))
		}
	}
	if buf.Len() > 0 {
		return fmt.Errorf("--overwrite is false but found the following declared annotation(s): %s", buf.String())
	}
	return nil
}
// updateAnnotations updates annotations of obj in place: additions from
// o.newAnnotations are applied first, then removals from o.removeAnnotations.
func (o AnnotateOptions) updateAnnotations(obj runtime.Object) error {
	accessor, err := meta.Accessor(obj)
	if err != nil {
		return err
	}
	// Unless --overwrite was given, refuse to change any existing value.
	if !o.overwrite {
		if err := validateNoAnnotationOverwrites(accessor, o.newAnnotations); err != nil {
			return err
		}
	}
	annotations := accessor.GetAnnotations()
	if annotations == nil {
		annotations = make(map[string]string)
	}
	for key, value := range o.newAnnotations {
		annotations[key] = value
	}
	// Removals win over additions for a key present in both (Validate() rejects
	// that combination up front).
	for _, annotation := range o.removeAnnotations {
		delete(annotations, annotation)
	}
	accessor.SetAnnotations(annotations)
	// Optional optimistic-concurrency lock: send the user-supplied resourceVersion.
	if len(o.resourceVersion) != 0 {
		accessor.SetResourceVersion(o.resourceVersion)
	}
	return nil
}
| mdshuai/kubernetes | pkg/kubectl/cmd/annotate.go | GO | apache-2.0 | 13,212 |
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.api.ads.adwords.jaxws.v201809.cm;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlType;
/**
*
* Returns a list of CampaignSharedSets based on the given selector.
* @param selector the selector specifying the query
* @return a list of CampaignSharedSet entities that meet the criterion specified
* by the selector
* @throws ApiException
*
*
* <p>Java class for get element declaration.
*
* <p>The following schema fragment specifies the expected content contained within this class.
*
* <pre>
* <element name="get">
* <complexType>
* <complexContent>
* <restriction base="{http://www.w3.org/2001/XMLSchema}anyType">
* <sequence>
* <element name="selector" type="{https://adwords.google.com/api/adwords/cm/v201809}Selector" minOccurs="0"/>
* </sequence>
* </restriction>
* </complexContent>
* </complexType>
* </element>
* </pre>
*
*
*/
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "", propOrder = {
    "selector"
})
@XmlRootElement(name = "get")
public class CampaignSharedSetServiceInterfaceget {

    // Selector restricting which CampaignSharedSet entities are returned.
    // Optional per the schema (minOccurs="0"), so this may be null.
    protected Selector selector;

    /**
     * Gets the value of the selector property.
     *
     * @return
     *     possible object is
     *     {@link Selector }
     *
     */
    public Selector getSelector() {
        return selector;
    }

    /**
     * Sets the value of the selector property.
     *
     * @param value
     *     allowed object is
     *     {@link Selector }
     *
     */
    public void setSelector(Selector value) {
        this.selector = value;
    }

}
| googleads/googleads-java-lib | modules/adwords_appengine/src/main/java/com/google/api/ads/adwords/jaxws/v201809/cm/CampaignSharedSetServiceInterfaceget.java | Java | apache-2.0 | 2,446 |
/*
Copyright 2018 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import (
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
fakeapiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake"
apiextensionsv1listers "k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
fakekubeclientset "k8s.io/client-go/kubernetes/fake"
appsv1listers "k8s.io/client-go/listers/apps/v1"
corev1listers "k8s.io/client-go/listers/core/v1"
rbacv1listers "k8s.io/client-go/listers/rbac/v1"
"k8s.io/client-go/tools/cache"
sourcesv1beta2 "knative.dev/eventing/pkg/apis/sources/v1beta2"
fakeeventingclientset "knative.dev/eventing/pkg/client/clientset/versioned/fake"
sourcev1beta2listers "knative.dev/eventing/pkg/client/listers/sources/v1beta2"
duckv1 "knative.dev/pkg/apis/duck/v1"
"knative.dev/pkg/reconciler/testing"
)
// subscriberAddToScheme registers a test-only "Subscriber" kind (backed by
// unstructured.Unstructured) with the given scheme. It never fails.
var subscriberAddToScheme = func(scheme *runtime.Scheme) error {
	scheme.AddKnownTypeWithName(schema.GroupVersionKind{Group: "testing.eventing.knative.dev", Version: "v1", Kind: "Subscriber"}, &unstructured.Unstructured{})
	return nil
}

// sourceAddToScheme registers a test-only "TestSource" kind (backed by
// duckv1.Source) with the given scheme. It never fails.
var sourceAddToScheme = func(scheme *runtime.Scheme) error {
	scheme.AddKnownTypeWithName(schema.GroupVersionKind{Group: "testing.sources.knative.dev", Version: "v1", Kind: "TestSource"}, &duckv1.Source{})
	return nil
}

// clientSetSchemes lists every AddToScheme function whose registered types
// the test listers in this file can sort and index.
var clientSetSchemes = []func(*runtime.Scheme) error{
	fakekubeclientset.AddToScheme,
	fakeeventingclientset.AddToScheme,
	fakeapiextensionsclientset.AddToScheme,
	subscriberAddToScheme,
	sourceAddToScheme,
}
// Listers provides typed listers over a fixed set of runtime objects,
// bucketed by which client-set scheme recognizes each object.
type Listers struct {
	sorter testing.ObjectSorter
}
// NewScheme builds a runtime.Scheme with every entry of clientSetSchemes
// registered. Registration errors are deliberately ignored: this is a
// test-only helper and the registered AddToScheme funcs do not fail here.
func NewScheme() *runtime.Scheme {
	scheme := runtime.NewScheme()
	for _, addTo := range clientSetSchemes {
		addTo(scheme)
	}
	return scheme
}
// NewListers returns a Listers tracking the given objects, indexed by the
// combined scheme produced by NewScheme.
func NewListers(objs []runtime.Object) Listers {
	// Reuse NewScheme instead of duplicating the scheme-building loop
	// that previously appeared in both functions.
	ls := Listers{
		sorter: testing.NewObjectSorter(NewScheme()),
	}
	ls.sorter.AddObjects(objs...)
	return ls
}
// indexerFor returns the cache indexer holding objects of obj's type.
func (l *Listers) indexerFor(obj runtime.Object) cache.Indexer {
	return l.sorter.IndexerForObjectType(obj)
}

// GetKubeObjects returns the tracked objects known to the core Kubernetes scheme.
func (l *Listers) GetKubeObjects() []runtime.Object {
	return l.sorter.ObjectsForSchemeFunc(fakekubeclientset.AddToScheme)
}

// GetEventingObjects returns the tracked objects known to the eventing scheme.
func (l *Listers) GetEventingObjects() []runtime.Object {
	return l.sorter.ObjectsForSchemeFunc(fakeeventingclientset.AddToScheme)
}

// GetAPIExtentionsObjects returns the tracked objects known to the
// apiextensions (CRD) scheme.
func (l *Listers) GetAPIExtentionsObjects() []runtime.Object {
	return l.sorter.ObjectsForSchemeFunc(fakeapiextensionsclientset.AddToScheme)
}

// GetSubscriberObjects returns the tracked test "Subscriber" objects.
func (l *Listers) GetSubscriberObjects() []runtime.Object {
	return l.sorter.ObjectsForSchemeFunc(subscriberAddToScheme)
}

// GetAllObjects returns the subscriber, eventing and kube objects combined.
// Note: apiextensions objects are not included here.
func (l *Listers) GetAllObjects() []runtime.Object {
	all := l.GetSubscriberObjects()
	all = append(all, l.GetEventingObjects()...)
	all = append(all, l.GetKubeObjects()...)
	return all
}

// GetPingSourceV1beta2Lister returns a lister over tracked v1beta2 PingSources.
func (l *Listers) GetPingSourceV1beta2Lister() sourcev1beta2listers.PingSourceLister {
	return sourcev1beta2listers.NewPingSourceLister(l.indexerFor(&sourcesv1beta2.PingSource{}))
}

// GetDeploymentLister returns a lister over tracked Deployments.
func (l *Listers) GetDeploymentLister() appsv1listers.DeploymentLister {
	return appsv1listers.NewDeploymentLister(l.indexerFor(&appsv1.Deployment{}))
}

// GetK8sServiceLister returns a lister over tracked core Services.
func (l *Listers) GetK8sServiceLister() corev1listers.ServiceLister {
	return corev1listers.NewServiceLister(l.indexerFor(&corev1.Service{}))
}

// GetSecretLister returns a lister over tracked Secrets.
func (l *Listers) GetSecretLister() corev1listers.SecretLister {
	return corev1listers.NewSecretLister(l.indexerFor(&corev1.Secret{}))
}

// GetNamespaceLister returns a lister over tracked Namespaces.
func (l *Listers) GetNamespaceLister() corev1listers.NamespaceLister {
	return corev1listers.NewNamespaceLister(l.indexerFor(&corev1.Namespace{}))
}

// GetServiceAccountLister returns a lister over tracked ServiceAccounts.
func (l *Listers) GetServiceAccountLister() corev1listers.ServiceAccountLister {
	return corev1listers.NewServiceAccountLister(l.indexerFor(&corev1.ServiceAccount{}))
}

// GetServiceLister returns a lister over tracked core Services
// (same underlying indexer as GetK8sServiceLister).
func (l *Listers) GetServiceLister() corev1listers.ServiceLister {
	return corev1listers.NewServiceLister(l.indexerFor(&corev1.Service{}))
}

// GetRoleBindingLister returns a lister over tracked RoleBindings.
func (l *Listers) GetRoleBindingLister() rbacv1listers.RoleBindingLister {
	return rbacv1listers.NewRoleBindingLister(l.indexerFor(&rbacv1.RoleBinding{}))
}

// GetEndpointsLister returns a lister over tracked Endpoints.
func (l *Listers) GetEndpointsLister() corev1listers.EndpointsLister {
	return corev1listers.NewEndpointsLister(l.indexerFor(&corev1.Endpoints{}))
}

// GetConfigMapLister returns a lister over tracked ConfigMaps.
func (l *Listers) GetConfigMapLister() corev1listers.ConfigMapLister {
	return corev1listers.NewConfigMapLister(l.indexerFor(&corev1.ConfigMap{}))
}

// GetCustomResourceDefinitionLister returns a lister over tracked CRDs.
func (l *Listers) GetCustomResourceDefinitionLister() apiextensionsv1listers.CustomResourceDefinitionLister {
	return apiextensionsv1listers.NewCustomResourceDefinitionLister(l.indexerFor(&apiextensionsv1.CustomResourceDefinition{}))
}
| google/knative-gcp | vendor/knative.dev/eventing/pkg/reconciler/testing/listers.go | GO | apache-2.0 | 5,343 |
/**
* AET
*
* Copyright (C) 2013 Cognifide Limited
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
/*
* Copyright [2016] [http://bmp.lightbody.net/]
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
* the specific language governing permissions and limitations under the License.
*/
package org.browsermob.core.json;
import java.io.IOException;
import java.lang.reflect.Type;
import java.text.DateFormat;
import java.util.Date;
import org.codehaus.jackson.JsonGenerationException;
import org.codehaus.jackson.JsonGenerator;
import org.codehaus.jackson.JsonNode;
import org.codehaus.jackson.map.JsonMappingException;
import org.codehaus.jackson.map.SerializerProvider;
import org.codehaus.jackson.map.ser.ScalarSerializerBase;
public class ISO8601DateFormatter extends ScalarSerializerBase<Date> {

    /** Reusable shared instance; the serializer itself holds no mutable state. */
    public final static ISO8601DateFormatter instance = new ISO8601DateFormatter();

    public ISO8601DateFormatter() {
        super(Date.class);
    }

    /**
     * Writes the date as a JSON string using the provider's configured
     * {@link DateFormat}. The format object is cloned on every call because
     * DateFormat instances are not thread-safe.
     */
    @Override
    public void serialize(Date value, JsonGenerator jgen, SerializerProvider provider)
        throws IOException, JsonGenerationException {
        DateFormat df = (DateFormat) provider.getConfig().getDateFormat().clone();
        jgen.writeString(df.format(value));
    }

    /** Reports this serializer's output type as "string" in the JSON schema. */
    @Override
    public JsonNode getSchema(SerializerProvider provider, Type typeHint)
        throws JsonMappingException {
        return createSchemaNode("string", true);
    }
}
| Cognifide/AET | api/jobs-api/src/main/java/org/browsermob/core/json/ISO8601DateFormatter.java | Java | apache-2.0 | 2,315 |
package org.apache.lucene.index;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.codecs.TermVectorsWriter;
import org.apache.lucene.util.ByteBlockPool;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.RamUsageEstimator;
/**
 * Per-field term-vector accumulator: collects term frequencies and,
 * depending on the field type, positions, offsets and payloads for one
 * field of the document currently being inverted, then flushes them
 * through the codec's {@link TermVectorsWriter}.
 */
final class TermVectorsConsumerPerField extends TermsHashConsumerPerField {

  final TermsHashPerField termsHashPerField;
  final TermVectorsConsumer termsWriter;
  final FieldInfo fieldInfo;

  final DocumentsWriterPerThread.DocState docState;
  final FieldInvertState fieldState;

  // Flags recomputed per document by start(IndexableField[], int):
  // which term-vector features are enabled for this field.
  boolean doVectors;
  boolean doVectorPositions;
  boolean doVectorOffsets;
  boolean doVectorPayloads;

  // Largest number of unique terms seen for this field in any document so far.
  int maxNumPostings;

  OffsetAttribute offsetAttribute;
  PayloadAttribute payloadAttribute;
  boolean hasPayloads; // if enabled, and we actually saw any for this field

  public TermVectorsConsumerPerField(TermsHashPerField termsHashPerField, TermVectorsConsumer termsWriter, FieldInfo fieldInfo) {
    this.termsHashPerField = termsHashPerField;
    this.termsWriter = termsWriter;
    this.fieldInfo = fieldInfo;
    docState = termsHashPerField.docState;
    fieldState = termsHashPerField.fieldState;
  }

  // Two byte streams per posting: stream 0 carries positions (and payloads),
  // stream 1 carries offsets -- see writeProx and finishDocument.
  @Override
  int getStreamCount() {
    return 2;
  }

  /**
   * Inspects every instance of this field in the current document,
   * validates that the requested term-vector options are mutually
   * consistent, and records which features must be captured.
   *
   * @return true if term vectors should be collected for this document
   */
  @Override
  boolean start(IndexableField[] fields, int count) {
    doVectors = false;
    doVectorPositions = false;
    doVectorOffsets = false;
    doVectorPayloads = false;
    hasPayloads = false;

    for(int i=0;i<count;i++) {
      IndexableField field = fields[i];
      if (field.fieldType().indexed()) {
        if (field.fieldType().storeTermVectors()) {
          doVectors = true;
          doVectorPositions |= field.fieldType().storeTermVectorPositions();
          doVectorOffsets |= field.fieldType().storeTermVectorOffsets();
          if (doVectorPositions) {
            doVectorPayloads |= field.fieldType().storeTermVectorPayloads();
          } else if (field.fieldType().storeTermVectorPayloads()) {
            // TODO: move this check somewhere else, and impl the other missing ones
            throw new IllegalArgumentException("cannot index term vector payloads without term vector positions (field=\"" + field.name() + "\")");
          }
        } else {
          // Term vectors disabled: no vector sub-option may be enabled.
          if (field.fieldType().storeTermVectorOffsets()) {
            throw new IllegalArgumentException("cannot index term vector offsets when term vectors are not indexed (field=\"" + field.name() + "\")");
          }
          if (field.fieldType().storeTermVectorPositions()) {
            throw new IllegalArgumentException("cannot index term vector positions when term vectors are not indexed (field=\"" + field.name() + "\")");
          }
          if (field.fieldType().storeTermVectorPayloads()) {
            throw new IllegalArgumentException("cannot index term vector payloads when term vectors are not indexed (field=\"" + field.name() + "\")");
          }
        }
      } else {
        // Field not indexed at all: any vector option is an error.
        if (field.fieldType().storeTermVectors()) {
          throw new IllegalArgumentException("cannot index term vectors when field is not indexed (field=\"" + field.name() + "\")");
        }
        if (field.fieldType().storeTermVectorOffsets()) {
          throw new IllegalArgumentException("cannot index term vector offsets when field is not indexed (field=\"" + field.name() + "\")");
        }
        if (field.fieldType().storeTermVectorPositions()) {
          throw new IllegalArgumentException("cannot index term vector positions when field is not indexed (field=\"" + field.name() + "\")");
        }
        if (field.fieldType().storeTermVectorPayloads()) {
          throw new IllegalArgumentException("cannot index term vector payloads when field is not indexed (field=\"" + field.name() + "\")");
        }
      }
    }

    if (doVectors) {
      termsWriter.hasVectors = true;
      if (termsHashPerField.bytesHash.size() != 0) {
        // Only necessary if previous doc hit a
        // non-aborting exception while writing vectors in
        // this field:
        termsHashPerField.reset();
      }
    }

    // TODO: only if needed for performance
    //perThread.postingsCount = 0;

    return doVectors;
  }

  public void abort() {} // nothing to clean up on abort

  /** Called once per field per document if term vectors
   *  are enabled, to write the vectors to
   *  RAMOutputStream, which is then quickly flushed to
   *  the real term vectors files in the Directory. */
  @Override
  void finish() {
    if (!doVectors || termsHashPerField.bytesHash.size() == 0) {
      return;
    }
    termsWriter.addFieldToFlush(this);
  }

  /**
   * Flushes this field's accumulated postings to the term vectors writer
   * and resets the per-field hash for the next document.
   */
  void finishDocument() throws IOException {
    assert docState.testPoint("TermVectorsTermsWriterPerField.finish start");

    final int numPostings = termsHashPerField.bytesHash.size();

    final BytesRef flushTerm = termsWriter.flushTerm;

    assert numPostings >= 0;

    if (numPostings > maxNumPostings)
      maxNumPostings = numPostings;

    // This is called once, after inverting all occurrences
    // of a given field in the doc.  At this point we flush
    // our hash into the DocWriter.

    assert termsWriter.vectorFieldsInOrder(fieldInfo);

    TermVectorsPostingsArray postings = (TermVectorsPostingsArray) termsHashPerField.postingsArray;
    final TermVectorsWriter tv = termsWriter.writer;

    // Terms are flushed in sorted order.
    final int[] termIDs = termsHashPerField.sortPostings();

    tv.startField(fieldInfo, numPostings, doVectorPositions, doVectorOffsets, hasPayloads);

    // Readers over stream 0 (positions/payloads) and stream 1 (offsets).
    final ByteSliceReader posReader = doVectorPositions ? termsWriter.vectorSliceReaderPos : null;
    final ByteSliceReader offReader = doVectorOffsets ? termsWriter.vectorSliceReaderOff : null;

    final ByteBlockPool termBytePool = termsHashPerField.termBytePool;

    for(int j=0;j<numPostings;j++) {
      final int termID = termIDs[j];
      final int freq = postings.freqs[termID];

      // Get BytesRef
      termBytePool.setBytesRef(flushTerm, postings.textStarts[termID]);
      tv.startTerm(flushTerm, freq);

      if (doVectorPositions || doVectorOffsets) {
        if (posReader != null) {
          termsHashPerField.initReader(posReader, termID, 0);
        }
        if (offReader != null) {
          termsHashPerField.initReader(offReader, termID, 1);
        }
        tv.addProx(freq, posReader, offReader);
      }
      tv.finishTerm();
    }
    tv.finishField();

    termsHashPerField.reset();

    fieldInfo.setStoreTermVectors();
  }

  /** Pulls the offset/payload attributes needed for the enabled features. */
  @Override
  void start(IndexableField f) {
    if (doVectorOffsets) {
      offsetAttribute = fieldState.attributeSource.addAttribute(OffsetAttribute.class);
    } else {
      offsetAttribute = null;
    }
    if (doVectorPayloads && fieldState.attributeSource.hasAttribute(PayloadAttribute.class)) {
      payloadAttribute = fieldState.attributeSource.getAttribute(PayloadAttribute.class);
    } else {
      payloadAttribute = null;
    }
  }

  /** Appends the current occurrence's offsets/position (and payload) for termID. */
  void writeProx(TermVectorsPostingsArray postings, int termID) {
    if (doVectorOffsets) {
      int startOffset = fieldState.offset + offsetAttribute.startOffset();
      int endOffset = fieldState.offset + offsetAttribute.endOffset();

      // Stream 1: delta-coded start offset followed by the length.
      termsHashPerField.writeVInt(1, startOffset - postings.lastOffsets[termID]);
      termsHashPerField.writeVInt(1, endOffset - startOffset);
      postings.lastOffsets[termID] = endOffset;
    }

    if (doVectorPositions) {
      final BytesRef payload;
      if (payloadAttribute == null) {
        payload = null;
      } else {
        payload = payloadAttribute.getPayload();
      }

      // Stream 0: delta-coded position; the low bit flags a payload.
      final int pos = fieldState.position - postings.lastPositions[termID];
      if (payload != null && payload.length > 0) {
        termsHashPerField.writeVInt(0, (pos<<1)|1);
        termsHashPerField.writeVInt(0, payload.length);
        termsHashPerField.writeBytes(0, payload.bytes, payload.offset, payload.length);
        hasPayloads = true;
      } else {
        termsHashPerField.writeVInt(0, pos<<1);
      }
      postings.lastPositions[termID] = fieldState.position;
    }
  }

  /** First occurrence of a term in this document: freq starts at 1. */
  @Override
  void newTerm(final int termID) {
    assert docState.testPoint("TermVectorsTermsWriterPerField.newTerm start");
    TermVectorsPostingsArray postings = (TermVectorsPostingsArray) termsHashPerField.postingsArray;

    postings.freqs[termID] = 1;
    postings.lastOffsets[termID] = 0;
    postings.lastPositions[termID] = 0;

    writeProx(postings, termID);
  }

  /** Repeat occurrence of a term in this document: bump freq and append prox. */
  @Override
  void addTerm(final int termID) {
    assert docState.testPoint("TermVectorsTermsWriterPerField.addTerm start");

    TermVectorsPostingsArray postings = (TermVectorsPostingsArray) termsHashPerField.postingsArray;

    postings.freqs[termID]++;

    writeProx(postings, termID);
  }

  @Override
  void skippingLongTerm() {} // over-length terms are simply skipped

  @Override
  ParallelPostingsArray createPostingsArray(int size) {
    return new TermVectorsPostingsArray(size);
  }

  /** Parallel arrays tracking per-term freq and last-seen offset/position. */
  static final class TermVectorsPostingsArray extends ParallelPostingsArray {
    public TermVectorsPostingsArray(int size) {
      super(size);
      freqs = new int[size];
      lastOffsets = new int[size];
      lastPositions = new int[size];
    }

    int[] freqs;                                       // How many times this term occurred in the current doc
    int[] lastOffsets;                                 // Last offset we saw
    int[] lastPositions;                               // Last position where this term occurred

    @Override
    ParallelPostingsArray newInstance(int size) {
      return new TermVectorsPostingsArray(size);
    }

    @Override
    void copyTo(ParallelPostingsArray toArray, int numToCopy) {
      assert toArray instanceof TermVectorsPostingsArray;
      TermVectorsPostingsArray to = (TermVectorsPostingsArray) toArray;

      super.copyTo(toArray, numToCopy);

      System.arraycopy(freqs, 0, to.freqs, 0, size);
      System.arraycopy(lastOffsets, 0, to.lastOffsets, 0, size);
      System.arraycopy(lastPositions, 0, to.lastPositions, 0, size);
    }

    @Override
    int bytesPerPosting() {
      return super.bytesPerPosting() + 3 * RamUsageEstimator.NUM_BYTES_INT;
    }
  }
}
| fogbeam/Heceta_solr | lucene/core/src/java/org/apache/lucene/index/TermVectorsConsumerPerField.java | Java | apache-2.0 | 10,979 |
#
# Author:: Scott Bonds (scott@ggr.com)
# Copyright:: Copyright 2014-2016, Scott Bonds
# License:: Apache License, Version 2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require "spec_helper"
require "ostruct"
describe Chef::Provider::Package::Openbsd do

  # Minimal node data: the provider derives the default PKG_PATH from the
  # kernel name/release/machine attributes set here.
  let(:node) do
    node = Chef::Node.new
    node.default["kernel"] = { "name" => "OpenBSD", "release" => "5.5", "machine" => "amd64" }
    node
  end

  let (:provider) do
    events = Chef::EventDispatch::Dispatcher.new
    run_context = Chef::RunContext.new(node, {}, events)
    Chef::Provider::Package::Openbsd.new(new_resource, run_context)
  end

  let(:new_resource) { Chef::Resource::Package.new(name) }

  before(:each) do
    # Make sure a PKG_PATH leaking in from the test environment cannot
    # influence how the provider resolves its package source.
    ENV["PKG_PATH"] = nil
  end

  describe "install a package" do
    let(:name) { "ihavetoes" }
    let(:version) { "0.0" }

    context "when not already installed" do
      before do
        # pkg_info -e reports nothing installed for this package.
        allow(provider).to receive(:shell_out_compacted!).with("pkg_info", "-e", "#{name}->0", anything).and_return(instance_double("shellout", stdout: ""))
      end

      context "when there is a single candidate" do
        context "when source is not provided" do
          it "should run the installation command" do
            expect(provider).to receive(:shell_out_compacted!).with("pkg_info", "-I", name, anything).and_return(
              instance_double("shellout", stdout: "#{name}-#{version}\n")
            )
            # PKG_PATH is built from the node's kernel attributes above.
            expect(provider).to receive(:shell_out_compacted!).with(
              "pkg_add", "-r", "#{name}-#{version}",
              { env: { "PKG_PATH" => "http://ftp.OpenBSD.org/pub/OpenBSD/5.5/packages/amd64/" }, timeout: 900 }
            ) { OpenStruct.new status: true }
            provider.run_action(:install)
          end
        end
      end

      context "when there are multiple candidates" do
        let(:flavor_a) { "flavora" }
        let(:flavor_b) { "flavorb" }

        context "if no version is specified" do
          it "should raise an exception" do
            # Two flavors match; the provider cannot choose between them.
            expect(provider).to receive(:shell_out_compacted!).with("pkg_info", "-I", name, anything).and_return(
              instance_double("shellout", stdout: "#{name}-#{version}-#{flavor_a}\n#{name}-#{version}-#{flavor_b}\n")
            )
            expect { provider.run_action(:install) }.to raise_error(Chef::Exceptions::Package, /multiple matching candidates/)
          end
        end

        context "if a flavor is specified" do
          let(:flavor) { "flavora" }
          let(:package_name) { "ihavetoes" }
          # Package name with an explicit flavor suffix ("--<flavor>").
          let(:name) { "#{package_name}--#{flavor}" }

          context "if no version is specified" do
            it "should run the installation command" do
              expect(provider).to receive(:shell_out_compacted!).with("pkg_info", "-e", "#{package_name}->0", anything).and_return(instance_double("shellout", stdout: ""))
              expect(provider).to receive(:shell_out_compacted!).with("pkg_info", "-I", name, anything).and_return(
                instance_double("shellout", stdout: "#{name}-#{version}-#{flavor}\n")
              )
              expect(provider).to receive(:shell_out_compacted!).with(
                "pkg_add", "-r", "#{name}-#{version}-#{flavor}",
                { env: { "PKG_PATH" => "http://ftp.OpenBSD.org/pub/OpenBSD/5.5/packages/amd64/" }, timeout: 900 }
              ) { OpenStruct.new status: true }
              provider.run_action(:install)
            end
          end
        end

        context "if a version is specified" do
          it "should use the flavor from the version" do
            expect(provider).to receive(:shell_out_compacted!).with("pkg_info", "-I", "#{name}-#{version}-#{flavor_b}", anything).and_return(
              instance_double("shellout", stdout: "#{name}-#{version}-#{flavor_a}\n")
            )
            # The flavor embedded in the version wins over the name's flavor.
            new_resource.version("#{version}-#{flavor_b}")
            expect(provider).to receive(:shell_out_compacted!).with(
              "pkg_add", "-r", "#{name}-#{version}-#{flavor_b}",
              { env: { "PKG_PATH" => "http://ftp.OpenBSD.org/pub/OpenBSD/5.5/packages/amd64/" }, timeout: 900 }
            ) { OpenStruct.new status: true }
            provider.run_action(:install)
          end
        end
      end
    end
  end

  describe "delete a package" do
    before do
      @name = "ihavetoes"
      @new_resource = Chef::Resource::Package.new(@name)
      @current_resource = Chef::Resource::Package.new(@name)
      # NOTE(review): @run_context is not defined in this describe block, so
      # the provider is constructed with nil -- sufficient for this test.
      @provider = Chef::Provider::Package::Openbsd.new(@new_resource, @run_context)
      @provider.current_resource = @current_resource
    end

    it "should run the command to delete the installed package" do
      expect(@provider).to receive(:shell_out_compacted!).with(
        "pkg_delete", @name, env: nil, timeout: 900
      ) { OpenStruct.new status: true }
      @provider.remove_package(@name, nil)
    end
  end
end
| jaymzh/chef | spec/unit/provider/package/openbsd_spec.rb | Ruby | apache-2.0 | 5,369 |
/*
* Copyright 2011 Ning, Inc.
*
* Ning licenses this file to you under the Apache License, version 2.0
* (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package com.mogwee.executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;
/**
 * A {@link ThreadFactory} that names each created thread {@code [name]-[id]},
 * where the id increases monotonically per factory instance. Meaningful
 * thread names make stack traces much easier to read.
 */
public class NamedThreadFactory implements ThreadFactory
{
    private final AtomicInteger count = new AtomicInteger(0);
    private final String name;

    public NamedThreadFactory(String name)
    {
        this.name = name;
    }

    @Override
    public Thread newThread(final Runnable runnable)
    {
        // Thread(Runnable, String) assigns the name at construction time,
        // equivalent to creating the thread and then calling setName.
        return new Thread(runnable, name + "-" + count.incrementAndGet());
    }
}
| twilliamson/mogwee-executors | src/main/java/com/mogwee/executors/NamedThreadFactory.java | Java | apache-2.0 | 1,331 |
#
# farmwork/forms.py
#
from django import forms
from django.utils.text import slugify
from .models import Farmwork
# ========================================================
# FARMWORK FORM
# ========================================================
class FarmworkForm(forms.ModelForm):
    """ModelForm for creating and editing Farmwork listings.

    On first save the form auto-generates a URL slug from the job fruit,
    role and city (see ``save`` below).
    """

    def __init__(self, *args, **kwargs):
        super(FarmworkForm, self).__init__(*args, **kwargs)

    class Meta:
        model = Farmwork
        fields = [
            'job_role',
            'job_fruit',
            'job_pay',
            'job_pay_type',
            'job_start_date',
            'job_duration',
            'job_duration_type',
            'job_description',
            'con_first_name',
            'con_surname',
            'con_number',
            'con_email',
            'con_description',
            'acc_variety',
            'acc_price',
            'acc_price_type',
            'acc_description',
            'loc_street_address',
            'loc_city',
            'loc_state',
            'loc_post_code',
        ]

    # --
    # AUTO GENERATE SLUG ON SAVE
    # Credit: https://keyerror.com/blog/automatically-generating-unique-slugs-in-django
    # --
    def save(self, commit=True):
        """Save the form, generating a slug for newly created instances.

        Accepts Django's standard ``commit`` flag (the previous override
        silently dropped it, breaking callers such as generic views that
        call ``form.save(commit=False)``). Defaults preserve the original
        behavior: existing instances keep their slug; new instances get a
        slug derived from fruit, role and city.
        """
        if self.instance.pk:
            # Existing instance: slug stays stable across edits.
            return super(FarmworkForm, self).save(commit=commit)
        instance = super(FarmworkForm, self).save(commit=False)
        instance.slug = slugify(instance.get_job_fruit_display() + '-' + instance.get_job_role_display() + '-in-' + instance.loc_city)
        if commit:
            instance.save()
        return instance
| ianmilliken/rwf | backend/apps/farmwork/forms.py | Python | apache-2.0 | 1,542 |
namespace Konves.ChordPro.Directives
{
	/// <summary>
	/// Represents a ChordPro comment-box directive and carries its text.
	/// </summary>
	public sealed class CommentBoxDirective : Directive
	{
		/// <summary>
		/// Initializes a new instance with the specified comment text.
		/// </summary>
		/// <param name="text">The text of the comment.</param>
		public CommentBoxDirective(string text)
		{
			Text = text;
		}

		/// <summary>Gets or sets the comment text.</summary>
		public string Text { get; set; }
	}
}
from django.contrib.auth.models import AnonymousUser
from core.models import Identity
from api.v2.serializers.post import AccountSerializer
from api.v2.views.base import AdminAuthViewSet
class AccountViewSet(AdminAuthViewSet):
    """
    API endpoint for creating accounts (identities).

    Only POST (plus metadata verbs) is exposed via ``http_method_names``.
    """

    lookup_fields = ("id", "uuid")
    queryset = Identity.objects.all()
    serializer_class = AccountSerializer
    http_method_names = ['post', 'head', 'options', 'trace']

    def get_queryset(self):
        """
        Filter identities down to those belonging to the requesting user.
        """
        user = self.request.user
        # isinstance instead of an exact type comparison: subclasses of
        # AnonymousUser (e.g. custom anonymous-user implementations) must
        # also be treated as unauthenticated.
        if isinstance(user, AnonymousUser):
            return Identity.objects.none()
        return user.current_identities()
| CCI-MOC/GUI-Backend | api/v2/views/account.py | Python | apache-2.0 | 771 |
/*
* Copyright (C) 2015 The Gravitee team (http://gravitee.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import ApiCreationController from './api-creation.controller';
import { shouldDisplayHint } from './form.helper';
import ApiPrimaryOwnerModeService from '../../../../services/apiPrimaryOwnerMode.service';
const ApiCreationStep1Component: ng.IComponentOptions = {
  require: {
    parent: '^apiCreation',
  },
  template: require('./api-creation-step1.html'),
  controller: class {
    private parent: ApiCreationController;
    private advancedMode: boolean;
    private useGroupAsPrimaryOwner: boolean;
    public shouldDisplayHint = shouldDisplayHint;

    constructor(private ApiPrimaryOwnerModeService: ApiPrimaryOwnerModeService) {
      'ngInject';
      this.advancedMode = false;
      this.useGroupAsPrimaryOwner = this.ApiPrimaryOwnerModeService.isGroupOnly();
    }

    // Flip advanced mode; leaving it clears any groups picked while it was on.
    toggleAdvancedMode = () => {
      this.advancedMode = !this.advancedMode;
      if (!this.advancedMode) {
        this.parent.api.groups = [];
      }
    };

    // Advanced mode is available when the primary-owner mode and the
    // available groups allow a group to be attached or made primary owner.
    canUseAdvancedMode = () => {
      const hasAttachableGroups = this.parent.attachableGroups && this.parent.attachableGroups.length > 0;
      const hasPoGroups = this.parent.poGroups && this.parent.poGroups.length > 0;
      return (
        (this.ApiPrimaryOwnerModeService.isHybrid() && (hasAttachableGroups || hasPoGroups)) ||
        (this.ApiPrimaryOwnerModeService.isGroupOnly() && hasAttachableGroups)
      );
    };
  },
};

export default ApiCreationStep1Component;
| gravitee-io/gravitee-management-webui | src/management/api/creation/steps/api-creation-step1.component.ts | TypeScript | apache-2.0 | 2,038 |
/*
* Copyright 2013 the original author or authors.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.micromata.jira.rest.core.misc;
/**
 * String constants used to build JIRA REST requests: the HTTP Basic
 * authentication header parts and the version-2 REST resource paths.
 *
 * @author Christian Schulze
 * @author Vitali Filippow
 */
public interface RestPathConstants {

    // Common Stuff for Jersey Client (HTTP Basic authentication header parts)
    String AUTHORIZATION = "Authorization";
    String BASIC = "Basic";

    // REST Paths
    // Root of the JIRA REST API, version 2; the sub-paths below are
    // appended to it (and to each other) when building request URLs.
    String BASE_REST_PATH = "/rest/api/2";
    String PROJECT = "/project";
    String USER = "/user";
    String SEARCH = "/search";
    String ISSUE = "/issue";
    String COMMENT = "/comment";
    String VERSIONS = "/versions";
    String COMPONENTS = "/components";
    String ISSUETPYES = "/issuetype";
    String STATUS = "/status";
    String PRIORITY = "/priority";
    String TRANSITIONS = "/transitions";
    String WORKLOG = "/worklog";
    String ATTACHMENTS = "/attachments";
    String ATTACHMENT = "/attachment";
    String ASSIGNABLE = "/assignable";
    String FILTER = "/filter";
    String FAVORITE = "/favourite";
    String FIELD = "/field";
    String META = "/meta";
    String CREATEMETA = "/createmeta";
    String MYPERMISSIONS = "/mypermissions";
    String CONFIGURATION = "/configuration";
}
| micromata/jiraRestClient | src/main/java/de/micromata/jira/rest/core/misc/RestPathConstants.java | Java | apache-2.0 | 1,734 |
/*****************************************************************************
* Copyright (C) jparsec.org *
* ------------------------------------------------------------------------- *
* Licensed under the Apache License, Version 2.0 (the "License"); *
* you may not use this file except in compliance with the License. *
* You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
* See the License for the specific language governing permissions and *
* limitations under the License. *
*****************************************************************************/
package org.jparsec.examples.sql.parser;
import static org.jparsec.examples.sql.parser.TerminalParser.phrase;
import static org.jparsec.examples.sql.parser.TerminalParser.term;
import java.util.List;
import java.util.function.BinaryOperator;
import java.util.function.UnaryOperator;
import org.jparsec.OperatorTable;
import org.jparsec.Parser;
import org.jparsec.Parsers;
import org.jparsec.examples.sql.ast.BetweenExpression;
import org.jparsec.examples.sql.ast.BinaryExpression;
import org.jparsec.examples.sql.ast.BinaryRelationalExpression;
import org.jparsec.examples.sql.ast.Expression;
import org.jparsec.examples.sql.ast.FullCaseExpression;
import org.jparsec.examples.sql.ast.FunctionExpression;
import org.jparsec.examples.sql.ast.LikeExpression;
import org.jparsec.examples.sql.ast.NullExpression;
import org.jparsec.examples.sql.ast.NumberExpression;
import org.jparsec.examples.sql.ast.Op;
import org.jparsec.examples.sql.ast.QualifiedName;
import org.jparsec.examples.sql.ast.QualifiedNameExpression;
import org.jparsec.examples.sql.ast.Relation;
import org.jparsec.examples.sql.ast.SimpleCaseExpression;
import org.jparsec.examples.sql.ast.StringExpression;
import org.jparsec.examples.sql.ast.TupleExpression;
import org.jparsec.examples.sql.ast.UnaryExpression;
import org.jparsec.examples.sql.ast.UnaryRelationalExpression;
import org.jparsec.examples.sql.ast.WildcardExpression;
import org.jparsec.functors.Pair;
/**
* Parser for expressions.
*
* @author Ben Yu
*/
public final class ExpressionParser {
  /** Parses the literal keyword {@code null} into the singleton null expression. */
  static final Parser<Expression> NULL = term("null").<Expression>retn(NullExpression.instance);
  /** Parses a numeric literal. */
  static final Parser<Expression> NUMBER = TerminalParser.NUMBER.map(NumberExpression::new);
  /** Parses a (possibly dotted) identifier, e.g. {@code a.b.c}. */
  static final Parser<Expression> QUALIFIED_NAME = TerminalParser.QUALIFIED_NAME
      .map(QualifiedNameExpression::new);
  /** Parses a qualified wildcard such as {@code t.*}. */
  static final Parser<Expression> QUALIFIED_WILDCARD = TerminalParser.QUALIFIED_NAME
      .followedBy(phrase(". *"))
      .map(WildcardExpression::new);
  /** Parses either a bare {@code *} or a qualified wildcard like {@code t.*}. */
  static final Parser<Expression> WILDCARD =
      term("*").<Expression>retn(new WildcardExpression(QualifiedName.of()))
      .or(QUALIFIED_WILDCARD);
  /** Parses a string literal. */
  static final Parser<Expression> STRING = TerminalParser.STRING.map(StringExpression::new);
  /**
   * Parses a function call {@code name(arg, arg, ...)} where each argument is
   * parsed by {@code param}. An empty argument list is accepted.
   */
  static Parser<Expression> functionCall(Parser<Expression> param) {
    return Parsers.sequence(
        TerminalParser.QUALIFIED_NAME, paren(param.sepBy(TerminalParser.term(","))),
        FunctionExpression::new);
  }
  /** Parses a parenthesized, comma-separated tuple of expressions, e.g. {@code (a, b)}. */
  static Parser<Expression> tuple(Parser<Expression> expr) {
    return paren(expr.sepBy(term(","))).map(TupleExpression::new);
  }
  /**
   * Parses a "simple" CASE of the form
   * {@code case <expr> when <expr> then <expr> ... [else <expr>] end}.
   */
  static Parser<Expression> simpleCase(Parser<Expression> expr) {
    return Parsers.sequence(
        term("case").next(expr),
        whenThens(expr, expr),
        term("else").next(expr).optional().followedBy(term("end")),
        SimpleCaseExpression::new);
  }
  /**
   * Parses a "searched" CASE of the form
   * {@code case when <cond> then <expr> ... [else <expr>] end}.
   */
  static Parser<Expression> fullCase(Parser<Expression> cond, Parser<Expression> expr) {
    return Parsers.sequence(
        term("case").next(whenThens(cond, expr)),
        term("else").next(expr).optional().followedBy(term("end")),
        FullCaseExpression::new);
  }
  /** Parses one or more {@code when <cond> then <expr>} pairs. */
  private static Parser<List<Pair<Expression, Expression>>> whenThens(
      Parser<Expression> cond, Parser<Expression> expr) {
    return Parsers.pair(term("when").next(cond), term("then").next(expr)).many1();
  }
  /** Wraps {@code parser} in a required pair of parentheses. */
  static <T> Parser<T> paren(Parser<T> parser) {
    return parser.between(term("("), term(")"));
  }
  /**
   * Builds the arithmetic expression grammar on top of {@code atom}.
   * A lazy self-reference lets operands recursively contain parenthesized
   * sub-expressions and function calls; standard precedence applies
   * (unary minus > {@code * / %} > {@code + -}).
   */
  static Parser<Expression> arithmetic(Parser<Expression> atom) {
    Parser.Reference<Expression> reference = Parser.newReference();
    Parser<Expression> operand =
        Parsers.or(paren(reference.lazy()), functionCall(reference.lazy()), atom);
    Parser<Expression> parser = new OperatorTable<Expression>()
        .infixl(binary("+", Op.PLUS), 10)
        .infixl(binary("-", Op.MINUS), 10)
        .infixl(binary("*", Op.MUL), 20)
        .infixl(binary("/", Op.DIV), 20)
        .infixl(binary("%", Op.MOD), 20)
        .prefix(unary("-", Op.NEG), 50)
        .build(operand);
    reference.set(parser);
    return parser;
  }
  /**
   * Parses a full value expression: arithmetic over numbers, wildcards,
   * qualified names and CASE expressions. {@code cond} supplies the boolean
   * sub-grammar used inside searched CASE expressions.
   */
  static Parser<Expression> expression(Parser<Expression> cond) {
    Parser.Reference<Expression> reference = Parser.newReference();
    Parser<Expression> lazyExpr = reference.lazy();
    Parser<Expression> atom = Parsers.or(
        NUMBER, WILDCARD, QUALIFIED_NAME, simpleCase(lazyExpr), fullCase(cond, lazyExpr));
    Parser<Expression> expression = arithmetic(atom).label("expression");
    reference.set(expression);
    return expression;
  }
  /************************** boolean expressions ****************************/
  /** Parses any single comparison: relational operators, NULL checks, LIKE, BETWEEN. */
  static Parser<Expression> compare(Parser<Expression> expr) {
    return Parsers.or(
        compare(expr, ">", Op.GT), compare(expr, ">=", Op.GE),
        compare(expr, "<", Op.LT), compare(expr, "<=", Op.LE),
        compare(expr, "=", Op.EQ), compare(expr, "<>", Op.NE),
        nullCheck(expr), like(expr), between(expr));
  }
  /** Parses {@code x [not] like y [escape z]}; the boolean flags the positive form. */
  static Parser<Expression> like(Parser<Expression> expr) {
    return Parsers.sequence(
        expr, Parsers.or(term("like").retn(true), phrase("not like").retn(false)),
        expr, term("escape").next(expr).optional(),
        LikeExpression::new);
  }
  /** Parses {@code x is null} / {@code x is not null}. "is not" must be tried first. */
  static Parser<Expression> nullCheck(Parser<Expression> expr) {
    return Parsers.sequence(
        expr, phrase("is not").retn(Op.NOT).or(phrase("is").retn(Op.IS)), NULL,
        BinaryExpression::new);
  }
  /**
   * Builds the logical grammar ({@code not} > {@code and} > {@code or}) over
   * {@code expr}, allowing parenthesized logical sub-expressions via a lazy reference.
   */
  static Parser<Expression> logical(Parser<Expression> expr) {
    Parser.Reference<Expression> ref = Parser.newReference();
    Parser<Expression> parser = new OperatorTable<Expression>()
        .prefix(unary("not", Op.NOT), 30)
        .infixl(binary("and", Op.AND), 20)
        .infixl(binary("or", Op.OR), 10)
        .build(paren(ref.lazy()).or(expr)).label("logical expression");
    ref.set(parser);
    return parser;
  }
  /** Parses {@code x [not] between a and b}; the boolean flags the positive form. */
  static Parser<Expression> between(Parser<Expression> expr) {
    return Parsers.sequence(
        expr, Parsers.or(term("between").retn(true), phrase("not between").retn(false)),
        expr, term("and").next(expr),
        BetweenExpression::new);
  }
  /** Parses {@code exists <relation>}. */
  static Parser<Expression> exists(Parser<Relation> relation) {
    return term("exists").next(relation).map(e -> new UnaryRelationalExpression(e, Op.EXISTS));
  }
  /** Parses {@code not exists <relation>}. */
  static Parser<Expression> notExists(Parser<Relation> relation) {
    return phrase("not exists").next(relation)
        .map(e -> new UnaryRelationalExpression(e, Op.NOT_EXISTS));
  }
  /** Parses {@code x in (<subquery>)}. */
  static Parser<Expression> inRelation(Parser<Expression> expr, Parser<Relation> relation) {
    return Parsers.sequence(
        expr, Parsers.between(phrase("in ("), relation, term(")")),
        (e, r) -> new BinaryRelationalExpression(e, Op.IN, r));
  }
  /** Parses {@code x not in (<subquery>)}. */
  static Parser<Expression> notInRelation(Parser<Expression> expr, Parser<Relation> relation) {
    return Parsers.sequence(
        expr, Parsers.between(phrase("not in ("), relation, term(")")),
        (e, r) -> new BinaryRelationalExpression(e, Op.NOT_IN, r));
  }
  /** Parses {@code x in (a, b, ...)} against a literal tuple. */
  static Parser<Expression> in(Parser<Expression> expr) {
    return Parsers.sequence(
        expr, term("in").next(tuple(expr)),
        (e, t) -> new BinaryExpression(e, Op.IN, t));
  }
  /** Parses {@code x not in (a, b, ...)} against a literal tuple. */
  static Parser<Expression> notIn(Parser<Expression> expr) {
    return Parsers.sequence(
        expr, phrase("not in").next(tuple(expr)),
        (e, t) -> new BinaryExpression(e, Op.NOT_IN, t));
  }
  /**
   * Parses a complete boolean condition: comparisons, IN/NOT IN (tuples and
   * subqueries), EXISTS/NOT EXISTS, combined with not/and/or.
   */
  static Parser<Expression> condition(Parser<Expression> expr, Parser<Relation> rel) {
    Parser<Expression> atom = Parsers.or(
        compare(expr), in(expr), notIn(expr),
        exists(rel), notExists(rel), inRelation(expr, rel), notInRelation(expr, rel));
    return logical(atom);
  }
  /************************** utility methods ****************************/
  /** Parses {@code operand <name> operand} into a binary expression tagged with {@code op}. */
  private static Parser<Expression> compare(
      Parser<Expression> operand, String name, Op op) {
    return Parsers.sequence(
        operand, term(name).retn(op), operand,
        BinaryExpression::new);
  }
  /** Infix-operator parser: recognizing {@code name} yields a binary-node factory for {@code op}. */
  private static Parser<BinaryOperator<Expression>> binary(String name, Op op) {
    return term(name).retn((l, r) -> new BinaryExpression(l, op, r));
  }
  /** Prefix-operator parser: recognizing {@code name} yields a unary-node factory for {@code op}. */
  private static Parser<UnaryOperator<Expression>> unary(String name, Op op) {
    return term(name).retn(e -> new UnaryExpression(op, e));
  }
}
| jparsec/jparsec | jparsec-examples/src/main/java/org/jparsec/examples/sql/parser/ExpressionParser.java | Java | apache-2.0 | 9,523 |
/*
* Copyright 2004-2008 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.codehaus.groovy.grails.web.json;
import static org.codehaus.groovy.grails.web.json.JSONWriter.Mode.ARRAY;
import static org.codehaus.groovy.grails.web.json.JSONWriter.Mode.KEY;
import static org.codehaus.groovy.grails.web.json.JSONWriter.Mode.OBJECT;
import java.io.IOException;
import java.io.Writer;
/**
* A JSONWriter dedicated to create indented/pretty printed output.
*
* @author Siegfried Puchbauer
* @since 1.1
*/
public class PrettyPrintJSONWriter extends JSONWriter {
    /** Indent unit applied once per nesting level when none is supplied. */
    public static final String DEFAULT_INDENT_STR = " ";
    /** Platform line separator; falls back to "\n" if the system property is unset. */
    public static final String NEWLINE;
    static {
        String separator = System.getProperty("line.separator");
        NEWLINE = (separator == null) ? "\n" : separator;
    }
    // Current nesting depth: bumped on object()/array(), dropped on the end* calls.
    private int indentLevel = 0;
    private final String indentStr;
    public PrettyPrintJSONWriter(Writer w) {
        this(w, DEFAULT_INDENT_STR);
    }
    public PrettyPrintJSONWriter(Writer w, String indentStr) {
        super(w);
        this.indentStr = indentStr;
    }
    /**
     * Emits a line break followed by one indent unit per nesting level,
     * wrapping any I/O failure in a JSONException like the other writer methods.
     */
    private void breakLineAndIndent() {
        try {
            writer.write(NEWLINE);
            for (int level = 0; level < indentLevel; level++) {
                writer.write(indentStr);
            }
        }
        catch (IOException e) {
            throw new JSONException(e);
        }
    }
    @Override
    protected JSONWriter append(String s) {
        if (s == null) {
            throw new JSONException("Null pointer");
        }
        if (mode != OBJECT && mode != ARRAY) {
            throw new JSONException("Value out of sequence.");
        }
        try {
            if (mode == ARRAY) {
                // Each array element starts on its own indented line.
                if (comma) {
                    comma();
                }
                breakLineAndIndent();
            }
            writer.write(s);
        }
        catch (IOException e) {
            throw new JSONException(e);
        }
        if (mode == OBJECT) {
            mode = KEY;
        }
        comma = true;
        return this;
    }
    @Override
    protected JSONWriter end(Mode m, char c) {
        // Closing bracket goes on its own line at the (already decremented) parent level.
        breakLineAndIndent();
        return super.end(m, c);
    }
    @Override
    public JSONWriter array() {
        super.array();
        indentLevel++;
        return this;
    }
    @Override
    public JSONWriter endArray() {
        indentLevel--;
        super.endArray();
        return this;
    }
    @Override
    public JSONWriter object() {
        super.object();
        indentLevel++;
        return this;
    }
    @Override
    public JSONWriter endObject() {
        indentLevel--;
        super.endObject();
        return this;
    }
    @Override
    public JSONWriter key(String s) {
        if (s == null) {
            throw new JSONException("Null key.");
        }
        if (mode != KEY) {
            throw new JSONException("Misplaced key.");
        }
        try {
            if (comma) {
                comma();
            }
            // Each key starts on its own indented line, followed by ": ".
            breakLineAndIndent();
            writer.write(JSONObject.quote(s));
            writer.write(": ");
            comma = false;
            mode = OBJECT;
            return this;
        }
        catch (IOException e) {
            throw new JSONException(e);
        }
    }
}
| jeffbrown/grailsnolib | grails-web/src/main/groovy/org/codehaus/groovy/grails/web/json/PrettyPrintJSONWriter.java | Java | apache-2.0 | 4,112 |
/**
* Test for LOOMIA TILE Token
*
* @author Pactum IO <dev@pactum.io>
*/
import {getEvents, BigNumber} from './helpers/tools';
import expectThrow from './helpers/expectThrow';
const loomiaToken = artifacts.require('./TileToken');
const should = require('chai') // eslint-disable-line
.use(require('chai-as-promised'))
.use(require('chai-bignumber')(BigNumber))
.should();
/**
* Tile Token Contract
*/
contract('TileToken', (accounts) => {
const owner = accounts[0];
const tokenHolder1 = accounts[1];
const spendingAddress = accounts[2];
const recipient = accounts[3];
const anotherAccount = accounts[4];
const tokenHolder5 = accounts[5];
const zeroTokenHolder = accounts[6];
const zeroAddress = '0x0000000000000000000000000000000000000000';
const zero = new BigNumber(0);
const tokenSupply = new BigNumber(1046000000000000000000000000);
// Provide Tile Token instance for every test case
let tileTokenInstance;
beforeEach(async () => {
tileTokenInstance = await loomiaToken.deployed();
});
it('should instantiate the token correctly', async () => {
const name = await tileTokenInstance.NAME();
const symbol = await tileTokenInstance.SYMBOL();
const decimals = await tileTokenInstance.DECIMALS();
const totalSupply = await tileTokenInstance.totalSupply();
assert.equal(name, 'LOOMIA TILE', 'Name does not match');
assert.equal(symbol, 'TILE', 'Symbol does not match');
assert.equal(decimals, 18, 'Decimals does not match');
totalSupply.should.be.bignumber.equal(tokenSupply);
});
describe('balanceOf', function () {
describe('when the requested account has no tokens', function () {
it('returns zero', async function () {
const balance = await tileTokenInstance.balanceOf(zeroTokenHolder);
balance.should.be.bignumber.equal(zero);
});
});
describe('when the requested account has some tokens', function () {
it('returns the total amount of tokens', async function () {
const balance = await tileTokenInstance.balanceOf(owner);
balance.should.be.bignumber.equal(tokenSupply);
});
});
});
describe('transfer', function () {
describe('when the recipient is not the zero address', function () {
const to = tokenHolder1;
describe('when the sender does not have enough balance', function () {
const transferAmount = tokenSupply.add(1);
it('reverts', async function () {
await expectThrow(tileTokenInstance.transfer(to, transferAmount, {
from: owner
}));
});
});
describe('when the sender has enough balance', function () {
const transferAmount = new BigNumber(500 * 1e18);
it('transfers the requested amount', async function () {
await tileTokenInstance.transfer(to, transferAmount, {
from: owner
});
const senderBalance = await tileTokenInstance.balanceOf(owner);
senderBalance.should.be.bignumber.equal(tokenSupply.sub(transferAmount));
const recipientBalance = await tileTokenInstance.balanceOf(to);
recipientBalance.should.be.bignumber.equal(transferAmount);
});
it('emits a transfer event', async function () {
const tx = await tileTokenInstance.transfer(to, transferAmount, {
from: owner
});
// Test the event
const events = getEvents(tx, 'Transfer');
assert.equal(events[0].from, owner, 'From address does not match');
assert.equal(events[0].to, to, 'To address does not match');
(events[0].value).should.be.bignumber.equal(transferAmount);
});
});
});
describe('when the recipient is the zero address', function () {
const to = zeroAddress;
it('reverts', async function () {
await expectThrow(tileTokenInstance.transfer(to, 100, {
from: owner
}));
});
});
});
describe('approve', function () {
describe('when the spender is not the zero address', function () {
const spender = spendingAddress;
describe('when the sender has enough balance', function () {
const amount = new BigNumber(100 * 1e18);
it('emits an approval event', async function () {
const tx = await tileTokenInstance.approve(spender, amount, {
from: owner
});
// Test the event
const events = getEvents(tx, 'Approval');
assert.equal(events[0].owner, owner, 'address does not match');
assert.equal(events[0].spender, spender, 'Spender address does not match');
(events[0].value).should.be.bignumber.equal(amount);
});
describe('when there was no approved amount before', function () {
it('approves the requested amount', async function () {
await tileTokenInstance.approve(spender, amount, {
from: owner
});
const allowance = await tileTokenInstance.allowance(owner, spender);
allowance.should.be.bignumber.equal(amount);
});
});
describe('when the spender had an approved amount', function () {
beforeEach(async function () {
await tileTokenInstance.approve(spender, 1, {
from: owner
});
});
it('approves the requested amount and replaces the previous one', async function () {
await tileTokenInstance.approve(spender, amount, {
from: owner
});
const allowance = await tileTokenInstance.allowance(owner, spender);
allowance.should.be.bignumber.equal(amount);
});
});
});
describe('when the sender does not have enough balance', function () {
const amount = new BigNumber(101 * 1e18);
it('emits an approval event', async function () {
const tx = await tileTokenInstance.approve(spender, amount, {
from: owner
});
const events = getEvents(tx, 'Approval');
assert.equal(events[0].owner, owner, 'address does not match');
assert.equal(events[0].spender, spender, 'Spender address does not match');
(events[0].value).should.be.bignumber.equal(amount);
});
describe('when there was no approved amount before', function () {
it('approves the requested amount', async function () {
await tileTokenInstance.approve(spender, amount, {
from: owner
});
const allowance = await tileTokenInstance.allowance(owner, spender);
allowance.should.be.bignumber.equal(amount);
});
});
describe('when the spender had an approved amount', function () {
beforeEach(async function () {
await tileTokenInstance.approve(spender, 1, {
from: owner
});
});
it('approves the requested amount and replaces the previous one', async function () {
await tileTokenInstance.approve(spender, amount, {
from: owner
});
const allowance = await tileTokenInstance.allowance(owner, spender);
allowance.should.be.bignumber.equal(amount);
});
});
});
});
describe('when the spender is the zero address', function () {
const amount = new BigNumber(100 * 1e18);
const spender = zeroAddress;
it('approves the requested amount', async function () {
await tileTokenInstance.approve(spender, amount, {
from: owner
});
const allowance = await tileTokenInstance.allowance(owner, spender);
allowance.should.be.bignumber.equal(amount);
});
it('emits an approval event', async function () {
const tx = await tileTokenInstance.approve(spender, amount, {
from: owner
});
const events = getEvents(tx, 'Approval');
assert.equal(events[0].owner, owner, 'address does not match');
assert.equal(events[0].spender, spender, 'Spender address does not match');
(events[0].value).should.be.bignumber.equal(amount);
});
});
});
describe('transfer from', function () {
const spender = recipient;
describe('when the recipient is not the zero address', function () {
const to = anotherAccount;
describe('when the spender has enough approved balance', function () {
beforeEach(async function () {
const approvalAmount = new BigNumber(30000 * 1e18);
await tileTokenInstance.approve(spender, approvalAmount, {
from: owner
});
await tileTokenInstance.approve(spender, approvalAmount, {
from: zeroTokenHolder
});
});
describe('when the owner has enough balance', function () {
const amount = new BigNumber(30000 * 1e18);
it('transfers the requested amount', async function () {
const balanceBefore = await tileTokenInstance.balanceOf(owner);
await tileTokenInstance.transferFrom(owner, to, amount, {
from: spender
});
const senderBalance = await tileTokenInstance.balanceOf(owner);
senderBalance.should.be.bignumber.equal(balanceBefore.sub(amount));
const recipientBalance = await tileTokenInstance.balanceOf(to);
recipientBalance.should.be.bignumber.equal(amount);
});
it('decreases the spender allowance', async function () {
await tileTokenInstance.transferFrom(owner, to, amount, {
from: spender
});
const allowance = await tileTokenInstance.allowance(owner, spender);
assert(allowance.eq(0));
});
it('emits a transfer event', async function () {
const tx = await tileTokenInstance.transferFrom(owner, to, amount, {
from: spender
});
// Test the event
const events = getEvents(tx, 'Transfer');
assert.equal(events[0].from, owner, 'address does not match');
assert.equal(events[0].to, to, 'To address does not match');
(events[0].value).should.be.bignumber.equal(amount);
});
});
describe('when the owner does not have enough balance', function () {
const amount = new BigNumber(1 * 1e18);
it('reverts', async function () {
await expectThrow(tileTokenInstance.transferFrom(zeroTokenHolder, to, amount, {
from: spender
}));
});
});
});
describe('when the spender does not have enough approved balance', function () {
beforeEach(async function () {
const approvalAmount = new BigNumber(99 * 1e18);
const bigApprovalAmount = new BigNumber(999 * 1e18);
await tileTokenInstance.approve(spender, approvalAmount, {
from: owner
});
await tileTokenInstance.approve(spender, bigApprovalAmount, {
from: tokenHolder1
});
});
describe('when the owner has enough balance', function () {
const amount = new BigNumber(100 * 1e18);
it('reverts', async function () {
await expectThrow(tileTokenInstance.transferFrom(owner, to, amount, {
from: spender
}));
});
});
describe('when the owner does not have enough balance', function () {
const amount = new BigNumber(1001 * 1e18);
it('reverts', async function () {
await expectThrow(tileTokenInstance.transferFrom(tokenHolder1, to, amount, {
from: spender
}));
});
});
});
});
describe('when the recipient is the zero address', function () {
const amount = new BigNumber(100 * 1e18);
const to = zeroAddress;
beforeEach(async function () {
await tileTokenInstance.approve(spender, amount, {
from: owner
});
});
it('reverts', async function () {
await expectThrow(tileTokenInstance.transferFrom(owner, to, amount, {
from: spender
}));
});
});
});
describe('decrease approval', function () {
describe('when the spender is not the zero address', function () {
const spender = recipient;
describe('when the sender has enough balance', function () {
const amount = new BigNumber(100 * 1e18);
it('emits an approval event', async function () {
const tx = await tileTokenInstance.decreaseApproval(spender, amount, {
from: owner
});
// Test the event
const events = getEvents(tx, 'Approval');
assert.equal(events[0].owner, owner, 'address does not match');
assert.equal(events[0].spender, spender, 'Spender address does not match');
(events[0].value).should.be.bignumber.equal(0);
});
describe('when there was no approved amount before', function () {
it('keeps the allowance to zero', async function () {
await tileTokenInstance.decreaseApproval(spender, amount, {
from: owner
});
const allowance = await tileTokenInstance.allowance(owner, spender);
allowance.should.be.bignumber.equal(0);
});
});
describe('when the spender had an approved amount', function () {
beforeEach(async function () {
const approvalAmount = new BigNumber(1000 * 1e18);
await tileTokenInstance.approve(spender, approvalAmount, {
from: owner
});
});
it('decreases the spender allowance subtracting the requested amount', async function () {
await tileTokenInstance.decreaseApproval(spender, amount, {
from: owner
});
const approvalAmount = new BigNumber(1000 * 1e18);
const allowance = await tileTokenInstance.allowance(owner, spender);
allowance.should.be.bignumber.equal(approvalAmount.sub(amount));
});
});
});
describe('when the sender does not have enough balance', function () {
const amount = new BigNumber(1000 * 1e18);
it('emits an approval event', async function () {
const tx = await tileTokenInstance.decreaseApproval(spender, amount, {
from: owner
});
// Test the event
const events = getEvents(tx, 'Approval');
assert.equal(events[0].owner, owner, 'address does not match');
assert.equal(events[0].spender, spender, 'Spender address does not match');
(events[0].value).should.be.bignumber.equal(0);
});
describe('when there was no approved amount before', function () {
it('keeps the allowance to zero', async function () {
await tileTokenInstance.decreaseApproval(spender, amount, {
from: owner
});
const allowance = await tileTokenInstance.allowance(owner, spender);
allowance.should.be.bignumber.equal(0);
});
});
describe('when the spender had an approved amount', function () {
beforeEach(async function () {
const approvalAmount = new BigNumber(1001 * 1e18);
await tileTokenInstance.approve(spender, approvalAmount, {
from: owner
});
});
it('decreases the spender allowance subtracting the requested amount', async function () {
await tileTokenInstance.decreaseApproval(spender, amount, {
from: owner
});
const allowance = await tileTokenInstance.allowance(owner, spender);
allowance.should.be.bignumber.equal(1 * 1e18);
});
});
});
});
describe('when the spender is the zero address', function () {
const amount = new BigNumber(100 * 1e18);
const spender = zeroAddress;
it('decreases the requested amount', async function () {
await tileTokenInstance.decreaseApproval(spender, amount, {
from: owner
});
const allowance = await tileTokenInstance.allowance(owner, spender);
allowance.should.be.bignumber.equal(0);
});
it('emits an approval event', async function () {
const tx = await tileTokenInstance.decreaseApproval(spender, amount, {
from: owner
});
// Test the event
const events = getEvents(tx, 'Approval');
assert.equal(events[0].owner, owner, 'address does not match');
assert.equal(events[0].spender, spender, 'Spender address does not match');
(events[0].value).should.be.bignumber.equal(0);
});
});
});
describe('increase approval', function () {
const amount = new BigNumber(100 * 1e18);
describe('when the spender is not the zero address', function () {
const spender = recipient;
describe('when the sender has enough balance', function () {
it('emits an approval event', async function () {
const oldAllowance = await tileTokenInstance.allowance(owner, spender);
const tx = await tileTokenInstance.increaseApproval(spender, amount, {
from: owner
});
// Test the event
const events = getEvents(tx, 'Approval');
assert.equal(events[0].owner, owner, 'address does not match');
assert.equal(events[0].spender, spender, 'Spender address does not match');
(events[0].value).should.be.bignumber.equal(amount.add(oldAllowance));
});
describe('when there was no approved amount before', function () {
it('approves the requested amount', async function () {
const oldAllowance = await tileTokenInstance.allowance(owner, spender);
await tileTokenInstance.increaseApproval(spender, amount, {
from: owner
});
const allowance = await tileTokenInstance.allowance(owner, spender);
allowance.should.be.bignumber.equal(amount.add(oldAllowance));
});
});
describe('when the spender had an approved amount', function () {
beforeEach(async function () {
const approvalAmount = new BigNumber(1 * 1e18);
await tileTokenInstance.approve(spender, approvalAmount, {
from: owner
});
});
it('increases the spender allowance adding the requested amount', async function () {
const oldAllowance = await tileTokenInstance.allowance(owner, spender);
await tileTokenInstance.increaseApproval(spender, amount, {
from: owner
});
const allowance = await tileTokenInstance.allowance(owner, spender);
allowance.should.be.bignumber.equal(oldAllowance.add(amount));
});
});
});
describe('when the sender does not have enough balance', function () {
const amount = new BigNumber(2 * 1e18);
it('emits an approval event', async function () {
const oldAllowance = await tileTokenInstance.allowance(tokenHolder1, spender);
const tx = await tileTokenInstance.increaseApproval(spender, amount, {
from: tokenHolder1
});
// Test the event
const events = getEvents(tx, 'Approval');
assert.equal(events[0].owner, tokenHolder1, 'address does not match');
assert.equal(events[0].spender, spender, 'Spender address does not match');
(events[0].value).should.be.bignumber.equal(oldAllowance.add(amount));
});
describe('when there was no approved amount before', function () {
it('approves the requested amount', async function () {
await tileTokenInstance.increaseApproval(spender, amount, {
from: tokenHolder5
});
const allowance = await tileTokenInstance.allowance(tokenHolder5, spender);
allowance.should.be.bignumber.equal(amount);
});
});
describe('when the spender had an approved amount', function () {
beforeEach(async function () {
const approvalAmount = new BigNumber(1 * 1e18);
await tileTokenInstance.approve(spender, approvalAmount, {
from: tokenHolder5
});
});
it('increases the spender allowance adding the requested amount', async function () {
const oldAllowance = await tileTokenInstance.allowance(tokenHolder5, spender);
await tileTokenInstance.increaseApproval(spender, amount, {
from: tokenHolder5
});
const allowance = await tileTokenInstance.allowance(tokenHolder5, spender);
allowance.should.be.bignumber.equal(oldAllowance.add(amount));
});
});
});
});
describe('when the spender is the zero address', function () {
const spender = zeroAddress;
it('approves the requested amount', async function () {
await tileTokenInstance.increaseApproval(spender, amount, {
from: owner
});
const allowance = await tileTokenInstance.allowance(owner, spender);
allowance.should.be.bignumber.equal(amount);
});
it('emits an approval event', async function () {
const oldAllowance = await tileTokenInstance.allowance(owner, spender);
const tx = await tileTokenInstance.increaseApproval(spender, amount, {
from: owner
});
// Test the event
const events = getEvents(tx, 'Approval');
assert.equal(events[0].owner, owner, 'address does not match');
assert.equal(events[0].spender, spender, 'Spender address does not match');
(events[0].value).should.be.bignumber.equal(oldAllowance.add(amount));
});
});
});
});
| LOOMIA/loomia | tiletoken/test/contracts/0_TileToken.js | JavaScript | apache-2.0 | 26,410 |
/**
* Copyright (c) 2013-2019 Contributors to the Eclipse Foundation
*
* <p> See the NOTICE file distributed with this work for additional information regarding copyright
* ownership. All rights reserved. This program and the accompanying materials are made available
* under the terms of the Apache License, Version 2.0 which accompanies this distribution and is
* available at http://www.apache.org/licenses/LICENSE-2.0.txt
*/
package org.locationtech.geowave.core.store.util;
import java.util.Iterator;
import java.util.Map;
import org.locationtech.geowave.core.store.adapter.PersistentAdapterStore;
import org.locationtech.geowave.core.store.adapter.RowMergingDataAdapter;
import org.locationtech.geowave.core.store.adapter.RowMergingDataAdapter.RowTransform;
import org.locationtech.geowave.core.store.api.Index;
import org.locationtech.geowave.core.store.entities.GeoWaveRow;
import org.locationtech.geowave.core.store.operations.RowDeleter;
import org.locationtech.geowave.core.store.operations.RowWriter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Merging iterator that also rewrites merged rows back to the underlying store:
 * when a scanned row carries multiple field values, the original row is deleted
 * and the merged replacement is written in its place.
 */
public class RewritingMergingEntryIterator<T> extends MergingEntryIterator<T> {
  private static final Logger LOGGER = LoggerFactory.getLogger(RewritingMergingEntryIterator.class);
  // Sink used to persist the merged row back to the store.
  private final RowWriter writer;
  // Removes the original (unmerged) row before its replacement is written.
  private final RowDeleter deleter;
  /**
   * @param adapterStore adapter lookup for decoding rows
   * @param index the index being scanned
   * @param scannerIt raw rows from the underlying scan
   * @param mergingAdapters adapters (by internal adapter id) whose rows require merging
   * @param writer writes merged rows back to the store
   * @param deleter deletes the pre-merge rows
   */
  public RewritingMergingEntryIterator(
      final PersistentAdapterStore adapterStore,
      final Index index,
      final Iterator<GeoWaveRow> scannerIt,
      final Map<Short, RowMergingDataAdapter> mergingAdapters,
      final RowWriter writer,
      final RowDeleter deleter) {
    // NOTE(review): the null arguments correspond to superclass features unused
    // here (e.g. client filters/callbacks) -- confirm against MergingEntryIterator.
    super(adapterStore, index, scannerIt, null, null, mergingAdapters, null, null);
    this.writer = writer;
    this.deleter = deleter;
  }
  /**
   * Merges a single row's field values and persists the result.
   * Rows with fewer than two field values need no merging and are returned
   * untouched. Otherwise the original row is deleted and the delete flushed
   * before the merged row is written -- presumably so the delete cannot land
   * after (and wipe out) the rewritten row.
   */
  @Override
  protected GeoWaveRow mergeSingleRowValues(
      final GeoWaveRow singleRow,
      final RowTransform rowTransform) {
    if (singleRow.getFieldValues().length < 2) {
      return singleRow;
    }
    deleter.delete(singleRow);
    deleter.flush();
    final GeoWaveRow merged = super.mergeSingleRowValues(singleRow, rowTransform);
    writer.write(merged);
    return merged;
  }
}
| spohnan/geowave | core/store/src/main/java/org/locationtech/geowave/core/store/util/RewritingMergingEntryIterator.java | Java | apache-2.0 | 2,157 |
package plugin
/*
usage: !day
*/
import (
"strings"
"time"
"github.com/microamp/gerri/cmd"
"github.com/microamp/gerri/data"
)
// ReplyDay builds a PRIVMSG reply to the message's target containing the
// current weekday name in lower case (e.g. "monday"). It never returns an error.
func ReplyDay(pm data.Privmsg, config *data.Config) (string, error) {
	weekday := strings.ToLower(time.Now().Weekday().String())
	return cmd.Privmsg(pm.Target, weekday), nil
}
| kesara/gerri | plugin/day.go | GO | apache-2.0 | 289 |
/*
* Copyright (C) 2019 Contentful GmbH
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.contentful.java.cma;
import com.contentful.java.cma.model.CMAArray;
import com.contentful.java.cma.model.CMATag;
import io.reactivex.Flowable;
import retrofit2.Response;
import retrofit2.http.GET;
import retrofit2.http.PUT;
import retrofit2.http.Path;
import retrofit2.http.QueryMap;
import retrofit2.http.Body;
import retrofit2.http.DELETE;
import java.util.Map;
/**
 * Content Tags Service.
 *
 * <p>Retrofit endpoint definitions for CRUD operations on {@link CMATag}
 * entities, scoped by space id and environment id.
 */
interface ServiceContentTags {
  /** Fetch all tags of the given environment, optionally filtered by the query map. */
  @GET("/spaces/{space_id}/environments/{environment_id}/tags")
  Flowable<CMAArray<CMATag>> fetchAll(
      @Path("space_id") String spaceId,
      @Path("environment_id") String environmentID,
      @QueryMap Map<String, String> query
  );
  /** Create (PUT) a tag with the given id in the given environment. */
  @PUT("/spaces/{space_id}/environments/{environment_id}/tags/{tag_id}")
  Flowable<CMATag> create(
      @Path("space_id") String spaceId,
      @Path("environment_id") String environmentID,
      @Path("tag_id") String tagId,
      @Body CMATag tag);
  /** Fetch a single tag by id. */
  @GET("/spaces/{space_id}/environments/{environment_id}/tags/{tag_id}")
  Flowable<CMATag> fetchOne(
      @Path("space_id") String spaceId,
      @Path("environment_id") String environmentID,
      @Path("tag_id") String tagId
  );
  /** Update an existing tag (same verb/route as create; body carries the new state). */
  @PUT("/spaces/{space_id}/environments/{environment_id}/tags/{tag_id}")
  Flowable<CMATag> update(
      @Path("space_id") String spaceId,
      @Path("environment_id") String environmentID,
      @Path("tag_id") String tagId,
      @Body CMATag tag);
  /** Delete a tag by id; the response body is empty. */
  @DELETE("/spaces/{space_id}/environments/{environment_id}/tags/{tag_id}")
  Flowable<Response<Void>> delete(
      @Path("space_id") String spaceId,
      @Path("environment_id") String environmentID,
      @Path("tag_id") String tagId);
}
| contentful/contentful-management.java | src/main/java/com/contentful/java/cma/ServiceContentTags.java | Java | apache-2.0 | 2,309 |
package main
import (
"net/url"
"time"
"github.com/codegangsta/cli"
"github.com/michaeltrobinson/cadvisor-integration/scraper"
"github.com/signalfx/metricproxy/protocol/signalfx"
log "github.com/Sirupsen/logrus"
)
var (
sfxAPIToken string
sfxIngestURL string
clusterName string
sendInterval time.Duration
cadvisorPort int
discoveryInterval time.Duration
maxDatapoints int
kubeUser string
kubePass string
)
// init registers the "run" sub-command with the CLI application.
// Flag values are validated and copied into the package-level
// configuration variables by setupRun (the Before hook) prior to run.
func init() {
	app.Commands = append(app.Commands, cli.Command{
		Name:   "run",
		Usage:  "start the service (the default)",
		Action: run,
		Before: setupRun,
		Flags: []cli.Flag{
			cli.StringFlag{
				Name:   "sfx-ingest-url",
				EnvVar: "SFX_ENDPOINT",
				Value:  "https://ingest.signalfx.com",
				Usage:  "SignalFx ingest URL",
			},
			cli.StringFlag{
				Name:   "sfx-api-token",
				EnvVar: "SFX_API_TOKEN",
				Usage:  "SignalFx API token",
			},
			cli.StringFlag{
				Name:   "cluster-name",
				EnvVar: "CLUSTER_NAME",
				Usage:  "Cluster name will appear as dimension",
			},
			cli.DurationFlag{
				Name:   "send-interval",
				EnvVar: "SEND_INTERVAL",
				Value:  time.Second * 30,
				Usage:  "Rate at which data is queried from cAdvisor and send to SignalFx",
			},
			cli.IntFlag{
				Name:   "cadvisor-port",
				EnvVar: "CADVISOR_PORT",
				Value:  4194,
				Usage:  "Port on which cAdvisor listens",
			},
			cli.DurationFlag{
				Name:   "discovery-interval",
				EnvVar: "NODE_SERVICE_DISCOVERY_INTERVAL",
				Value:  time.Minute * 5,
				Usage:  "Rate at which nodes and services will be rediscovered",
			},
			cli.StringFlag{
				Name:   "kube-user",
				EnvVar: "KUBE_USER",
				Usage:  "Username to authenticate to kubernetes api",
			},
			cli.StringFlag{
				Name:   "kube-pass",
				EnvVar: "KUBE_PASS",
				Usage:  "Password to authenticate to kubernetes api",
			},
			cli.IntFlag{
				Name:   "max-datapoints",
				EnvVar: "MAX_DATAPOINTS",
				Value:  50,
				Usage:  "How many datapoints to batch before forwarding to SignalFX",
			},
		},
	})
}
// setupRun validates the CLI flags and copies them into the package-level
// configuration. Missing required flags print the app help and exit the
// process via log.Fatal; on success it returns nil.
func setupRun(c *cli.Context) error {
	if sfxAPIToken = c.String("sfx-api-token"); sfxAPIToken == "" {
		cli.ShowAppHelp(c)
		log.Fatal("API token is required")
	}

	if clusterName = c.String("cluster-name"); clusterName == "" {
		cli.ShowAppHelp(c)
		log.Fatal("cluster name is required")
	}

	sfxIngestURL = c.String("sfx-ingest-url")
	sendInterval = c.Duration("send-interval")
	cadvisorPort = c.Int("cadvisor-port")
	discoveryInterval = c.Duration("discovery-interval")

	kubeUser = c.String("kube-user")
	kubePass = c.String("kube-pass")
	if kubeUser == "" || kubePass == "" {
		cli.ShowAppHelp(c)
		log.Fatal("kubernetes credentials are required")
	}

	maxDatapoints = c.Int("max-datapoints")
	return nil
}
// run builds the scraper from the parsed configuration and blocks in its
// scrape loop; any error from the loop terminates the process.
func run(c *cli.Context) {
	forwarder := newSfxClient(sfxIngestURL, sfxAPIToken)

	cfg := scraper.Config{
		ClusterName:   clusterName,
		CadvisorPort:  cadvisorPort,
		KubeUser:      kubeUser,
		KubePass:      kubePass,
		MaxDatapoints: maxDatapoints,
	}

	s := scraper.New(forwarder, cfg)
	if err := s.Run(sendInterval, discoveryInterval); err != nil {
		log.WithError(err).Fatal("failure")
	}
}
// newSfxClient builds a SignalFx JSON forwarder pointing at ingestURL and
// authenticated with authToken. The URL must parse; a malformed URL is a
// programming/configuration error and panics.
func newSfxClient(ingestURL, authToken string) *signalfx.Forwarder {
	parsed, err := url.Parse(ingestURL)
	if err != nil {
		panic("failed to parse SFX ingest URL")
	}
	return signalfx.NewSignalfxJSONForwarder(parsed.String(), time.Second*10, authToken, 10, "", "", "")
}
| michaeltrobinson/cadvisor-integration | cmd/signalfx-cadvisord/run.go | GO | apache-2.0 | 3,432 |
/*
* Copyright (c) 2012. Piraso Alvin R. de Leon. All Rights Reserved.
*
* See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The Piraso licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.piraso.server.sql;
import org.piraso.server.AbstractContextLoggerBeanProcessor;
import javax.sql.DataSource;
/**
* Create a bean post processor which ensures that any bean instance of type {@link DataSource} will
* be wrap by a context logger aware instance.
*
*/
public class SQLContextLoggerBeanPostProcessor extends AbstractContextLoggerBeanProcessor<DataSource> {
    /** Registers {@link DataSource} as the bean type this post processor intercepts. */
    public SQLContextLoggerBeanPostProcessor() {
        super(DataSource.class);
    }
    /**
     * Wrap the given {@link DataSource} bean in a context-logger-aware proxy.
     *
     * @param o  the original data source bean
     * @param id the identifier passed through to the proxy (bean id)
     * @return the proxied, context-logger-aware data source
     */
    @Override
    public DataSource createProxy(DataSource o, String id) {
        return SQLContextLogger.create(o, id);
    }
}
| piraso/piraso-sql | context-logger/server/src/main/java/org/piraso/server/sql/SQLContextLoggerBeanPostProcessor.java | Java | apache-2.0 | 1,383 |
package org.lionsoul.jcseg.util;
import java.io.Serializable;
/**
* string buffer class
*
* @author chenxin<chenxin619315@gmail.com>
*/
/**
 * A lightweight growable character buffer, similar in spirit to
 * {@link StringBuilder} but with a minimal API.
 *
 * <p>Capacity grows geometrically as content is appended. This class is not
 * thread safe.</p>
 *
 * @author chenxin<chenxin619315@gmail.com>
 */
public class IStringBuffer implements Serializable
{
    private static final long serialVersionUID = 1L;

    // backing char array; only the first {@code count} entries are valid
    private char buff[];

    // logical length: number of valid chars currently stored in buff
    private int count;

    /**
     * Create a buffer with a default capacity of 16.
     */
    public IStringBuffer()
    {
        this(16);
    }

    /**
     * Create a buffer with the specified initial capacity.
     *
     * @param length initial capacity; must be positive
     * @throws IllegalArgumentException if length is not positive
     */
    public IStringBuffer( int length )
    {
        if ( length <= 0 ) {
            throw new IllegalArgumentException("length <= 0");
        }
        buff  = new char[length];
        count = 0;
    }

    /**
     * Create a buffer initialised with the specified string.
     *
     * @param str initial contents (extra headroom of 16 chars is reserved)
     */
    public IStringBuffer( String str )
    {
        this(str.length()+16);
        append(str);
    }

    /**
     * Resize the internal array to the given capacity, preserving as many of
     * the existing chars as fit.
     *
     * @param length the new capacity; must be positive
     */
    private void resizeTo( int length )
    {
        if ( length <= 0 ) {
            throw new IllegalArgumentException("length <= 0");
        }
        if ( length != buff.length ) {
            int len = Math.min(buff.length, length);
            char[] obuff = buff;
            buff = new char[length];
            // bulk copy instead of a manual element-by-element loop
            System.arraycopy(obuff, 0, buff, 0, len);
        }
    }

    /**
     * Append a string to the buffer.
     *
     * @param str string to append; must not be null
     * @return this buffer, for call chaining
     * @throws NullPointerException if str is null
     */
    public IStringBuffer append( String str )
    {
        if ( str == null ) {
            throw new NullPointerException();
        }
        int len = str.length();
        if ( count + len > buff.length ) {
            resizeTo( (count + len) * 2 + 1 );
        }
        // getChars is a bulk copy; faster than a charAt() loop
        str.getChars(0, len, buff, count);
        count += len;
        return this;
    }

    /**
     * Append part of a char array to the buffer.
     *
     * <p>BUG FIX: a zero length is now a no-op. Previously {@code length == 0}
     * threw IndexOutOfBoundsException, which also made it impossible to append
     * the (empty) tail of an array via {@link #append(char[], int)}.</p>
     *
     * @param chars  source array; must not be null
     * @param start  start index within chars (0-based, non-negative)
     * @param length number of chars to append (non-negative)
     * @return this buffer, for call chaining
     * @throws NullPointerException      if chars is null
     * @throws IndexOutOfBoundsException if start or length is negative, or
     *                                   start + length exceeds chars.length
     */
    public IStringBuffer append( char[] chars, int start, int length )
    {
        if ( chars == null ) {
            throw new NullPointerException();
        }
        if ( start < 0 ) {
            throw new IndexOutOfBoundsException();
        }
        if ( length < 0 ) {
            throw new IndexOutOfBoundsException();
        }
        if ( start + length > chars.length ) {
            throw new IndexOutOfBoundsException();
        }
        if ( length == 0 ) {
            return this;    // nothing to copy
        }
        if ( count + length > buff.length ) {
            resizeTo( (count + length) * 2 + 1 );
        }
        System.arraycopy(chars, start, buff, count, length);
        count += length;
        return this;
    }

    /**
     * Append the tail of a char array (from start to the end) to the buffer.
     * An empty tail (start == chars.length) is a no-op.
     *
     * @param chars source array
     * @param start the start index
     * @return this buffer, for call chaining
     */
    public IStringBuffer append( char[] chars, int start )
    {
        return append(chars, start, chars.length - start);
    }

    /**
     * Append a whole char array to the buffer.
     *
     * @param chars source array
     * @return this buffer, for call chaining
     */
    public IStringBuffer append( char[] chars )
    {
        return append(chars, 0, chars.length);
    }

    /**
     * Append a single char to the buffer.
     *
     * @param c the char to append
     * @return this buffer, for call chaining
     */
    public IStringBuffer append( char c )
    {
        if ( count == buff.length ) {
            resizeTo( buff.length * 2 + 1 );
        }
        buff[count++] = c;
        return this;
    }

    /**
     * Append "true" or "false" for the given boolean value.
     *
     * @param bool value to append
     * @return this buffer, for call chaining
     */
    public IStringBuffer append(boolean bool)
    {
        return append(bool ? "true" : "false");
    }

    /**
     * Append the decimal representation of a short value.
     *
     * @param shortv value to append
     * @return this buffer, for call chaining
     */
    public IStringBuffer append(short shortv)
    {
        return append(String.valueOf(shortv));
    }

    /**
     * Append the decimal representation of an int value.
     *
     * @param intv value to append
     * @return this buffer, for call chaining
     */
    public IStringBuffer append(int intv)
    {
        return append(String.valueOf(intv));
    }

    /**
     * Append the decimal representation of a long value.
     *
     * @param longv value to append
     * @return this buffer, for call chaining
     */
    public IStringBuffer append(long longv)
    {
        return append(String.valueOf(longv));
    }

    /**
     * Append the string representation of a float value.
     *
     * @param floatv value to append
     * @return this buffer, for call chaining
     */
    public IStringBuffer append(float floatv)
    {
        return append(String.valueOf(floatv));
    }

    /**
     * Append the string representation of a double value.
     *
     * @param doublev value to append
     * @return this buffer, for call chaining
     */
    public IStringBuffer append(double doublev)
    {
        return append(String.valueOf(doublev));
    }

    /**
     * Return the logical length of the buffer (number of valid chars).
     *
     * @return the current length
     */
    public int length()
    {
        return count;
    }

    /**
     * Override the logical length of the buffer. The backing array is not
     * touched; callers must ensure the new length does not exceed the current
     * capacity, otherwise subsequent reads will fail.
     *
     * @param length the new logical length
     * @return the previous length
     */
    public int setLength(int length)
    {
        int oldCount = count;
        count = length;
        return oldCount;
    }

    /**
     * Get the char at the specified position.
     *
     * @param idx 0-based index; must be within [0, length())
     * @return the char at idx
     * @throws IndexOutOfBoundsException if idx is out of range
     */
    public char charAt( int idx )
    {
        if ( idx < 0 ) {
            throw new IndexOutOfBoundsException("idx{"+idx+"} < 0");
        }
        if ( idx >= count ) {
            throw new IndexOutOfBoundsException("idx{"+idx+"} >= buffer.length");
        }
        return buff[idx];
    }

    /**
     * Return the last char in the buffer.
     *
     * @return the last char
     * @throws IndexOutOfBoundsException if the buffer is empty
     */
    public char last()
    {
        if ( count == 0 ) {
            throw new IndexOutOfBoundsException("Empty buffer");
        }
        return buff[count-1];
    }

    /**
     * Return the first char in the buffer.
     *
     * @return the first char
     * @throws IndexOutOfBoundsException if the buffer is empty
     */
    public char first()
    {
        if ( count == 0 ) {
            throw new IndexOutOfBoundsException("Empty buffer");
        }
        return buff[0];
    }

    /**
     * Delete the char at the specified position, shifting the tail left.
     *
     * @param idx 0-based index of the char to remove
     * @return this buffer, for call chaining
     * @throws IndexOutOfBoundsException if idx is out of range
     */
    public IStringBuffer deleteCharAt( int idx )
    {
        if ( idx < 0 ) {
            throw new IndexOutOfBoundsException("idx < 0");
        }
        if ( idx >= count ) {
            throw new IndexOutOfBoundsException("idx >= buffer.length");
        }
        // shift the tail left by one; replaces the old manual copy loop
        System.arraycopy(buff, idx + 1, buff, idx, count - idx - 1);
        count--;
        return this;
    }

    /**
     * Overwrite the char at the specified index.
     *
     * @param idx 0-based index; must be within [0, length())
     * @param chr the replacement char
     * @throws IndexOutOfBoundsException if idx is out of range
     */
    public void set(int idx, char chr)
    {
        if ( idx < 0 ) {
            throw new IndexOutOfBoundsException("idx < 0");
        }
        if ( idx >= count ) {
            throw new IndexOutOfBoundsException("idx >= buffer.length");
        }
        buff[idx] = chr;
    }

    /**
     * Return the internal backing array. Note this is the live array (no
     * copy) and its length is the capacity, not the logical length.
     *
     * @return the backing char array
     */
    public char[] buffer()
    {
        return buff;
    }

    /**
     * Clear the buffer by resetting the logical length to 0.
     * The backing array keeps its capacity.
     *
     * @return this buffer, for call chaining
     */
    public IStringBuffer clear()
    {
        count = 0;
        return this;
    }

    /**
     * Return the string of the current buffer contents.
     *
     * @return String the first {@code length()} chars as a String
     */
    public String toString()
    {
        return new String(buff, 0, count);
    }
}
| lionsoul2014/jcseg | jcseg-core/src/main/java/org/lionsoul/jcseg/util/IStringBuffer.java | Java | apache-2.0 | 7,966 |
package net.happybrackets.core.control;
import com.google.gson.Gson;
import de.sciss.net.OSCMessage;
import net.happybrackets.core.Device;
import net.happybrackets.core.OSCVocabulary;
import net.happybrackets.core.scheduling.HBScheduler;
import net.happybrackets.core.scheduling.ScheduledEventListener;
import net.happybrackets.core.scheduling.ScheduledObject;
import net.happybrackets.device.HB;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.*;
/**
* This class facilitates sending message values between sketches,
* devices, and a graphical environment.
* The values can be represented as sliders, text boxes, check boxes, and buttons
*
* A message can either be an integer, a double, a string, a boolean, a trigger or a complete class.
*
* Although similar to the send and receive objects in Max in that the name and type
* parameter of the {@link DynamicControl} determines message interconnection,
* DynamicControls also have an attribute called {@link ControlScope}, which dictates how far (in
* a topological sense) the object can reach in order to communicate with other
* DynamicControls. DynamicControls can be bound to different objects, the default being the class that instantiated it.
*
* <br>The classes are best accessed through {@link DynamicControlParent} abstractions
*
*/
public class DynamicControl implements ScheduledEventListener {
static Gson gson = new Gson();
// flag for testing
static boolean ignoreName = false;
private boolean isPersistentControl = false;
/**
* Set ignore name for testing
* @param ignore true to ignore
*/
static void setIgnoreName(boolean ignore){
ignoreName = true;
}
static int deviceSendId = 0; // we will use this to number all messages we send. They can be filtered at receiver by testing last message mapped
/**
* Define a list of target devices. Can be either device name or IP address
* If it is a device name, there will be a lookup of stored device names
*/
Set<String> targetDevices = new HashSet<>();
// we will map Message ID to device name. If the last ID is in this map, we will ignore message
static Map<String, Integer> messageIdMap = new Hashtable<>();
/**
* See if we will process a control message based on device name and message_id
* If the message_id is mapped against the device_name, ignore message, otherwise store mapping and return true;
* @param device_name the device name
* @param message_id the message_id
* @return true if we are going to process this message
*/
public static boolean enableProcessControlMessage(String device_name, int message_id){
boolean ret = true;
if (messageIdMap.containsKey(device_name)) {
if (messageIdMap.get(device_name) == message_id) {
ret = false;
}
}
if (ret){
messageIdMap.put(device_name, message_id);
}
return ret;
}
// The device name that set last message to this control
// A Null value will indicate that it was this device
String sendingDevice = null;
/**
* Get the name of the device that sent the message. If the message was local, will return this device name
* @return name of device that sent message
*/
public String getSendingDevice(){
String ret = sendingDevice;
if (ret == null) {
ret = deviceName;
}
return ret;
}
/**
* Define how we want the object displayed in the plugin
*/
public enum DISPLAY_TYPE {
DISPLAY_DEFAULT,
DISPLAY_HIDDEN,
DISPLAY_DISABLED,
DISPLAY_ENABLED_BUDDY,
DISPLAY_DISABLED_BUDDY
}
    /**
     * Return all mapped device addresses for this control.
     * Entries are device names or IP-address strings added via the
     * addTargetDevice/setTargetDevice methods. Note the live set is returned,
     * not a defensive copy.
     * @return returns the set of mapped targeted devices
     */
    public Set<String> getTargetDeviceAddresses(){
        return targetDevices;
    }
    @Override
    public void doScheduledEvent(double scheduledTime, Object param) {
        // Scheduled delivery of a value previously queued as a FutureControlMessage.
        FutureControlMessage message = (FutureControlMessage) param;
        this.objVal        = message.controlValue;
        this.executionTime = 0; // value is applied now, so clear the pending execution time
        this.sendingDevice = message.sourceDevice;
        notifyLocalListeners();
        // local-only messages are not propagated to value-set listeners
        if (!message.localOnly) {
            notifyValueSetListeners();
        }
        // the message has been consumed; drop it from the pending list
        synchronized (futureMessageListLock) {
            futureMessageList.remove(message);
        }
    }
/**
* Add one or more device names or addresses as strings to use in {@link ControlScope#TARGET} Message
* @param deviceNames device name or IP Address
*/
public synchronized void addTargetDevice(String... deviceNames){
for (String name:
deviceNames) {
targetDevices.add(name);
}
}
/**
* Remove all set target devices and replace with the those provided as arguments
* Adds device address as a string or device name to {@link ControlScope#TARGET} Message
* @param deviceNames device name or IP Address
*/
public synchronized void setTargetDevice(String... deviceNames){
targetDevices.clear();
addTargetDevice(deviceNames);
}
/**
* Remove all set target devices and replace with the those provided as arguments
* Adds device addresses to {@link ControlScope#TARGET} Message
* @param inetAddresses device name or IP Address
*/
public synchronized void setTargetDevice(InetAddress... inetAddresses){
targetDevices.clear();
addTargetDevice(inetAddresses);
}
/**
* Add one or more device {@link InetAddress} for use in {@link ControlScope#TARGET} Message
* @param inetAddresses the target addresses to add
*/
public void addTargetDevice(InetAddress... inetAddresses){
for (InetAddress address:
inetAddresses) {
targetDevices.add(address.getHostAddress());
}
}
    /**
     * Clear all devices as Targets.
     * After this call the control has no target devices mapped.
     */
    public synchronized void clearTargetDevices(){
        targetDevices.clear();
    }
/**
* Remove one or more device names or addresses as a string.
* For use in {@link ControlScope#TARGET} Messages
* @param deviceNames device names or IP Addresses to remove
*/
public synchronized void removeTargetDevice(String... deviceNames){
for (String name:
deviceNames) {
targetDevices.remove(name);
}
}
/**
* Remove one or more {@link InetAddress} for use in {@link ControlScope#TARGET} Message
* @param inetAddresses the target addresses to remove
*/
public void removeTargetDevice(InetAddress... inetAddresses){
for (InetAddress address:
inetAddresses) {
targetDevices.remove(address.getHostAddress());
}
}
    /**
     * Create an Interface to listen to.
     * Implementations receive this control whenever its value is updated.
     */
    public interface DynamicControlListener {
        void update(DynamicControl control);
    }
    /** Listener invoked when this control's {@link ControlScope} changes. */
    public interface ControlScopeChangedListener {
        void controlScopeChanged(ControlScope new_scope);
    }
    /**
     * The way Create Messages are sent.
     * The ordinal position of each constant defines its argument slot in the
     * corresponding OSC message, so do not reorder constants.
     */
    private enum CREATE_MESSAGE_ARGS {
        DEVICE_NAME,
        MAP_KEY,
        CONTROL_NAME,
        PARENT_SKETCH_NAME,
        PARENT_SKETCH_ID,
        CONTROL_TYPE,
        OBJ_VAL,
        MIN_VAL,
        MAX_VAL,
        CONTROL_SCOPE,
        DISPLAY_TYPE_VAL
    }
    // Define the Arguments used in an Update message (ordinal = argument slot)
    private enum UPDATE_MESSAGE_ARGS {
        DEVICE_NAME,
        CONTROL_NAME,
        CONTROL_TYPE,
        MAP_KEY,
        OBJ_VAL,
        CONTROL_SCOPE,
        DISPLAY_TYPE_VAL,
        MIN_VALUE,
        MAX_VALUE
    }
    // Define Global Message arguments. Ordinals define OSC argument positions
    // (see OSC_TRANSMIT_ARRAY_ARG below, which is derived from MESSAGE_ID.ordinal()).
    public enum NETWORK_TRANSMIT_MESSAGE_ARGS {
        DEVICE_NAME,
        CONTROL_NAME,
        CONTROL_TYPE,
        OBJ_VAL,
        EXECUTE_TIME_MLILI_MS, // Most Significant Int of Milliseconds - stored as int
        EXECUTE_TIME_MLILI_LS, // Least Significant Bit of Milliseconds - stored as int
        EXECUTE_TIME_NANO, // Number on Nano Seconds - stored as int
        MESSAGE_ID // we will increment an integer and send the message multiple times. We will ignore message if last message was this one
    }
    // Define Device Name Message arguments
    private enum DEVICE_NAME_ARGS {
        DEVICE_NAME
    }
// Define where our first Array type global dynamic control message is in OSC
final static int OSC_TRANSMIT_ARRAY_ARG = NETWORK_TRANSMIT_MESSAGE_ARGS.MESSAGE_ID.ordinal() + 1;
// When an event is scheduled in the future, we will create one of these and schedule it
    // Holds a control value scheduled for delivery at a future time; consumed
    // by doScheduledEvent and tracked in futureMessageList until delivered.
    class FutureControlMessage{
        /**
         * Create a Future Control message
         * @param source_device the source device name
         * @param value the value to be executed
         * @param execution_time the time the value needs to be executed
         */
        public FutureControlMessage(String source_device, Object value, double execution_time){
            sourceDevice = source_device;
            controlValue = value;
            executionTime = execution_time;
        }
        // the value to apply to the control when the scheduled time arrives
        Object controlValue;
        // scheduler time at which the value should be applied
        double executionTime;
        boolean localOnly = false; // if we are local only, we will not sendValue changed listeners
        // device that originated this message
        String sourceDevice;
        /// have a copy of our pending scheduled object in case we want to cancel it
        ScheduledObject pendingSchedule = null;
    }
static ControlMap controlMap = ControlMap.getInstance();
private static final Object controlMapLock = new Object();
private static int instanceCounter = 0; // we will use this to order the creation of our objects and give them a unique number on device
private final Object instanceCounterLock = new Object();
private final Object valueChangedLock = new Object();
private final String controlMapKey;
private List<DynamicControlListener> controlListenerList = new ArrayList<>();
private List<DynamicControlListener> globalControlListenerList = new ArrayList<>();
private List<ControlScopeChangedListener> controlScopeChangedList = new ArrayList<>();
private List<FutureControlMessage> futureMessageList = new ArrayList<>();
// This listener is only called when value on control set
private List<DynamicControlListener> valueSetListenerList = new ArrayList<>();
// Create Object to lock shared resources
private final Object controlScopeChangedLock = new Object();
private final Object controlListenerLock = new Object();
private final Object globalListenerLock = new Object();
private final Object valueSetListenerLock = new Object();
private final Object futureMessageListLock = new Object();
static boolean disableScheduler = false; // set flag if we are going to disable scheduler - eg, in GUI
    /**
     * Create the text we will display at the beginning of tooltip.
     * @param tooltipPrefix The starting text of the tooltip
     * @return this object, to allow call chaining
     */
    public DynamicControl setTooltipPrefix(String tooltipPrefix) {
        this.tooltipPrefix = tooltipPrefix;
        return this;
    }
private String tooltipPrefix = "";
// The Object sketch that this control was created in
private Object parentSketch = null;
final int parentId;
private final String deviceName;
private String parentSketchName;
private ControlType controlType;
final String controlName;
private ControlScope controlScope = ControlScope.SKETCH;
private Object objVal = 0;
private Object maximumDisplayValue = 0;
private Object minimumDisplayValue = 0;
// This is the time we want to execute the control value
private double executionTime = 0;
DISPLAY_TYPE displayType = DISPLAY_TYPE.DISPLAY_DEFAULT; // Whether the control is displayType on control Screen
    /**
     * Set whether we disable setting all values in context of scheduler.
     * Affects all DynamicControls on this device (static flag), e.g. in a GUI.
     * @param disabled set true to disable
     */
    public static void setDisableScheduler(boolean disabled){
        disableScheduler = disabled;
    }
    /**
     * Whether we disable the control on the screen.
     * @return How we will display the control on screen
     */
    public DISPLAY_TYPE getDisplayType(){
        return displayType;
    }
/**
* Set how we will display control object on the screen
* @param display_type how we will display control
* @return this
*/
public DynamicControl setDisplayType(DISPLAY_TYPE display_type){
displayType = display_type;
notifyValueSetListeners();
//notifyLocalListeners();
return this;
}
    /**
     * Returns the JVM execution time we last used when we set the value.
     * A value of 0 means the last value was applied immediately (no pending
     * scheduled execution).
     * @return lastExecution time set
     */
    public double getExecutionTime(){
        return executionTime;
    }
/**
* Convert a float or int into required number type based on control. If not a FLOAT or INT, will just return value
* @param control_type the control type
* @param source_value the value we want
* @return the converted value
*/
static private Object convertValue (ControlType control_type, Object source_value) {
Object ret = source_value;
// Convert if we are a float control
if (control_type == ControlType.FLOAT) {
if (source_value == null){
ret = 0.0;
}else if (source_value instanceof Integer) {
Integer i = (Integer) source_value;
double f = i.doubleValue();
ret = f;
}else if (source_value instanceof Double) {
Double d = (Double) source_value;
ret = d;
}else if (source_value instanceof Long) {
Long l = (Long) source_value;
double f = l.doubleValue();
ret = f;
} else if (source_value instanceof Float) {
double f = (Float) source_value;
ret = f;
} else if (source_value instanceof String) {
double f = Double.parseDouble((String)source_value);
ret = f;
}
// Convert if we are an int control
} else if (control_type == ControlType.INT) {
if (source_value == null){
ret = 0;
}else if (source_value instanceof Float) {
Float f = (Float) source_value;
Integer i = f.intValue();
ret = i;
}else if (source_value instanceof Double) {
Double d = (Double) source_value;
Integer i = d.intValue();
ret = i;
}else if (source_value instanceof Long) {
Long l = (Long) source_value;
Integer i = l.intValue();
ret = i;
}
// Convert if we are a BOOLEAN control
} else if (control_type == ControlType.BOOLEAN) {
if (source_value == null){
ret = 0;
}if (source_value instanceof Integer) {
Integer i = (Integer) source_value;
Boolean b = i != 0;
ret = b;
}else if (source_value instanceof Long) {
Long l = (Long) source_value;
Integer i = l.intValue();
Boolean b = i != 0;
ret = b;
}
// Convert if we are a TRIGGER control
}else if (control_type == ControlType.TRIGGER) {
if (source_value == null) {
ret = System.currentTimeMillis();
}
// Convert if we are a TEXT control
}else if (control_type == ControlType.TEXT) {
if (source_value == null) {
ret = "";
}
}
return ret;
}
    /**
     * Get the Sketch or class object linked to this control.
     * If no parent was supplied at construction, this is a placeholder Object.
     * @return the parentSketch or Object
     */
    public Object getParentSketch() {
        return parentSketch;
    }
    /**
     * This is a private constructor used to initialise constant attributes of this object
     *
     * @param parent_sketch the object calling - typically this
     * @param control_type The type of control you want to create
     * @param name The name we will give to differentiate between different controls in this class
     * @param initial_value The initial value of the control
     * @param display_type how we want to display the object
     *
     */
    private DynamicControl(Object parent_sketch, ControlType control_type, String name, Object initial_value, DISPLAY_TYPE display_type) {
        // a null parent is replaced with a placeholder so parentId / class name are valid
        if (parent_sketch == null){
            parent_sketch = new Object();
        }
        displayType = display_type;
        parentSketch = parent_sketch;
        parentSketchName = parent_sketch.getClass().getName();
        controlType = control_type;
        controlName = name;
        // coerce the initial value to the canonical type for this control
        objVal = convertValue (control_type, initial_value);
        parentId = parent_sketch.hashCode();
        deviceName = Device.getDeviceName();
        // build a device-unique map key from a monotonically increasing counter
        synchronized (instanceCounterLock) {
            controlMapKey = Device.getDeviceName() + instanceCounter;
            instanceCounter++;
        }
    }
/**
* Ascertain the Control Type based on the Value
* @param value the value we are obtaing a control value from
* @return a control type
*/
public static ControlType getControlType(Object value){
ControlType ret = ControlType.OBJECT;
if (value == null){
ret = ControlType.TRIGGER;
}
else if (value instanceof Float || value instanceof Double){
ret = ControlType.FLOAT;
}
else if (value instanceof Boolean){
ret = ControlType.BOOLEAN;
}
else if (value instanceof String){
ret = ControlType.TEXT;
}
else if (value instanceof Integer || value instanceof Long){
ret = ControlType.INT;
}
return ret;
}
    /**
     * A dynamic control that can be accessed from outside this sketch
     * it is created with the sketch object that contains it along with the type
     * The control type is inferred from the initial value.
     *
     * @param name The name we will give to associate it with other DynamicControls with identical ControlScope and type.
     * @param initial_value The initial value of the control
     */
    public DynamicControl(String name, Object initial_value) {
        this(new Object(), getControlType(initial_value), name, initial_value, DISPLAY_TYPE.DISPLAY_DEFAULT);
        // register in the shared ControlMap so the control can participate in scope messaging
        synchronized (controlMapLock) {
            controlMap.addControl(this);
        }
    }
    /**
     * A dynamic control that can be accessed from outside this sketch
     * it is created with the sketch object that contains it along with the type
     *
     * @param control_type The type of control message you want to send
     * @param name The name we will give to associate it with other DynamicControls with identical ControlScope and type.
     * @param initial_value The initial value of the control
     */
    public DynamicControl(ControlType control_type, String name, Object initial_value) {
        this(new Object(), control_type, name, initial_value, DISPLAY_TYPE.DISPLAY_DEFAULT);
        // register in the shared ControlMap
        synchronized (controlMapLock) {
            controlMap.addControl(this);
        }
    }
    /**
     * A dynamic control that can be accessed from outside this sketch
     * it is created with the sketch object that contains it along with the type.
     * The initial value is null (coerced to the control type's default).
     * @param parent_sketch the object calling - typically this, however, you can use any class object
     * @param control_type The type of control message you want to send
     * @param name The name we will give to associate it with other DynamicControls with identical ControlScope and type.
     */
    public DynamicControl(Object parent_sketch, ControlType control_type, String name) {
        this(parent_sketch, control_type, name, null, DISPLAY_TYPE.DISPLAY_DEFAULT);
        // register in the shared ControlMap
        synchronized (controlMapLock) {
            controlMap.addControl(this);
        }
    }
    /**
     * A dynamic control that can be accessed from outside this sketch
     * it is created with the sketch object that contains it along with the type.
     * The initial value is the control type's default (see convertValue).
     *
     * @param control_type The type of control message you want to send
     * @param name The name we will give to associate it with other DynamicControls with identical ControlScope and type.
     */
    public DynamicControl(ControlType control_type, String name) {
        this(new Object(), control_type, name, convertValue(control_type, null), DISPLAY_TYPE.DISPLAY_DEFAULT);
        // register in the shared ControlMap
        synchronized (controlMapLock) {
            controlMap.addControl(this);
        }
    }
    /**
     * A dynamic control that can be accessed from outside this sketch
     * it is created with the sketch object that contains it along with the type
     *
     * @param parent_sketch the object calling - typically this, however, you can use any class object
     * @param control_type The type of control message you want to send
     * @param name The name we will give to associate it with other DynamicControls with identical ControlScope and type.
     * @param initial_value The initial value of the control
     */
    public DynamicControl(Object parent_sketch, ControlType control_type, String name, Object initial_value) {
        this(parent_sketch, control_type, name, initial_value, DISPLAY_TYPE.DISPLAY_DEFAULT);
        // register in the shared ControlMap
        synchronized (controlMapLock) {
            controlMap.addControl(this);
        }
    }
    /**
     * Set this control as a persistent control so it does not get removed on reset.
     * @return this, to allow call chaining
     */
    public DynamicControl setPersistentController(){
        controlMap.addPersistentControl(this);
        isPersistentControl = true;
        return this;
    }
    /**
     * See if control is a persistent control (set via setPersistentController).
     * @return true if a persistent control
     */
    public boolean isPersistentControl() {
        return isPersistentControl;
    }
/**
* A dynamic control that can be accessed from outside
* it is created with the sketch object that contains it along with the type
*
* @param parent_sketch the object calling - typically this, however, you can use any class object
* @param control_type The type of control message you want to send
* @param name The name we will give to associate it with other DynamicControls with identical ControlScope and type.
* @param initial_value The initial value of the control
* @param min_value The minimum display value of the control. Only used for display purposes
* @param max_value The maximum display value of the control. Only used for display purposes
*/
public DynamicControl(Object parent_sketch, ControlType control_type, String name, Object initial_value, Object min_value, Object max_value) {
this(parent_sketch, control_type, name, initial_value, DISPLAY_TYPE.DISPLAY_DEFAULT);
minimumDisplayValue = convertValue (control_type, min_value);
maximumDisplayValue = convertValue (control_type, max_value);
synchronized (controlMapLock) {
controlMap.addControl(this);
}
}
    /**
     * A dynamic control that can be accessed from outside
     * it is created with the sketch object that contains it along with the type
     *
     * @param parent_sketch the object calling - typically this, however, you can use any class object
     * @param control_type The type of control message you want to send
     * @param name The name we will give to associate it with other DynamicControls with identical ControlScope and type.
     * @param initial_value The initial value of the control
     * @param min_value The minimum display value of the control. Only used for display purposes
     * @param max_value The maximum display value of the control. Only used for display purposes
     * @param display_type The way we want the control displayed
     */
    public DynamicControl(Object parent_sketch, ControlType control_type, String name, Object initial_value, Object min_value, Object max_value, DISPLAY_TYPE display_type) {
        this(parent_sketch, control_type, name, initial_value, display_type);
        // display range only constrains GUI widgets, never the values carried in messages
        minimumDisplayValue = convertValue (control_type, min_value);
        maximumDisplayValue = convertValue (control_type, max_value);
        // register in the shared control map so scope matching and GUIs can find this control
        synchronized (controlMapLock) {
            controlMap.addControl(this);
        }
    }
    /**
     * A dynamic control that can be accessed from outside
     * it is created without an explicit parent sketch; an anonymous Object is used
     * as the parent, so this control will not match any other control at SKETCH scope.
     *
     * @param control_type The type of control message you want to send
     * @param name The name we will give to associate it with other DynamicControls with identical ControlScope and type.
     * @param initial_value The initial value of the control
     * @param min_value The minimum display value of the control. Only used for display purposes
     * @param max_value The maximum display value of the control. Only used for display purposes
     */
    public DynamicControl(ControlType control_type, String name, Object initial_value, Object min_value, Object max_value) {
        this(new Object(), control_type, name, initial_value, DISPLAY_TYPE.DISPLAY_DEFAULT);
        minimumDisplayValue = convertValue (control_type, min_value);
        maximumDisplayValue = convertValue (control_type, max_value);
        synchronized (controlMapLock) {
            controlMap.addControl(this);
        }
    }
    /**
     * Get the type of control we want
     * @return The {@link ControlType} of value this control carries
     */
    public ControlType getControlType(){
        return controlType;
    }
    /**
     * Get the scope of this control. Can be Sketch, Class, Device, or global
     * @return The {@link ControlScope} this control matches against
     */
    public ControlScope getControlScope(){
        return controlScope;
    }
    /**
     * Changed the scope that the control has. It will update control map so the correct events will be generated based on its scope
     * @param new_scope The new Control Scope
     * @return this object
     */
    public synchronized DynamicControl setControlScope(ControlScope new_scope)
    {
        ControlScope old_scope = controlScope;
        if (old_scope != new_scope) {
            controlScope = new_scope;
            // tell GUI/value listeners the control definition changed
            notifyValueSetListeners();
            // prevent control scope from changing the value
            //notifyLocalListeners();
            // tell scope-change listeners last, once the new scope is in place
            notifyControlChangeListeners();
        }
        return this;
    }
/**
* Get the Dynamic control based on Map key
*
* @param map_key the string that we are using as the key
* @return the Object associated with this control
*/
public static DynamicControl getControl(String map_key) {
DynamicControl ret = null;
synchronized (controlMapLock) {
ret = controlMap.getControl(map_key);
}
return ret;
}
    /**
     * Update the parameters of this control with another. This would have been caused by an object having other than SKETCH control scope
     * If the parameters are changed, this object will notify it's listeners that a change has occurred
     * @param mirror_control The control that we are copying from
     * @return this object
     */
    public DynamicControl updateControl(DynamicControl mirror_control){
        if (mirror_control != null) {
            // first check our scope and type are the same
            boolean scope_matches = getControlScope() == mirror_control.getControlScope() && getControlType() == mirror_control.getControlType();
            if (scope_matches)
            {
                // Now we need to check whether the scope matches us
                if (getControlScope() == ControlScope.SKETCH)
                {
                    // SKETCH scope requires the identical parent object instance
                    scope_matches = this.parentSketch == mirror_control.parentSketch && this.parentSketch != null;
                }
                // Now we need to check whether the scope matches us
                else if (getControlScope() == ControlScope.CLASS)
                {
                    // CLASS scope matches on the parent's class name
                    scope_matches = this.parentSketchName.equals(mirror_control.parentSketchName);
                }
                else if (getControlScope() == ControlScope.DEVICE){
                    scope_matches = this.deviceName.equals(mirror_control.deviceName);
                }
                else if (getControlScope() == ControlScope.TARGET){
                    // check if our mirror has this address
                    scope_matches = mirror_control.targetsThisDevice();
                }
                // Otherwise it must be global. We have a match
            }
            if (scope_matches) {
                // do not use setters as we only want to generate one notifyLocalListeners
                boolean changed = false;
                if (mirror_control.executionTime <= 0.0) { // this needs to be done now
                    if (!objVal.equals(mirror_control.objVal)) {
                        //objVal = mirror_control.objVal; // let this get done inside the scheduleValue return
                        changed = true;
                    }
                    if (changed) {
                        scheduleValue(null, mirror_control.objVal, 0);
                    }
                }
                else
                {
                    // future-dated value: always hand it to the scheduler, even if equal to current
                    scheduleValue(null, mirror_control.objVal, mirror_control.executionTime);
                }
            }
        }
        return this;
    }
/**
* Check whether this device is targeted by checking the loopback, localhost and devicenames
* @return
*/
private boolean targetsThisDevice() {
boolean ret = false;
String device_name = Device.getDeviceName();
String loopback = InetAddress.getLoopbackAddress().getHostAddress();
for (String device:
targetDevices) {
if (device_name.equalsIgnoreCase(device)){
return true;
}
if (device_name.equalsIgnoreCase(loopback)){
return true;
}
try {
if (InetAddress.getLocalHost().getHostAddress().equalsIgnoreCase(device)){
return true;
}
} catch (UnknownHostException e) {
//e.printStackTrace();
}
}
return ret;
}
    /**
     * Schedule this control to change its value in context of scheduler
     * @param source_device the device name that was the source of this message - can be null
     * @param value the value to send
     * @param execution_time the time it needs to be executed
     * @param local_only if true, will not send value changed to notifyValueSetListeners
     */
    void scheduleValue(String source_device, Object value, double execution_time, boolean local_only){
        // We need to convert the Object value into the exact type. EG, integer must be cast to boolean if that is thr control type
        Object converted_value = convertValue(controlType, value);
        if (disableScheduler || execution_time == 0){
            // immediate path: apply the value now and notify synchronously
            this.objVal = converted_value;
            this.executionTime = 0;
            this.sendingDevice = source_device;
            notifyLocalListeners();
            if (!local_only) {
                notifyValueSetListeners();
            }
        }
        else {
            // deferred path: wrap the value in a future message and hand it to the global scheduler
            FutureControlMessage message = new FutureControlMessage(source_device, converted_value, execution_time);
            message.localOnly = local_only;
            message.pendingSchedule = HBScheduler.getGlobalScheduler().addScheduledObject(execution_time, message, this);
            // track pending messages so eraseListeners can cancel them
            synchronized (futureMessageListLock) {
                futureMessageList.add(message);
            }
        }
    }
    /**
     * Schedule this control to send a value to it's locallisteners at a scheduled time. Will also notify valueListeners (eg GUI controls)
     * Convenience overload of the four-argument version with local_only = false.
     * @param source_device the device name that was the source of this message - can be null
     * @param value the value to send
     * @param execution_time the time it needs to be executed
     */
    void scheduleValue(String source_device, Object value, double execution_time) {
        scheduleValue(source_device, value, execution_time, false);
    }
    /**
     * Process the DynamicControl deviceName message and map device name to IPAddress
     * We ignore our own device
     * @param src_address The address of the device
     * @param msg The OSC Message that has device name
     */
    public static void processDeviceNameMessage(InetAddress src_address, OSCMessage msg) {
        // do some error checking here
        if (src_address != null) {
            String device_name = (String) msg.getArg(DEVICE_NAME_ARGS.DEVICE_NAME.ordinal());
            try {
                // only map other devices - never record an entry for ourselves
                if (!Device.getDeviceName().equalsIgnoreCase(device_name)) {
                    HB.HBInstance.addDeviceAddress(device_name, src_address);
                }
            }
            // deliberately best-effort: a malformed message or missing HB instance is ignored
            catch(Exception ex){}
        }
    }
    /**
     * Process the DynamicControl deviceRequest message
     * Send a deviceName back to src. Test that their name is mapped correctly
     * If name is not mapped we will request from all devices globally
     * @param src_address The address of the device
     * @param msg The OSC Message that has device name
     */
    public static void processRequestNameMessage(InetAddress src_address, OSCMessage msg) {
        String device_name = (String) msg.getArg(DEVICE_NAME_ARGS.DEVICE_NAME.ordinal());
        // ignore ourself
        if (!Device.getDeviceName().equalsIgnoreCase(device_name)) {
            // send them our message
            OSCMessage nameMessage = buildDeviceNameMessage();
            ControlMap.getInstance().sendGlobalDynamicControlMessage(nameMessage, null);
            // See if we have them mapped the same
            boolean address_changed = HB.HBInstance.addDeviceAddress(device_name, src_address);
            if (address_changed){
                // our map was stale - re-request names from every device to resynchronise
                postRequestNamesMessage();
            }
        }
    }
    /**
     * Post a request device name message to other devices so we can target them specifically and update our map
     */
    public static void postRequestNamesMessage(){
        OSCMessage requestMessage = buildDeviceRequestNameMessage();
        // broadcast to all devices; null means no excluded recipient
        ControlMap.getInstance().sendGlobalDynamicControlMessage(requestMessage, null);
    }
    /**
     * Build OSC Message that gives our device name
     * @return OSC Message that has name as its single argument
     */
    public static OSCMessage buildDeviceNameMessage(){
        return new OSCMessage(OSCVocabulary.DynamicControlMessage.DEVICE_NAME,
                new Object[]{
                        Device.getDeviceName(),
                });
    }
    /**
     * Build OSC Message that requests devices send us their name
     * @return OSC Message to request name; carries our own name so receivers can skip replying to themselves
     */
    public static OSCMessage buildDeviceRequestNameMessage(){
        return new OSCMessage(OSCVocabulary.DynamicControlMessage.REQUEST_NAME,
                new Object[]{
                        Device.getDeviceName(),
                });
    }
/**
* Convert two halves of a long stored integer values into a long value
* @param msi most significant integer
* @param lsi least significant integer
* @return a long value consisting of the concatenation of both int values
*/
public static long integersToLong(int msi, int lsi){
return (long) msi << 32 | lsi & 0xFFFFFFFFL;
}
/**
* Convert a long into two integers in an array of two integers
* @param l_value the Long values that needs to be encoded
* @return an array of two integers. ret[0] will be most significant integer while int [1] will be lease significant
*/
public static int [] longToIntegers (long l_value){
int msi = (int) (l_value >> 32); // this is most significant integer
int lsi = (int) l_value; // This is LSB that has been trimmed down;
return new int[]{msi, lsi};
}
    // Cached sample encoding, used only to report the encoded array length.
    // Lazily initialised on first call to numberIntsForScheduledTime.
    private static int [] intArrayCache = null;
    /**
     * Return the array size of Integers that would be required to encode a scheduled time
     * @return the number of int slots produced by scheduleTimeToIntegers
     */
    public static int numberIntsForScheduledTime(){
        if (intArrayCache == null) {
            // encode a dummy time purely to measure the array length
            intArrayCache = scheduleTimeToIntegers(0);
        }
        return intArrayCache.length;
    }
/**
* Convert a SchedulerTime into integers in an array of three integers
* @param d_val the double values that needs to be encoded
* @return an array of three integers. ret[0] will be most significant integer while int [1] will be lease significant. int [2] is the number of nano seconds
*/
public static int [] scheduleTimeToIntegers (double d_val){
long lval = (long)d_val;
int msi = (int) (lval >> 32); // this is most significant integer
int lsi = (int) lval; // This is LSB that has been trimmed down;
double nano = d_val - lval;
nano *= 1000000;
int n = (int) nano;
return new int[]{msi, lsi, n};
}
/**
* Convert three integers to a double representing scheduler time
* @param msi the most significant value of millisecond value
* @param lsi the least significant value of millisecond value
* @param nano the number of nanoseconds
* @return a double representing the scheduler time
*/
public static double integersToScheduleTime(int msi, int lsi, int nano){
long milliseconds = integersToLong(msi, lsi);
double ret = milliseconds;
double nanoseconds = nano;
return ret + nanoseconds / 1000000d;
}
    /**
     * Process the {@link ControlScope#GLOBAL} or {@link ControlScope#TARGET} Message from an OSC Message. Examine buildUpdateMessage for parameters inside Message
     * We will not process messages that have come from this device because they will be actioned through local listeners
     * @param msg OSC message with new value
     * @param controlScope the type of {@link ControlScope};
     */
    public static void processOSCControlMessage(OSCMessage msg, ControlScope controlScope) {
        String device_name = (String) msg.getArg(NETWORK_TRANSMIT_MESSAGE_ARGS.DEVICE_NAME.ordinal());
        int message_id = (int)msg.getArg(NETWORK_TRANSMIT_MESSAGE_ARGS.MESSAGE_ID.ordinal());
        // Make sure we ignore messages from this device
        if (ignoreName || !device_name.equals(Device.getDeviceName())) {
            if (enableProcessControlMessage(device_name, message_id)) {
                String control_name = (String) msg.getArg(NETWORK_TRANSMIT_MESSAGE_ARGS.CONTROL_NAME.ordinal());
                ControlType control_type = ControlType.values()[(int) msg.getArg(NETWORK_TRANSMIT_MESSAGE_ARGS.CONTROL_TYPE.ordinal())];
                Object obj_val = msg.getArg(NETWORK_TRANSMIT_MESSAGE_ARGS.OBJ_VAL.ordinal());
                // scheduled execution time arrives as two millisecond halves plus nanoseconds
                Object ms_max = msg.getArg(NETWORK_TRANSMIT_MESSAGE_ARGS.EXECUTE_TIME_MLILI_MS.ordinal());
                Object ms_min = msg.getArg(NETWORK_TRANSMIT_MESSAGE_ARGS.EXECUTE_TIME_MLILI_LS.ordinal());
                Object nano = msg.getArg(NETWORK_TRANSMIT_MESSAGE_ARGS.EXECUTE_TIME_NANO.ordinal());
                double execution_time = integersToScheduleTime((int) ms_max, (int) ms_min, (int) nano);
                boolean data_converted = false; // we only want to do data conversion once
                synchronized (controlMapLock) {
                    // the same value is delivered to every local control sharing this name, scope and type
                    List<DynamicControl> named_controls = controlMap.getControlsByName(control_name);
                    for (DynamicControl named_control : named_controls) {
                        if (named_control.controlScope == controlScope && control_type.equals(named_control.controlType)) {
                            // we must NOT call setVal as this will generate a global series again.
                            // Just notifyListeners specific to this control but not globally
                            if (!data_converted) {
                                // we need to see if this is a boolean Object as OSC does not support that
                                if (control_type == ControlType.BOOLEAN) {
                                    int osc_val = (int) obj_val;
                                    Boolean bool_val = osc_val != 0;
                                    obj_val = bool_val;
                                    data_converted = true;
                                } else if (control_type == ControlType.OBJECT) {
                                    if (!(obj_val instanceof String)) {
                                        // This is not a Json Message
                                        // We will need to get all the remaining OSC arguments after the schedule time and store that as ObjVal
                                        int num_args = msg.getArgCount() - OSC_TRANSMIT_ARRAY_ARG;
                                        Object[] restore_args = new Object[num_args];
                                        for (int i = 0; i < num_args; i++) {
                                            restore_args[i] = msg.getArg(OSC_TRANSMIT_ARRAY_ARG + i);
                                        }
                                        obj_val = restore_args;
                                        data_converted = true;
                                    }
                                }
                            }
                            // We need to schedule this value
                            named_control.scheduleValue(device_name, obj_val, execution_time);
                        }
                    }
                }
            }
        }
    }
    /**
     * Process the Update Message from an OSC Message. Examine buildUpdateMessage for parameters inside Message
     * The message is directed as a specific control defined by the MAP_KEY parameter in the OSC Message
     * @param msg OSC message with new value
     */
    public static void processUpdateMessage(OSCMessage msg){
        String map_key = (String) msg.getArg(UPDATE_MESSAGE_ARGS.MAP_KEY.ordinal());
        String control_name = (String) msg.getArg(UPDATE_MESSAGE_ARGS.CONTROL_NAME.ordinal());
        Object obj_val = msg.getArg(UPDATE_MESSAGE_ARGS.OBJ_VAL.ordinal());
        ControlScope control_scope = ControlScope.values ()[(int) msg.getArg(UPDATE_MESSAGE_ARGS.CONTROL_SCOPE.ordinal())];
        DISPLAY_TYPE display_type = DISPLAY_TYPE.DISPLAY_DEFAULT;
        DynamicControl control = getControl(map_key);
        if (control != null)
        {
            // default display range to the control's current values in case the message omits them
            Object display_min = control.getMinimumDisplayValue();
            Object display_max = control.getMaximumDisplayValue();
            // the following arguments are optional - older senders may not include them
            if (msg.getArgCount() > UPDATE_MESSAGE_ARGS.DISPLAY_TYPE_VAL.ordinal())
            {
                int osc_val = (int) msg.getArg(UPDATE_MESSAGE_ARGS.DISPLAY_TYPE_VAL.ordinal());
                display_type = DISPLAY_TYPE.values ()[osc_val];
            }
            if (msg.getArgCount() > UPDATE_MESSAGE_ARGS.MAX_VALUE.ordinal()){
                display_max = msg.getArg(UPDATE_MESSAGE_ARGS.MAX_VALUE.ordinal());
            }
            if (msg.getArgCount() > UPDATE_MESSAGE_ARGS.MIN_VALUE.ordinal()){
                display_min = msg.getArg(UPDATE_MESSAGE_ARGS.MIN_VALUE.ordinal());
            }
            // do not use setters as we only want to generate one notifyLocalListeners
            boolean changed = false;
            boolean control_scope_changed = false;
            if (control.displayType != display_type)
            {
                changed = true;
            }
            control.displayType = display_type;
            // normalise incoming values to the control's exact type before comparing
            obj_val = convertValue(control.controlType, obj_val);
            display_max = convertValue(control.controlType, display_max);
            display_min = convertValue(control.controlType, display_min);
            if (!obj_val.equals(control.objVal) ||
                    !display_max.equals(control.maximumDisplayValue) ||
                    !display_min.equals(control.minimumDisplayValue)
                    ) {
                changed = true;
            }
            if (!control_scope.equals(control.controlScope)) {
                control.controlScope = control_scope;
                //control.executionTime = execution_time;
                changed = true;
                control_scope_changed = true;
            }
            if (changed) {
                control.maximumDisplayValue = display_max;
                control.minimumDisplayValue = display_min;
                // local_only = true: the GUI-originated update must not echo back to value-set listeners
                control.scheduleValue(null, obj_val, 0, true);
                if (control.getControlScope() != ControlScope.UNIQUE){
                    control.objVal = obj_val;
                    control.notifyGlobalListeners();
                }
            }
            if (control_scope_changed)
            {
                control.notifyControlChangeListeners();
            }
        }
    }
    /**
     * Build OSC Message that specifies a removal of a control
     * @return OSC Message to notify removal; carries our device name and the control's map key
     */
    public OSCMessage buildRemoveMessage(){
        return new OSCMessage(OSCVocabulary.DynamicControlMessage.DESTROY,
                new Object[]{
                        deviceName,
                        controlMapKey
                });
    }
/**
* Return an object that can be sent by OSC based on control TYpe
* @param obj_val The object value we want to send
* @return the type we will actually send
*/
private Object OSCArgumentObject (Object obj_val){
Object ret = obj_val;
if (obj_val instanceof Boolean)
{
boolean b = (Boolean) obj_val;
return b? 1:0;
}
else if (obj_val instanceof Double){
String s = ((Double)obj_val).toString();
ret = s;
}
return ret;
}
    /**
     * Build OSC Message that specifies an update
     * OBJECT-typed values are flattened to their String form for transport.
     * @return OSC Message To send to specific control
     */
    public OSCMessage buildUpdateMessage(){
        Object sendObjType = objVal;
        if (controlType == ControlType.OBJECT){
            sendObjType = objVal.toString();
        }
        return new OSCMessage(OSCVocabulary.DynamicControlMessage.UPDATE,
                new Object[]{
                        deviceName,
                        controlName,
                        controlType.ordinal(),
                        controlMapKey,
                        OSCArgumentObject(sendObjType),
                        controlScope.ordinal(),
                        displayType.ordinal(),
                        OSCArgumentObject(minimumDisplayValue),
                        OSCArgumentObject(maximumDisplayValue),
                });
    }
    /**
     * Build OSC Message that specifies a Network update
     * Increments the per-device send id so receivers can drop duplicate messages.
     * @return OSC Message directed to controls with same name, scope, but on different devices
     */
    public OSCMessage buildNetworkSendMessage(){
        deviceSendId++;
        String OSC_MessageName = OSCVocabulary.DynamicControlMessage.GLOBAL;
        // define the arguments for send time
        int [] execution_args = scheduleTimeToIntegers(executionTime);
        if (controlScope == ControlScope.TARGET){
            OSC_MessageName = OSCVocabulary.DynamicControlMessage.TARGET;
        }
        if (controlType == ControlType.OBJECT){
            /*
            DEVICE_NAME,
            CONTROL_NAME,
            CONTROL_TYPE,
            OBJ_VAL,
            EXECUTE_TIME_MLILI_MS, // Most Significant Int of Milliseconds - stored as int
            EXECUTE_TIME_MLILI_LS, // Least Significant Bit of Milliseconds - stored as int
            EXECUTE_TIME_NANO // Number on Nano Seconds - stored as int
             */
            // we need to see if we have a custom encode function
            if (objVal instanceof CustomGlobalEncoder){
                Object [] encode_data = ((CustomGlobalEncoder)objVal).encodeGlobalMessage();
                int num_args = OSC_TRANSMIT_ARRAY_ARG + encode_data.length;
                Object [] osc_args = new Object[num_args];
                osc_args[0] = deviceName;
                osc_args[1] = controlName;
                osc_args[2] = controlType.ordinal();
                osc_args[3] = 0; // by defining zero we are going to say this is NOT json
                osc_args[4] = execution_args [0];
                osc_args[5] = execution_args [1];
                osc_args[6] = execution_args [2];
                osc_args[7] = deviceSendId;
                // now encode the object parameters
                for (int i = 0; i < encode_data.length; i++){
                    osc_args[OSC_TRANSMIT_ARRAY_ARG + i] = encode_data[i];
                }
                return new OSCMessage(OSC_MessageName,
                        osc_args);
            }
            else
            {
                // no custom encoder: serialise the object as JSON text
                String jsonString =  gson.toJson(objVal);
                return new OSCMessage(OSC_MessageName,
                        new Object[]{
                                deviceName,
                                controlName,
                                controlType.ordinal(),
                                jsonString,
                                execution_args[0],
                                execution_args[1],
                                execution_args[2],
                                deviceSendId
                        });
            }
        }
        else {
            // primitive control types are transmitted directly (with OSC-safe conversion)
            return new OSCMessage(OSC_MessageName,
                    new Object[]{
                            deviceName,
                            controlName,
                            controlType.ordinal(),
                            OSCArgumentObject(objVal),
                            execution_args[0],
                            execution_args[1],
                            execution_args[2],
                            deviceSendId
                    });
        }
    }
    /**
     * Build the OSC Message for a create message
     * Argument order must stay in sync with CREATE_MESSAGE_ARGS and the OSCMessage constructor.
     * @return OSC Message required to create the object
     */
    public OSCMessage buildCreateMessage() {
        Object sendObjType = objVal;
        if (controlType == ControlType.OBJECT){
            sendObjType = objVal.toString();
        }
        return new OSCMessage(OSCVocabulary.DynamicControlMessage.CREATE,
                new Object[]{
                        deviceName,
                        controlMapKey,
                        controlName,
                        parentSketchName,
                        parentId,
                        controlType.ordinal(),
                        OSCArgumentObject(sendObjType),
                        OSCArgumentObject(minimumDisplayValue),
                        OSCArgumentObject(maximumDisplayValue),
                        controlScope.ordinal(),
                        displayType.ordinal()
                });
    }
    /**
     * Create a DynamicControl based on OSC Message. This will keep OSC implementation inside this class
     * The buildUpdateMessage shows how messages are constructed
     * @param msg the OSC Message with the parameters to make Control
     */
    public DynamicControl (OSCMessage msg)
    {
        deviceName = (String) msg.getArg(CREATE_MESSAGE_ARGS.DEVICE_NAME.ordinal());
        controlMapKey = (String) msg.getArg(CREATE_MESSAGE_ARGS.MAP_KEY.ordinal());
        controlName = (String) msg.getArg(CREATE_MESSAGE_ARGS.CONTROL_NAME.ordinal());
        parentSketchName = (String) msg.getArg(CREATE_MESSAGE_ARGS.PARENT_SKETCH_NAME.ordinal());
        parentId =  (int) msg.getArg(CREATE_MESSAGE_ARGS.PARENT_SKETCH_ID.ordinal());
        controlType = ControlType.values ()[(int) msg.getArg(CREATE_MESSAGE_ARGS.CONTROL_TYPE.ordinal())];
        // normalise transported values back to the control's exact type
        objVal = convertValue (controlType, msg.getArg(CREATE_MESSAGE_ARGS.OBJ_VAL.ordinal()));
        minimumDisplayValue = convertValue (controlType, msg.getArg(CREATE_MESSAGE_ARGS.MIN_VAL.ordinal()));
        maximumDisplayValue = convertValue (controlType, msg.getArg(CREATE_MESSAGE_ARGS.MAX_VAL.ordinal()));
        controlScope = ControlScope.values ()[(int) msg.getArg(CREATE_MESSAGE_ARGS.CONTROL_SCOPE.ordinal())];
        // display type is an optional trailing argument - default stands if absent
        if (msg.getArgCount() > CREATE_MESSAGE_ARGS.DISPLAY_TYPE_VAL.ordinal())
        {
            int osc_val = (int) msg.getArg(CREATE_MESSAGE_ARGS.DISPLAY_TYPE_VAL.ordinal());
            displayType = DISPLAY_TYPE.values ()[osc_val];
        }
        synchronized (controlMapLock) {
            controlMap.addControl(this);
        }
    }
    /**
     * Get the map key created in the device as a method for mapping back
     * @return The unique key to identify this object
     */
    public String getControlMapKey(){
        return controlMapKey;
    }
    /**
     * Set the value of the object and notify any listeners
     * Additionally, the value will propagate to any controls that match the control scope
     * If we are using a trigger, send a random number or a unique value
     * Convenience overload that executes immediately (scheduler time 0).
     * @param val the value to set
     * @return this object
     */
    public DynamicControl setValue(Object val)
    {
        return setValue(val, 0);
    }
    /**
     * Set the value of the object and notify any listeners
     * Additionally, the value will propagate to any controls that match the control scope
     * If we are using a trigger, send a random number or a unique value
     * No notification occurs when the converted value equals the current value.
     * @param val the value to set
     * @param execution_time the Scheduler time we want this to occur
     * @return this object
     */
    public DynamicControl setValue(Object val, double execution_time)
    {
        executionTime = execution_time;
        val = convertValue (controlType, val);
        if (!objVal.equals(val)) {
            if (controlType == ControlType.FLOAT)
            {
                // convertValue yields a Double for FLOAT controls; cast documents that invariant
                objVal = (Double) val;
            }
            else {
                objVal = val;
            }
            // network peers are notified first, then the local/value listeners via the scheduler
            notifyGlobalListeners();
            scheduleValue(null, val, execution_time);
        }
        return this;
    }
    /**
     * Gets the value of the control. The type needs to be cast to the required type in the listener
     * @return Control Value
     */
    public Object getValue(){
        return objVal;
    }
    /**
     * The maximum value that we want as a display, for example, in a slider control. Does not limit values in the messages
     * @return The maximum value we want a graphical display to be set to
     */
    public Object getMaximumDisplayValue(){
        return maximumDisplayValue;
    }
    /**
     * Set the minimum display range for display
     * NOTE(review): name is inconsistent with setMaximumDisplayValue; renaming would
     * break existing callers, so it is kept as-is.
     * @param min minimum display value
     *
     * @return this
     */
    public DynamicControl setMinimumValue(Object min) {minimumDisplayValue = min; return  this;}
    /**
     * Set the maximum display range for display
     * @param max maximum display value
     * @return this
     */
    public DynamicControl setMaximumDisplayValue(Object max) {maximumDisplayValue = max; return  this;}
    /**
     * The minimum value that we want as a display, for example, in a slider control. Does not limit values in the messages
     * @return The minimum value we want a graphical display to be set to
     */
    public Object getMinimumDisplayValue(){
        return minimumDisplayValue;
    }
    /**
     * Get the name of the control used for ControlScope matching. Also displayed in GUI
     * @return The name of the control for scope matching
     */
    public String getControlName(){
        return controlName;
    }
/**
* Register Listener to receive changed values in the control
* @param listener Listener to register for events
* @return this
*/
public DynamicControl addControlListener(DynamicControlListener listener)
{
if (listener != null) {
synchronized (controlListenerLock) {
controlListenerList.add(listener);
}
}
return this;
}
/**
* Register Listener to receive changed values in the control that need to be global type messages
* @param listener Listener to register for events
* @return this listener that has been created
*/
public DynamicControl addGlobalControlListener(DynamicControlListener listener)
{
if (listener != null) {
synchronized (globalListenerLock) {
globalControlListenerList.add(listener);
}
}
return this;
}
/**
* Register Listener to receive changed values in the control that need to be received when value is specifically set from
* Within sketch
* @param listener Listener to register for events
* @return this
*/
public DynamicControl addValueSetListener(DynamicControlListener listener)
{
if (listener != null) {
synchronized (valueSetListenerLock) {
valueSetListenerList.add(listener);
}
}
return this;
}
/**
* Deregister listener so it no longer receives messages from this control
* @param listener The lsitener we are removing
* @return this object
*/
public DynamicControl removeControlListener(DynamicControlListener listener) {
if (listener != null) {
synchronized (controlListenerLock) {
controlListenerList.remove(listener);
}
}
return this;
}
/**
* Deregister listener so it no longer receives messages from this control
* @param listener the listener we are remmoving
* @return this object
*/
public DynamicControl removeGlobalControlListener(DynamicControlListener listener) {
if (listener != null) {
synchronized (globalListenerLock) {
globalControlListenerList.remove(listener);
}
}
return this;
}
/**
* Register Listener to receive changed values in the control scope
* @param listener Listener to register for events
* @return this object
*/
public DynamicControl addControlScopeListener(ControlScopeChangedListener listener){
if (listener != null) {
synchronized (controlScopeChangedLock) {
controlScopeChangedList.add(listener);
}
}
return this;
}
/**
* Deregister listener so it no longer receives messages from this control
* @param listener the listener
* @return this object
*/
public DynamicControl removeControlScopeChangedListener(ControlScopeChangedListener listener) {
if (listener != null) {
synchronized (controlScopeChangedLock) {
controlScopeChangedList.remove(listener);
}
}
return this;
}
/**
* Erase all listeners from this control
* @return this object
*/
public DynamicControl eraseListeners()
{
// We need to
synchronized (futureMessageListLock){
for (FutureControlMessage message:
futureMessageList) {
message.pendingSchedule.setCancelled(true);
}
futureMessageList.clear();
}
synchronized (controlListenerLock) {controlListenerList.clear();}
synchronized (controlScopeChangedLock) {controlScopeChangedList.clear();}
return this;
}
/**
* Notify all registered listeners of object value on this device
* @return this object
*/
public DynamicControl notifyLocalListeners()
{
synchronized (controlListenerLock)
{
controlListenerList.forEach(listener ->
{
try
{
listener.update(this);
}
catch (Exception ex)
{
ex.printStackTrace();
}
});
}
return this;
}
/**
* Send Update Message when value set
*/
public void notifyValueSetListeners(){
synchronized (valueSetListenerLock)
{
valueSetListenerList.forEach(listener ->
{
try
{
listener.update(this);
}
catch (Exception ex)
{
ex.printStackTrace();
}
});
}
}
/**
* Send Global Update Message
*/
public void notifyGlobalListeners(){
synchronized (globalListenerLock)
{
globalControlListenerList.forEach(listener ->
{
try
{
listener.update(this);
}
catch (Exception ex)
{
ex.printStackTrace();
}
});
}
}
/**
* Notify all registered listeners of object value
* @return this object
*/
public DynamicControl notifyControlChangeListeners()
{
synchronized (controlScopeChangedLock)
{
controlScopeChangedList.forEach(listener ->
{
try
{
listener.controlScopeChanged(this.getControlScope());
}
catch (Exception ex)
{
ex.printStackTrace();
}
});
}
return this;
}
/**
* Get the tooltip to display
* @return the tooltip to display
*/
public String getTooltipText(){
String control_scope_text = "";
if (getControlScope() == ControlScope.UNIQUE)
{
control_scope_text = "UNIQUE scope";
}
else if (getControlScope() == ControlScope.SKETCH)
{
control_scope_text = "SKETCH scope";
}
else if (getControlScope() == ControlScope.CLASS)
{
control_scope_text = "CLASS scope - " + parentSketchName;
}
else if (getControlScope() == ControlScope.DEVICE)
{
control_scope_text = "DEVICE scope - " + deviceName;
}
else if (getControlScope() == ControlScope.GLOBAL)
{
control_scope_text = "GLOBAL scope";
}
return tooltipPrefix + "\n" + control_scope_text;
}
}
| orsjb/HappyBrackets | HappyBrackets/src/main/java/net/happybrackets/core/control/DynamicControl.java | Java | apache-2.0 | 63,210 |
<?php
declare(strict_types=1);
namespace OpenTelemetry\Tests\Unit\Contrib;
use AssertWell\PHPUnitGlobalState\EnvironmentVariables;
use Grpc\UnaryCall;
use Mockery;
use Mockery\MockInterface;
use OpenTelemetry\Contrib\OtlpGrpc\Exporter;
use Opentelemetry\Proto\Collector\Trace\V1\TraceServiceClient;
use OpenTelemetry\SDK\Trace\SpanExporterInterface;
use OpenTelemetry\Tests\Unit\SDK\Trace\SpanExporter\AbstractExporterTest;
use OpenTelemetry\Tests\Unit\SDK\Util\SpanData;
use org\bovigo\vfs\vfsStream;
/**
* @covers OpenTelemetry\Contrib\OtlpGrpc\Exporter
*/
class OTLPGrpcExporterTest extends AbstractExporterTest
{
use EnvironmentVariables;
    /**
     * Factory used by the shared AbstractExporterTest cases: builds an
     * OTLP gRPC exporter with all default settings.
     */
    public function createExporter(): SpanExporterInterface
    {
        return new Exporter();
    }
    /**
     * Restore any OTEL_* environment variables mutated by individual tests
     * so later tests see a clean environment.
     */
    public function tearDown(): void
    {
        $this->restoreEnvironmentVariables();
    }
    /**
     * Exporting one span through a mocked gRPC client that returns STATUS_OK
     * must report STATUS_SUCCESS.
     * @psalm-suppress UndefinedConstant
     */
    public function test_exporter_happy_path(): void
    {
        $exporter = new Exporter(
            //These first parameters were copied from the constructor's default values
            'localhost:4317',
            true,
            '',
            '',
            false,
            10,
            $this->createMockTraceServiceClient([
                'expectations' => [
                    'num_spans' => 1,
                ],
                'return_values' => [
                    'status_code' => \Grpc\STATUS_OK,
                ],
            ])
        );
        $exporterStatusCode = $exporter->export([new SpanData()]);
        $this->assertSame(SpanExporterInterface::STATUS_SUCCESS, $exporterStatusCode);
    }
    /**
     * A gRPC status the exporter does not recognise must map to a
     * non-retryable failure.
     */
    public function test_exporter_unexpected_grpc_response_status(): void
    {
        $exporter = new Exporter(
            //These first parameters were copied from the constructor's default values
            'localhost:4317',
            true,
            '',
            '',
            false,
            10,
            $this->createMockTraceServiceClient([
                'expectations' => [
                    'num_spans' => 1,
                ],
                'return_values' => [
                    'status_code' => 'An unexpected status',
                ],
            ])
        );
        $exporterStatusCode = $exporter->export([new SpanData()]);
        $this->assertSame(SpanExporterInterface::STATUS_FAILED_NOT_RETRYABLE, $exporterStatusCode);
    }
    /**
     * With no mocked client there is no listening endpoint, so the export
     * must fail with a retryable status.
     */
    public function test_exporter_grpc_responds_as_unavailable(): void
    {
        $this->assertEquals(SpanExporterInterface::STATUS_FAILED_RETRYABLE, (new Exporter())->export([new SpanData()]));
    }
    /**
     * OTEL_EXPORTER_OTLP_HEADERS (comma-separated key=value pairs) must be
     * parsed into the exporter's header map.
     */
    public function test_set_headers_with_environment_variables(): void
    {
        $this->setEnvironmentVariable('OTEL_EXPORTER_OTLP_HEADERS', 'x-aaa=foo,x-bbb=barf');
        $exporter = new Exporter();
        $this->assertEquals(['x-aaa' => 'foo', 'x-bbb' => 'barf'], $exporter->getHeaders());
    }
    /**
     * setHeader() must add a single key/value pair retrievable via getHeaders().
     */
    public function test_set_header(): void
    {
        $exporter = new Exporter();
        $exporter->setHeader('foo', 'bar');
        $headers = $exporter->getHeaders();
        $this->assertArrayHasKey('foo', $headers);
        $this->assertEquals('bar', $headers['foo']);
    }
    /**
     * Headers passed as a constructor string must be parsed, and later
     * setHeader() calls must merge with (not replace) them.
     */
    public function test_set_headers_in_constructor(): void
    {
        $exporter = new Exporter('localhost:4317', true, '', 'x-aaa=foo,x-bbb=bar');
        $this->assertEquals(['x-aaa' => 'foo', 'x-bbb' => 'bar'], $exporter->getHeaders());
        $exporter->setHeader('key', 'value');
        $this->assertEquals(['x-aaa' => 'foo', 'x-bbb' => 'bar', 'key' => 'value'], $exporter->getHeaders());
    }
public function test_should_be_ok_to_exporter_empty_spans_collection(): void
{
    // Exporting nothing is a no-op and must report success without
    // touching the network.
    $exporter = new Exporter('test.otlp');

    $this->assertEquals(SpanExporterInterface::STATUS_SUCCESS, $exporter->export([]));
}
/**
 * Reads the exporter's private "insecure" flag via reflection, since the
 * class exposes no public accessor for it.
 */
private function isInsecure(Exporter $exporter): bool
{
    $property = (new \ReflectionClass($exporter))->getProperty('insecure');
    $property->setAccessible(true);

    return $property->getValue($exporter);
}
/**
 * Client options should be resolved in priority order: environment
 * variables override constructor arguments, which override the defaults.
 */
public function test_client_options(): void
{
    // default options: 10s timeout, insecure transport, no compression key.
    $exporter = new Exporter('localhost:4317');
    $opts = $exporter->getClientOptions();
    $this->assertEquals(10, $opts['timeout']);
    $this->assertTrue($this->isInsecure($exporter));
    $this->assertArrayNotHasKey('grpc.default_compression_algorithm', $opts);
    // method args: explicit timeout/secure/compression via the constructor.
    // The value 2 is presumably the gRPC enum member for gzip — TODO confirm
    // against the grpc extension's compression constants.
    $exporter = new Exporter('localhost:4317', false, '', '', true, 5);
    $opts = $exporter->getClientOptions();
    $this->assertEquals(5, $opts['timeout']);
    $this->assertFalse($this->isInsecure($exporter));
    $this->assertEquals(2, $opts['grpc.default_compression_algorithm']);
    // env vars: OTEL_EXPORTER_OTLP_* should win over the defaults even
    // though the constructor is called with defaults only.
    $this->setEnvironmentVariable('OTEL_EXPORTER_OTLP_TIMEOUT', '1');
    $this->setEnvironmentVariable('OTEL_EXPORTER_OTLP_COMPRESSION', 'gzip');
    $this->setEnvironmentVariable('OTEL_EXPORTER_OTLP_INSECURE', 'false');
    $exporter = new Exporter('localhost:4317');
    $opts = $exporter->getClientOptions();
    $this->assertEquals(1, $opts['timeout']);
    $this->assertFalse($this->isInsecure($exporter));
    $this->assertEquals(2, $opts['grpc.default_compression_algorithm']);
}
/**
 * Builds a Mockery stand-in for the gRPC TraceServiceClient.
 *
 * Expected $options shape:
 *   - expectations.num_spans:  number of resource spans the mocked Export()
 *                              call must receive for the stub to match
 *   - return_values.status_code: value exposed as the public $code property
 *                              of the status object returned by wait()
 *
 * @psalm-suppress PossiblyUndefinedMethod
 * @psalm-suppress UndefinedMagicMethod
 */
private function createMockTraceServiceClient(array $options = [])
{
    // Destructure the options array into the two values the mock needs.
    [
        'expectations' => [
            'num_spans' => $expectedNumSpans,
        ],
        'return_values' => [
            'status_code' => $statusCode,
        ]
    ] = $options;
    /** @var MockInterface&TraceServiceClient */
    $mockClient = Mockery::mock(TraceServiceClient::class)
        ->allows('Export')
        ->withArgs(function ($request) use ($expectedNumSpans) {
            // Only match calls whose request carries the expected span count.
            return (count($request->getResourceSpans()) === $expectedNumSpans);
        })
        ->andReturns(
            Mockery::mock(UnaryCall::class)
                ->allows('wait')
                ->andReturns(
                    [
                        'unused response data',
                        // Anonymous stand-in for the gRPC status object; the
                        // exporter only inspects its public $code property.
                        new class($statusCode) {
                            public $code;
                            public function __construct($code)
                            {
                                $this->code = $code;
                            }
                        },
                    ]
                )
                ->getMock()
        )
        ->getMock();
    return $mockClient;
}
public function test_from_connection_string(): void
{
    // Each invocation of the factory must yield a fresh exporter instance,
    // never a shared singleton.
    // @phpstan-ignore-next-line
    $this->assertNotSame(Exporter::fromConnectionString(), Exporter::fromConnectionString());
}
public function test_create_with_cert_file(): void
{
    // Back the certificate with an in-memory (vfsStream) file so no real
    // filesystem state is touched.
    vfsStream::setup('var');
    $certPath = vfsStream::url('var/file.cert');
    file_put_contents($certPath, 'foo');

    $this->setEnvironmentVariable('OTEL_EXPORTER_OTLP_INSECURE', 'false');
    $this->setEnvironmentVariable('OTEL_EXPORTER_OTLP_CERTIFICATE', $certPath);

    $this->assertSame($certPath, (new Exporter())->getCertificateFile());
}
}
| open-telemetry/opentelemetry-php | tests/Unit/Contrib/OTLPGrpcExporterTest.php | PHP | apache-2.0 | 7,876 |
/*
* Copyright 2014-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.elasticmapreduce.model.transform;
import javax.annotation.Generated;
import com.amazonaws.SdkClientException;
import com.amazonaws.services.elasticmapreduce.model.*;
import com.amazonaws.protocol.*;
import com.amazonaws.annotation.SdkInternalApi;
/**
 * Marshalls a {@link ListBootstrapActionsRequest} into the protocol payload
 * expected by the EMR service.
 *
 * <p>Generated by the AWS SDK code generator — do not hand-edit.
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
@SdkInternalApi
public class ListBootstrapActionsRequestMarshaller {

    // Payload binding for the required "ClusterId" member.
    private static final MarshallingInfo<String> CLUSTERID_BINDING = MarshallingInfo.builder(MarshallingType.STRING).marshallLocation(MarshallLocation.PAYLOAD)
            .marshallLocationName("ClusterId").build();
    // Payload binding for the pagination "Marker" member.
    private static final MarshallingInfo<String> MARKER_BINDING = MarshallingInfo.builder(MarshallingType.STRING).marshallLocation(MarshallLocation.PAYLOAD)
            .marshallLocationName("Marker").build();

    // The marshaller is stateless, so a single shared instance suffices.
    private static final ListBootstrapActionsRequestMarshaller instance = new ListBootstrapActionsRequestMarshaller();

    public static ListBootstrapActionsRequestMarshaller getInstance() {
        return instance;
    }

    /**
     * Marshall the given parameter object.
     *
     * @throws SdkClientException if the request is null or marshalling fails
     */
    public void marshall(ListBootstrapActionsRequest listBootstrapActionsRequest, ProtocolMarshaller protocolMarshaller) {
        if (listBootstrapActionsRequest == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }

        try {
            protocolMarshaller.marshall(listBootstrapActionsRequest.getClusterId(), CLUSTERID_BINDING);
            protocolMarshaller.marshall(listBootstrapActionsRequest.getMarker(), MARKER_BINDING);
        } catch (Exception e) {
            // Wrap any marshalling failure, preserving the original cause.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
| jentfoo/aws-sdk-java | aws-java-sdk-emr/src/main/java/com/amazonaws/services/elasticmapreduce/model/transform/ListBootstrapActionsRequestMarshaller.java | Java | apache-2.0 | 2,390 |
/**
* Server-side support classes for WebSocket requests.
*/
@NonNullApi
@NonNullFields
package org.springframework.web.reactive.socket.server.support;
import org.springframework.lang.NonNullApi;
import org.springframework.lang.NonNullFields;
| spring-projects/spring-framework | spring-webflux/src/main/java/org/springframework/web/reactive/socket/server/support/package-info.java | Java | apache-2.0 | 246 |
/**
*
*/
package org.commcare.cases.ledger;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Enumeration;
import java.util.Hashtable;
import java.util.Vector;
import org.javarosa.core.services.storage.IMetaData;
import org.javarosa.core.services.storage.Persistable;
import org.javarosa.core.util.externalizable.DeserializationException;
import org.javarosa.core.util.externalizable.ExtUtil;
import org.javarosa.core.util.externalizable.ExtWrapMap;
import org.javarosa.core.util.externalizable.PrototypeFactory;
/**
 * A Ledger is a data model which tracks numeric data organized into
 * different sections with different meanings.
 *
 * <p>Entries are stored as nested Hashtables (section id -&gt; (entry id -&gt;
 * quantity)) so the record can round-trip through the javarosa Ext*
 * serialization utilities.
 *
 * @author ctsims
 */
public class Ledger implements Persistable, IMetaData {
    //NOTE: Right now this is (lazily) implemented assuming that each ledger
    //object tracks _all_ of the sections for an entity, which will likely be a terrible way
    //to do things long-term.

    public static final String STORAGE_KEY = "ledger";
    public static final String INDEX_ENTITY_ID = "entity-id";

    // ID of the entity this ledger record is linked to.
    String entityId;
    // Storage record id; -1 until the record has been persisted.
    int recordId = -1;
    // section id -> (entry id -> quantity)
    Hashtable<String, Hashtable<String, Integer>> sections;

    /**
     * Deserialization-only constructor; readExternal() populates all state.
     */
    public Ledger() {
    }

    public Ledger(String entityId) {
        this.entityId = entityId;
        this.sections = new Hashtable<String, Hashtable<String, Integer>>();
    }

    /**
     * Get the ID of the linked entity associated with this Ledger record.
     *
     * @return the entity ID
     * @deprecated the method name is misspelled; it is retained for existing
     *             callers. Use {@link #getEntityId()} instead.
     */
    @Deprecated
    public String getEntiyId() {
        return getEntityId();
    }

    /**
     * @return the ID of the linked entity associated with this Ledger record
     */
    public String getEntityId() {
        return entityId;
    }

    /**
     * Retrieve an entry from a specific section of the ledger.
     *
     * If no entry is defined, the ledger will return the value '0'
     *
     * @param sectionId The section containing the entry
     * @param entryId The Id of the entry to retrieve
     * @return the entry value. '0' if no entry exists.
     */
    public int getEntry(String sectionId, String entryId) {
        // Single lookup per table instead of containsKey() followed by get().
        Hashtable<String, Integer> section = sections.get(sectionId);
        if (section == null) {
            return 0;
        }
        Integer quantity = section.get(entryId);
        return quantity == null ? 0 : quantity.intValue();
    }

    /**
     * @return The list of sections available in this ledger
     */
    public String[] getSectionList() {
        String[] sectionList = new String[sections.size()];
        int i = 0;
        // Hashtable keys are typed, so no casts are needed here.
        for (Enumeration<String> e = sections.keys(); e.hasMoreElements(); ) {
            sectionList[i] = e.nextElement();
            ++i;
        }
        return sectionList;
    }

    /**
     * Retrieves a list of all entries (by ID) defined in a
     * section of the ledger
     *
     * NOTE(review): throws a NullPointerException when the section does not
     * exist, matching the historical behavior — confirm callers expect this.
     *
     * @param sectionId The ID of a section
     * @return The IDs of all entries defined in the provided section
     */
    public String[] getListOfEntries(String sectionId) {
        Hashtable<String, Integer> entries = sections.get(sectionId);
        String[] entryList = new String[entries.size()];
        int i = 0;
        for (Enumeration<String> e = entries.keys(); e.hasMoreElements(); ) {
            entryList[i] = e.nextElement();
            ++i;
        }
        return entryList;
    }

    /*
     * (non-Javadoc)
     * @see org.javarosa.core.util.externalizable.Externalizable#readExternal(java.io.DataInputStream, org.javarosa.core.util.externalizable.PrototypeFactory)
     */
    @SuppressWarnings("unchecked")
    public void readExternal(DataInputStream in, PrototypeFactory pf) throws IOException, DeserializationException {
        recordId = ExtUtil.readInt(in);
        entityId = ExtUtil.readString(in);
        // ExtWrapMap mirrors the nested Hashtable structure written below.
        sections = (Hashtable<String, Hashtable<String, Integer>>) ExtUtil.read(in, new ExtWrapMap(String.class, new ExtWrapMap(String.class, Integer.class)));
    }

    /*
     * (non-Javadoc)
     * @see org.javarosa.core.util.externalizable.Externalizable#writeExternal(java.io.DataOutputStream)
     */
    public void writeExternal(DataOutputStream out) throws IOException {
        ExtUtil.writeNumeric(out, recordId);
        ExtUtil.writeString(out, entityId);
        ExtUtil.write(out, new ExtWrapMap(sections, new ExtWrapMap(String.class, Integer.class)));
    }

    /*
     * (non-Javadoc)
     * @see org.javarosa.core.services.storage.Persistable#setID(int)
     */
    public void setID(int ID) {
        recordId = ID;
    }

    /*
     * (non-Javadoc)
     * @see org.javarosa.core.services.storage.Persistable#getID()
     */
    public int getID() {
        return recordId;
    }

    /**
     * Sets the value of an entry in the specified section of this ledger,
     * creating the section if it does not yet exist.
     *
     * @param sectionId the section to write into
     * @param entryId the entry whose quantity is being set
     * @param quantity the new value for the entry
     */
    public void setEntry(String sectionId, String entryId, int quantity) {
        Hashtable<String, Integer> section = sections.get(sectionId);
        if (section == null) {
            section = new Hashtable<String, Integer>();
            sections.put(sectionId, section);
        }
        // Integer.valueOf() may reuse cached boxes; the deprecated
        // new Integer() constructor always allocated.
        section.put(entryId, Integer.valueOf(quantity));
    }

    /*
     * (non-Javadoc)
     * @see org.javarosa.core.services.storage.IMetaData#getMetaDataFields()
     */
    public String[] getMetaDataFields() {
        return new String[] {INDEX_ENTITY_ID};
    }

    /*
     * (non-Javadoc)
     * @see org.javarosa.core.services.storage.IMetaData#getMetaData(java.lang.String)
     */
    public Object getMetaData(String fieldName) {
        if (fieldName.equals(INDEX_ENTITY_ID)) {
            return entityId;
        } else {
            throw new IllegalArgumentException("No metadata field " + fieldName + " in the ledger storage system");
        }
    }
}
| wpride/commcare | cases/src/org/commcare/cases/ledger/Ledger.java | Java | apache-2.0 | 4,988 |
/*
* Copyright (c) 2010-2013 Evolveum
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.evolveum.midpoint.repo.sql.data.audit;
import com.evolveum.midpoint.audit.api.AuditEventRecord;
import com.evolveum.midpoint.audit.api.AuditService;
import com.evolveum.midpoint.prism.PrismContext;
import com.evolveum.midpoint.prism.PrismObject;
import com.evolveum.midpoint.prism.polystring.PolyString;
import com.evolveum.midpoint.repo.sql.data.common.enums.ROperationResultStatus;
import com.evolveum.midpoint.repo.sql.data.common.other.RObjectType;
import com.evolveum.midpoint.repo.sql.util.ClassMapper;
import com.evolveum.midpoint.repo.sql.util.DtoTranslationException;
import com.evolveum.midpoint.repo.sql.util.RUtil;
import com.evolveum.midpoint.schema.ObjectDeltaOperation;
import com.evolveum.midpoint.schema.constants.ObjectTypes;
import com.evolveum.midpoint.xml.ns._public.common.common_3.ObjectType;
import com.evolveum.midpoint.xml.ns._public.common.common_3.UserType;
import org.apache.commons.lang.Validate;
import org.hibernate.annotations.Cascade;
import org.hibernate.annotations.ForeignKey;
import javax.persistence.*;
import javax.xml.namespace.QName;
import java.io.Serializable;
import java.sql.Timestamp;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
/**
 * Hibernate entity persisting a single audit event (an
 * {@link AuditEventRecord}) together with its object deltas into the
 * m_audit_event table. Conversion to/from the API-level record happens in
 * {@link #toRepo(AuditEventRecord, PrismContext)} and
 * {@link #fromRepo(RAuditEventRecord, PrismContext)}.
 *
 * @author lazyman
 */
@Entity
@Table(name = RAuditEventRecord.TABLE_NAME, indexes = {
        @Index(name = "iTimestampValue", columnList = RAuditEventRecord.COLUMN_TIMESTAMP)}) // TODO correct index name
public class RAuditEventRecord implements Serializable {

    public static final String TABLE_NAME = "m_audit_event";
    public static final String COLUMN_TIMESTAMP = "timestampValue";

    // Generated primary key (see getId()).
    private long id;
    private Timestamp timestamp;
    private String eventIdentifier;
    private String sessionIdentifier;
    private String taskIdentifier;
    private String taskOID;
    private String hostIdentifier;

    //prism object - user
    private String initiatorOid;
    private String initiatorName;

    //prism object
    private String targetOid;
    private String targetName;
    private RObjectType targetType;

    //prism object - user
    private String targetOwnerOid;
    private String targetOwnerName;

    private RAuditEventType eventType;
    private RAuditEventStage eventStage;

    //collection of object deltas
    private Set<RObjectDeltaOperation> deltas;
    private String channel;
    private ROperationResultStatus outcome;
    private String parameter;
    private String message;
    private String result;

    public String getResult() {
        return result;
    }

    // Messages are trimmed in toRepo() to AuditService.MAX_MESSAGE_SIZE,
    // which is assumed to fit within this 1024-char column — TODO confirm.
    @Column(length = 1024)
    public String getMessage() {
        return message;
    }

    public String getParameter() {
        return parameter;
    }

    public String getChannel() {
        return channel;
    }

    // Deltas are owned by this record: orphan removal plus cascade-all means
    // they live and die with the audit event row.
    @ForeignKey(name = "fk_audit_delta")
    @OneToMany(mappedBy = "record", orphanRemoval = true)
    @Cascade({org.hibernate.annotations.CascadeType.ALL})
    public Set<RObjectDeltaOperation> getDeltas() {
        // Lazily initialized so callers can always add to the returned set.
        if (deltas == null) {
            deltas = new HashSet<RObjectDeltaOperation>();
        }
        return deltas;
    }

    public String getEventIdentifier() {
        return eventIdentifier;
    }

    // NOTE(review): ordinal mapping means reordering the enum constants
    // would corrupt existing rows.
    @Enumerated(EnumType.ORDINAL)
    public RAuditEventStage getEventStage() {
        return eventStage;
    }

    @Enumerated(EnumType.ORDINAL)
    public RAuditEventType getEventType() {
        return eventType;
    }

    public String getHostIdentifier() {
        return hostIdentifier;
    }

    @Id
    @GeneratedValue
    public long getId() {
        return id;
    }

    @Column(length = RUtil.COLUMN_LENGTH_OID)
    public String getInitiatorOid() {
        return initiatorOid;
    }

    public String getInitiatorName() {
        return initiatorName;
    }

    @Enumerated(EnumType.ORDINAL)
    public ROperationResultStatus getOutcome() {
        return outcome;
    }

    public String getSessionIdentifier() {
        return sessionIdentifier;
    }

    public String getTargetName() {
        return targetName;
    }

    @Column(length = RUtil.COLUMN_LENGTH_OID)
    public String getTargetOid() {
        return targetOid;
    }

    @Enumerated(EnumType.ORDINAL)
    public RObjectType getTargetType() {
        return targetType;
    }

    public String getTargetOwnerName() {
        return targetOwnerName;
    }

    @Column(length = RUtil.COLUMN_LENGTH_OID)
    public String getTargetOwnerOid() {
        return targetOwnerOid;
    }

    public String getTaskIdentifier() {
        return taskIdentifier;
    }

    public String getTaskOID() {
        return taskOID;
    }

    @Column(name = COLUMN_TIMESTAMP)
    public Timestamp getTimestamp() {
        return timestamp;
    }

    public void setMessage(String message) {
        this.message = message;
    }

    public void setParameter(String parameter) {
        this.parameter = parameter;
    }

    public void setChannel(String channel) {
        this.channel = channel;
    }

    public void setDeltas(Set<RObjectDeltaOperation> deltas) {
        this.deltas = deltas;
    }

    public void setEventIdentifier(String eventIdentifier) {
        this.eventIdentifier = eventIdentifier;
    }

    public void setEventStage(RAuditEventStage eventStage) {
        this.eventStage = eventStage;
    }

    public void setEventType(RAuditEventType eventType) {
        this.eventType = eventType;
    }

    public void setHostIdentifier(String hostIdentifier) {
        this.hostIdentifier = hostIdentifier;
    }

    public void setId(long id) {
        this.id = id;
    }

    public void setInitiatorName(String initiatorName) {
        this.initiatorName = initiatorName;
    }

    public void setInitiatorOid(String initiatorOid) {
        this.initiatorOid = initiatorOid;
    }

    public void setOutcome(ROperationResultStatus outcome) {
        this.outcome = outcome;
    }

    public void setSessionIdentifier(String sessionIdentifier) {
        this.sessionIdentifier = sessionIdentifier;
    }

    public void setTargetName(String targetName) {
        this.targetName = targetName;
    }

    public void setTargetOid(String targetOid) {
        this.targetOid = targetOid;
    }

    public void setTargetType(RObjectType targetType) {
        this.targetType = targetType;
    }

    public void setTargetOwnerName(String targetOwnerName) {
        this.targetOwnerName = targetOwnerName;
    }

    public void setTargetOwnerOid(String targetOwnerOid) {
        this.targetOwnerOid = targetOwnerOid;
    }

    public void setTaskIdentifier(String taskIdentifier) {
        this.taskIdentifier = taskIdentifier;
    }

    public void setTaskOID(String taskOID) {
        this.taskOID = taskOID;
    }

    public void setTimestamp(Timestamp timestamp) {
        this.timestamp = timestamp;
    }

    public void setResult(String result) {
        this.result = result;
    }

    // Field-by-field equality over every persisted property except the
    // generated id (intentional, so unsaved and saved copies compare equal).
    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;

        RAuditEventRecord that = (RAuditEventRecord) o;

        if (channel != null ? !channel.equals(that.channel) : that.channel != null) return false;
        if (deltas != null ? !deltas.equals(that.deltas) : that.deltas != null) return false;
        if (eventIdentifier != null ? !eventIdentifier.equals(that.eventIdentifier) : that.eventIdentifier != null)
            return false;
        if (eventStage != that.eventStage) return false;
        if (eventType != that.eventType) return false;
        if (hostIdentifier != null ? !hostIdentifier.equals(that.hostIdentifier) : that.hostIdentifier != null)
            return false;
        if (initiatorOid != null ? !initiatorOid.equals(that.initiatorOid) : that.initiatorOid != null) return false;
        if (initiatorName != null ? !initiatorName.equals(that.initiatorName) : that.initiatorName != null)
            return false;
        if (outcome != that.outcome) return false;
        if (sessionIdentifier != null ? !sessionIdentifier.equals(that.sessionIdentifier) : that.sessionIdentifier != null)
            return false;
        if (targetOid != null ? !targetOid.equals(that.targetOid) : that.targetOid != null) return false;
        if (targetName != null ? !targetName.equals(that.targetName) : that.targetName != null) return false;
        if (targetType != null ? !targetType.equals(that.targetType) : that.targetType != null) return false;
        if (targetOwnerOid != null ? !targetOwnerOid.equals(that.targetOwnerOid) : that.targetOwnerOid != null)
            return false;
        if (targetOwnerName != null ? !targetOwnerName.equals(that.targetOwnerName) : that.targetOwnerName != null)
            return false;
        if (taskIdentifier != null ? !taskIdentifier.equals(that.taskIdentifier) : that.taskIdentifier != null)
            return false;
        if (taskOID != null ? !taskOID.equals(that.taskOID) : that.taskOID != null) return false;
        if (timestamp != null ? !timestamp.equals(that.timestamp) : that.timestamp != null) return false;
        if (parameter != null ? !parameter.equals(that.parameter) : that.parameter != null) return false;
        if (message != null ? !message.equals(that.message) : that.message != null) return false;
        if (result != null ? !result.equals(that.result) : that.result != null) return false;

        return true;
    }

    // Consistent with equals(): combines the same fields (the local variable
    // "result" shadows the field, which is referenced via this.result below).
    @Override
    public int hashCode() {
        int result = timestamp != null ? timestamp.hashCode() : 0;
        result = 31 * result + (eventIdentifier != null ? eventIdentifier.hashCode() : 0);
        result = 31 * result + (sessionIdentifier != null ? sessionIdentifier.hashCode() : 0);
        result = 31 * result + (taskIdentifier != null ? taskIdentifier.hashCode() : 0);
        result = 31 * result + (taskOID != null ? taskOID.hashCode() : 0);
        result = 31 * result + (hostIdentifier != null ? hostIdentifier.hashCode() : 0);
        result = 31 * result + (initiatorName != null ? initiatorName.hashCode() : 0);
        result = 31 * result + (initiatorOid != null ? initiatorOid.hashCode() : 0);
        result = 31 * result + (targetOid != null ? targetOid.hashCode() : 0);
        result = 31 * result + (targetName != null ? targetName.hashCode() : 0);
        result = 31 * result + (targetType != null ? targetType.hashCode() : 0);
        result = 31 * result + (targetOwnerOid != null ? targetOwnerOid.hashCode() : 0);
        result = 31 * result + (targetOwnerName != null ? targetOwnerName.hashCode() : 0);
        result = 31 * result + (eventType != null ? eventType.hashCode() : 0);
        result = 31 * result + (eventStage != null ? eventStage.hashCode() : 0);
        result = 31 * result + (deltas != null ? deltas.hashCode() : 0);
        result = 31 * result + (channel != null ? channel.hashCode() : 0);
        result = 31 * result + (outcome != null ? outcome.hashCode() : 0);
        result = 31 * result + (parameter != null ? parameter.hashCode() : 0);
        result = 31 * result + (message != null ? message.hashCode() : 0);
        result = 31 * result + (this.result != null ? this.result.hashCode() : 0);
        return result;
    }

    /**
     * Converts an API-level {@link AuditEventRecord} into its repository
     * (Hibernate) representation, including all parseable object deltas.
     *
     * @throws DtoTranslationException if any referenced prism object or delta
     *         cannot be translated
     */
    public static RAuditEventRecord toRepo(AuditEventRecord record, PrismContext prismContext)
            throws DtoTranslationException {
        Validate.notNull(record, "Audit event record must not be null.");
        Validate.notNull(prismContext, "Prism context must not be null.");

        RAuditEventRecord repo = new RAuditEventRecord();
        repo.setChannel(record.getChannel());
        if (record.getTimestamp() != null) {
            repo.setTimestamp(new Timestamp(record.getTimestamp()));
        }
        repo.setEventStage(RAuditEventStage.toRepo(record.getEventStage()));
        repo.setEventType(RAuditEventType.toRepo(record.getEventType()));
        repo.setSessionIdentifier(record.getSessionIdentifier());
        repo.setEventIdentifier(record.getEventIdentifier());
        repo.setHostIdentifier(record.getHostIdentifier());
        repo.setParameter(record.getParameter());
        // Message is truncated to fit the column (see trimMessage()).
        repo.setMessage(trimMessage(record.getMessage()));
        if (record.getOutcome() != null) {
            repo.setOutcome(RUtil.getRepoEnumValue(record.getOutcome().createStatusType(), ROperationResultStatus.class));
        }
        repo.setTaskIdentifier(record.getTaskIdentifier());
        repo.setTaskOID(record.getTaskOID());
        repo.setResult(record.getResult());

        try {
            // Target/owner/initiator are stored flattened as (oid, name) pairs.
            if (record.getTarget() != null) {
                PrismObject target = record.getTarget();
                repo.setTargetName(getOrigName(target));
                repo.setTargetOid(target.getOid());

                QName type = ObjectTypes.getObjectType(target.getCompileTimeClass()).getTypeQName();
                repo.setTargetType(ClassMapper.getHQLTypeForQName(type));
            }
            if (record.getTargetOwner() != null) {
                PrismObject targetOwner = record.getTargetOwner();
                repo.setTargetOwnerName(getOrigName(targetOwner));
                repo.setTargetOwnerOid(targetOwner.getOid());
            }
            if (record.getInitiator() != null) {
                PrismObject<UserType> initiator = record.getInitiator();
                repo.setInitiatorName(getOrigName(initiator));
                repo.setInitiatorOid(initiator.getOid());
            }

            for (ObjectDeltaOperation<?> delta : record.getDeltas()) {
                if (delta == null) {
                    continue;
                }

                RObjectDeltaOperation rDelta = RObjectDeltaOperation.toRepo(repo, delta, prismContext);
                rDelta.setTransient(true);
                rDelta.setRecord(repo);
                repo.getDeltas().add(rDelta);
            }
        } catch (Exception ex) {
            throw new DtoTranslationException(ex.getMessage(), ex);
        }

        return repo;
    }

    /**
     * Reconstructs an API-level {@link AuditEventRecord} from its repository
     * representation. Deltas that fail to parse are silently skipped.
     */
    public static AuditEventRecord fromRepo(RAuditEventRecord repo, PrismContext prismContext) throws DtoTranslationException {
        AuditEventRecord audit = new AuditEventRecord();
        audit.setChannel(repo.getChannel());
        audit.setEventIdentifier(repo.getEventIdentifier());
        if (repo.getEventStage() != null) {
            audit.setEventStage(repo.getEventStage().getStage());
        }
        if (repo.getEventType() != null) {
            audit.setEventType(repo.getEventType().getType());
        }
        audit.setHostIdentifier(repo.getHostIdentifier());
        audit.setMessage(repo.getMessage());
        if (repo.getOutcome() != null) {
            audit.setOutcome(repo.getOutcome().getStatus());
        }
        audit.setParameter(repo.getParameter());
        audit.setResult(repo.getResult());
        audit.setSessionIdentifier(repo.getSessionIdentifier());
        audit.setTaskIdentifier(repo.getTaskIdentifier());
        audit.setTaskOID(repo.getTaskOID());
        if (repo.getTimestamp() != null) {
            audit.setTimestamp(repo.getTimestamp().getTime());
        }

        List<ObjectDeltaOperation> odos = new ArrayList<ObjectDeltaOperation>();

        for (RObjectDeltaOperation rodo : repo.getDeltas()) {
            try {
                ObjectDeltaOperation odo = RObjectDeltaOperation.fromRepo(rodo, prismContext);
                if (odo != null) {
                    odos.add(odo);
                }
            } catch (Exception ex) {
                // TODO: deliberately best-effort for now — a delta that cannot
                // be parsed is skipped rather than failing the whole record.
                // Needs to be properly resolved later.
            }
        }

        audit.getDeltas().addAll((Collection) odos);

        return audit;
        // TODO: initiator, target and targetOwner are not yet restored here.
    }

    // Truncates overly long messages, replacing the tail with "..." so the
    // stored text stays within AuditService.MAX_MESSAGE_SIZE.
    private static String trimMessage(String message) {
        if (message == null || message.length() <= AuditService.MAX_MESSAGE_SIZE) {
            return message;
        }

        return message.substring(0, AuditService.MAX_MESSAGE_SIZE - 4) + "...";
    }

    // Extracts the "orig" form of the object's polystring name, or null when
    // the object has no name.
    private static String getOrigName(PrismObject object) {
        PolyString name = (PolyString) object.getPropertyRealValue(ObjectType.F_NAME, PolyString.class);
        return name != null ? name.getOrig() : null;
    }
}
| rpudil/midpoint | repo/repo-sql-impl/src/main/java/com/evolveum/midpoint/repo/sql/data/audit/RAuditEventRecord.java | Java | apache-2.0 | 16,740 |
/*
* Copyright 2014-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.worklink.model;
import java.io.Serializable;
import javax.annotation.Generated;
/**
*
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/worklink-2018-09-25/DescribeDevice" target="_top">AWS API
* Documentation</a>
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class DescribeDeviceResult extends com.amazonaws.AmazonWebServiceResult<com.amazonaws.ResponseMetadata> implements Serializable, Cloneable {
/**
* <p>
* The current state of the device.
* </p>
*/
private String status;
/**
* <p>
* The model of the device.
* </p>
*/
private String model;
/**
* <p>
* The manufacturer of the device.
* </p>
*/
private String manufacturer;
/**
* <p>
* The operating system of the device.
* </p>
*/
private String operatingSystem;
/**
* <p>
* The operating system version of the device.
* </p>
*/
private String operatingSystemVersion;
/**
* <p>
* The operating system patch level of the device.
* </p>
*/
private String patchLevel;
/**
* <p>
* The date that the device first signed in to Amazon WorkLink.
* </p>
*/
private java.util.Date firstAccessedTime;
/**
* <p>
* The date that the device last accessed Amazon WorkLink.
* </p>
*/
private java.util.Date lastAccessedTime;
/**
* <p>
* The user name associated with the device.
* </p>
*/
private String username;
/**
* <p>
* The current state of the device.
* </p>
*
* @param status
* The current state of the device.
* @see DeviceStatus
*/
public void setStatus(String status) {
this.status = status;
}
/**
* <p>
* The current state of the device.
* </p>
*
* @return The current state of the device.
* @see DeviceStatus
*/
public String getStatus() {
return this.status;
}
/**
* <p>
* The current state of the device.
* </p>
*
* @param status
* The current state of the device.
* @return Returns a reference to this object so that method calls can be chained together.
* @see DeviceStatus
*/
public DescribeDeviceResult withStatus(String status) {
setStatus(status);
return this;
}
/**
* <p>
* The current state of the device.
* </p>
*
* @param status
* The current state of the device.
* @return Returns a reference to this object so that method calls can be chained together.
* @see DeviceStatus
*/
public DescribeDeviceResult withStatus(DeviceStatus status) {
this.status = status.toString();
return this;
}
/**
* <p>
* The model of the device.
* </p>
*
* @param model
* The model of the device.
*/
public void setModel(String model) {
this.model = model;
}
/**
* <p>
* The model of the device.
* </p>
*
* @return The model of the device.
*/
public String getModel() {
return this.model;
}
/**
* <p>
* The model of the device.
* </p>
*
* @param model
* The model of the device.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public DescribeDeviceResult withModel(String model) {
setModel(model);
return this;
}
/**
* <p>
* The manufacturer of the device.
* </p>
*
* @param manufacturer
* The manufacturer of the device.
*/
public void setManufacturer(String manufacturer) {
this.manufacturer = manufacturer;
}
/**
* <p>
* The manufacturer of the device.
* </p>
*
* @return The manufacturer of the device.
*/
public String getManufacturer() {
return this.manufacturer;
}
/**
* <p>
* The manufacturer of the device.
* </p>
*
* @param manufacturer
* The manufacturer of the device.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public DescribeDeviceResult withManufacturer(String manufacturer) {
setManufacturer(manufacturer);
return this;
}
/**
* <p>
* The operating system of the device.
* </p>
*
* @param operatingSystem
* The operating system of the device.
*/
public void setOperatingSystem(String operatingSystem) {
this.operatingSystem = operatingSystem;
}
/**
* <p>
* The operating system of the device.
* </p>
*
* @return The operating system of the device.
*/
public String getOperatingSystem() {
return this.operatingSystem;
}
/**
* <p>
* The operating system of the device.
* </p>
*
* @param operatingSystem
* The operating system of the device.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public DescribeDeviceResult withOperatingSystem(String operatingSystem) {
setOperatingSystem(operatingSystem);
return this;
}
/**
* <p>
* The operating system version of the device.
* </p>
*
* @param operatingSystemVersion
* The operating system version of the device.
*/
public void setOperatingSystemVersion(String operatingSystemVersion) {
this.operatingSystemVersion = operatingSystemVersion;
}
/**
* <p>
* The operating system version of the device.
* </p>
*
* @return The operating system version of the device.
*/
public String getOperatingSystemVersion() {
return this.operatingSystemVersion;
}
/**
* <p>
* The operating system version of the device.
* </p>
*
* @param operatingSystemVersion
* The operating system version of the device.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public DescribeDeviceResult withOperatingSystemVersion(String operatingSystemVersion) {
setOperatingSystemVersion(operatingSystemVersion);
return this;
}
/**
* <p>
* The operating system patch level of the device.
* </p>
*
* @param patchLevel
* The operating system patch level of the device.
*/
public void setPatchLevel(String patchLevel) {
this.patchLevel = patchLevel;
}
/**
* <p>
* The operating system patch level of the device.
* </p>
*
* @return The operating system patch level of the device.
*/
public String getPatchLevel() {
return this.patchLevel;
}
/**
* <p>
* The operating system patch level of the device.
* </p>
*
* @param patchLevel
* The operating system patch level of the device.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public DescribeDeviceResult withPatchLevel(String patchLevel) {
setPatchLevel(patchLevel);
return this;
}
/**
* <p>
* The date that the device first signed in to Amazon WorkLink.
* </p>
*
* @param firstAccessedTime
* The date that the device first signed in to Amazon WorkLink.
*/
public void setFirstAccessedTime(java.util.Date firstAccessedTime) {
this.firstAccessedTime = firstAccessedTime;
}
/**
* <p>
* The date that the device first signed in to Amazon WorkLink.
* </p>
*
* @return The date that the device first signed in to Amazon WorkLink.
*/
    public java.util.Date getFirstAccessedTime() {
        // Returns the internal mutable Date reference (no defensive copy); may be null.
        return this.firstAccessedTime;
    }
/**
* <p>
* The date that the device first signed in to Amazon WorkLink.
* </p>
*
* @param firstAccessedTime
* The date that the device first signed in to Amazon WorkLink.
* @return Returns a reference to this object so that method calls can be chained together.
*/
    public DescribeDeviceResult withFirstAccessedTime(java.util.Date firstAccessedTime) {
        // Fluent variant: delegates to the setter, then returns this for chaining.
        setFirstAccessedTime(firstAccessedTime);
        return this;
    }
/**
* <p>
* The date that the device last accessed Amazon WorkLink.
* </p>
*
* @param lastAccessedTime
* The date that the device last accessed Amazon WorkLink.
*/
    public void setLastAccessedTime(java.util.Date lastAccessedTime) {
        // Stores the caller's mutable Date reference directly (no defensive copy).
        this.lastAccessedTime = lastAccessedTime;
    }
/**
* <p>
* The date that the device last accessed Amazon WorkLink.
* </p>
*
* @return The date that the device last accessed Amazon WorkLink.
*/
    public java.util.Date getLastAccessedTime() {
        // Returns the internal mutable Date reference (no defensive copy); may be null.
        return this.lastAccessedTime;
    }
/**
* <p>
* The date that the device last accessed Amazon WorkLink.
* </p>
*
* @param lastAccessedTime
* The date that the device last accessed Amazon WorkLink.
* @return Returns a reference to this object so that method calls can be chained together.
*/
    public DescribeDeviceResult withLastAccessedTime(java.util.Date lastAccessedTime) {
        // Fluent variant: delegates to the setter, then returns this for chaining.
        setLastAccessedTime(lastAccessedTime);
        return this;
    }
/**
* <p>
* The user name associated with the device.
* </p>
*
* @param username
* The user name associated with the device.
*/
    public void setUsername(String username) {
        // Plain generated bean setter; null is accepted.
        this.username = username;
    }
/**
* <p>
* The user name associated with the device.
* </p>
*
* @return The user name associated with the device.
*/
    public String getUsername() {
        // May be null (toString/equals below guard for it).
        return this.username;
    }
/**
* <p>
* The user name associated with the device.
* </p>
*
* @param username
* The user name associated with the device.
* @return Returns a reference to this object so that method calls can be chained together.
*/
    public DescribeDeviceResult withUsername(String username) {
        // Fluent variant: delegates to the setter, then returns this for chaining.
        setUsername(username);
        return this;
    }
/**
* Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
* redacted from this string using a placeholder value.
*
* @return A string representation of this object.
*
* @see java.lang.Object#toString()
*/
    @Override
    public String toString() {
        // Renders only the non-null fields. Note the generated format quirk:
        // every field except Username appends a trailing comma, so when
        // Username is null the closing brace is preceded by a comma
        // (e.g. "{Status: X,}"). Preserved exactly for compatibility.
        StringBuilder sb = new StringBuilder();
        sb.append("{");
        if (getStatus() != null)
            sb.append("Status: ").append(getStatus()).append(",");
        if (getModel() != null)
            sb.append("Model: ").append(getModel()).append(",");
        if (getManufacturer() != null)
            sb.append("Manufacturer: ").append(getManufacturer()).append(",");
        if (getOperatingSystem() != null)
            sb.append("OperatingSystem: ").append(getOperatingSystem()).append(",");
        if (getOperatingSystemVersion() != null)
            sb.append("OperatingSystemVersion: ").append(getOperatingSystemVersion()).append(",");
        if (getPatchLevel() != null)
            sb.append("PatchLevel: ").append(getPatchLevel()).append(",");
        if (getFirstAccessedTime() != null)
            sb.append("FirstAccessedTime: ").append(getFirstAccessedTime()).append(",");
        if (getLastAccessedTime() != null)
            sb.append("LastAccessedTime: ").append(getLastAccessedTime()).append(",");
        if (getUsername() != null)
            sb.append("Username: ").append(getUsername());
        sb.append("}");
        return sb.toString();
    }
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (obj instanceof DescribeDeviceResult == false)
return false;
DescribeDeviceResult other = (DescribeDeviceResult) obj;
if (other.getStatus() == null ^ this.getStatus() == null)
return false;
if (other.getStatus() != null && other.getStatus().equals(this.getStatus()) == false)
return false;
if (other.getModel() == null ^ this.getModel() == null)
return false;
if (other.getModel() != null && other.getModel().equals(this.getModel()) == false)
return false;
if (other.getManufacturer() == null ^ this.getManufacturer() == null)
return false;
if (other.getManufacturer() != null && other.getManufacturer().equals(this.getManufacturer()) == false)
return false;
if (other.getOperatingSystem() == null ^ this.getOperatingSystem() == null)
return false;
if (other.getOperatingSystem() != null && other.getOperatingSystem().equals(this.getOperatingSystem()) == false)
return false;
if (other.getOperatingSystemVersion() == null ^ this.getOperatingSystemVersion() == null)
return false;
if (other.getOperatingSystemVersion() != null && other.getOperatingSystemVersion().equals(this.getOperatingSystemVersion()) == false)
return false;
if (other.getPatchLevel() == null ^ this.getPatchLevel() == null)
return false;
if (other.getPatchLevel() != null && other.getPatchLevel().equals(this.getPatchLevel()) == false)
return false;
if (other.getFirstAccessedTime() == null ^ this.getFirstAccessedTime() == null)
return false;
if (other.getFirstAccessedTime() != null && other.getFirstAccessedTime().equals(this.getFirstAccessedTime()) == false)
return false;
if (other.getLastAccessedTime() == null ^ this.getLastAccessedTime() == null)
return false;
if (other.getLastAccessedTime() != null && other.getLastAccessedTime().equals(this.getLastAccessedTime()) == false)
return false;
if (other.getUsername() == null ^ this.getUsername() == null)
return false;
if (other.getUsername() != null && other.getUsername().equals(this.getUsername()) == false)
return false;
return true;
}
@Override
public int hashCode() {
final int prime = 31;
int hashCode = 1;
hashCode = prime * hashCode + ((getStatus() == null) ? 0 : getStatus().hashCode());
hashCode = prime * hashCode + ((getModel() == null) ? 0 : getModel().hashCode());
hashCode = prime * hashCode + ((getManufacturer() == null) ? 0 : getManufacturer().hashCode());
hashCode = prime * hashCode + ((getOperatingSystem() == null) ? 0 : getOperatingSystem().hashCode());
hashCode = prime * hashCode + ((getOperatingSystemVersion() == null) ? 0 : getOperatingSystemVersion().hashCode());
hashCode = prime * hashCode + ((getPatchLevel() == null) ? 0 : getPatchLevel().hashCode());
hashCode = prime * hashCode + ((getFirstAccessedTime() == null) ? 0 : getFirstAccessedTime().hashCode());
hashCode = prime * hashCode + ((getLastAccessedTime() == null) ? 0 : getLastAccessedTime().hashCode());
hashCode = prime * hashCode + ((getUsername() == null) ? 0 : getUsername().hashCode());
return hashCode;
}
    @Override
    public DescribeDeviceResult clone() {
        try {
            // Shallow copy via Object.clone(): field references (including the
            // mutable Date fields) are shared between the two instances.
            return (DescribeDeviceResult) super.clone();
        } catch (CloneNotSupportedException e) {
            // Unreachable in practice; rethrow as unchecked with the cause preserved.
            throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e);
        }
    }
}
| jentfoo/aws-sdk-java | aws-java-sdk-worklink/src/main/java/com/amazonaws/services/worklink/model/DescribeDeviceResult.java | Java | apache-2.0 | 16,586 |
// Java Genetic Algorithm Library.
// Copyright (c) 2017 Franz Wilhelmstötter
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Author:
// Franz Wilhelmstötter (franz.wilhelmstoetter@gmx.at)
using System;
using System.Collections.Generic;
using Jenetics.Internal.Util;
using Jenetics.Util;
namespace Jenetics
{
[Serializable]
public class DoubleChromosome : BoundedChromosomeBase<double, DoubleGene>,
    INumericChromosome<double, DoubleGene>
{
    /// <summary>
    /// Wraps an existing gene sequence verbatim (no re-validation here).
    /// </summary>
    private DoubleChromosome(IImmutableSeq<DoubleGene> genes) : base(genes)
    {
    }

    /// <summary>
    /// Creates a chromosome of <paramref name="length"/> genes produced by
    /// <c>DoubleGene.Seq</c> for the given bounds, and marks it valid.
    /// </summary>
    public DoubleChromosome(double min, double max, int length = 1) : this(DoubleGene.Seq(min, max, length))
    {
        Valid = true;
    }

    /// <summary>Enumerates the genes of this chromosome.</summary>
    public override IEnumerator<DoubleGene> GetEnumerator() => Genes.GetEnumerator();

    /// <summary>Creates a fresh chromosome with the same bounds and length.</summary>
    public override IChromosome<DoubleGene> NewInstance() => new DoubleChromosome(Min, Max, Length);

    /// <summary>Creates a chromosome around the supplied gene sequence.</summary>
    public override IChromosome<DoubleGene> NewInstance(IImmutableSeq<DoubleGene> genes) => new DoubleChromosome(genes);

    /// <summary>Factory: single gene with the given bounds.</summary>
    public static DoubleChromosome Of(double min, double max) => new DoubleChromosome(min, max);

    /// <summary>Factory: <paramref name="length"/> genes with the given bounds.</summary>
    public static DoubleChromosome Of(double min, double max, int length) => new DoubleChromosome(min, max, length);

    /// <summary>Factory: bounds taken from <paramref name="range"/>.</summary>
    public static DoubleChromosome Of(DoubleRange range) => new DoubleChromosome(range.Min, range.Max);

    /// <summary>Factory: wraps the given genes verbatim.</summary>
    public static DoubleChromosome Of(params DoubleGene[] genes) => new DoubleChromosome(ImmutableSeq.Of(genes));

    public override bool Equals(object obj) => Equality.Of(this, obj)(base.Equals);

    public override int GetHashCode() => Hash.Of(GetType()).And(base.GetHashCode()).Value;
}
} | rmeindl/jenetics.net | src/core/Jenetics/DoubleChromosome.cs | C# | apache-2.0 | 2,538 |
/*
* Copyright 2012-2014 Netherlands eScience Center.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* You may obtain a copy of the License at the following location:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* For the full license, see: LICENSE.txt (located in the root folder of this distribution).
* ---
*/
// source:
package nl.esciencecenter.ptk.web;
/**
* Interface for Managed HTTP Streams.
*/
public interface WebStream {

    /**
     * Whether this stream should be closed automatically by its manager.
     * NOTE(review): the exact close semantics are defined by implementations —
     * confirm against callers before relying on a particular behavior.
     */
    public boolean autoClose();

}
| NLeSC/Platinum | ptk-web/src/main/java/nl/esciencecenter/ptk/web/WebStream.java | Java | apache-2.0 | 941 |
<?php
/**
* This file is part of the SevenShores/NetSuite library
* AND originally from the NetSuite PHP Toolkit.
*
* New content:
* @package ryanwinchester/netsuite-php
* @copyright Copyright (c) Ryan Winchester
* @license http://www.apache.org/licenses/LICENSE-2.0 Apache-2.0
* @link https://github.com/ryanwinchester/netsuite-php
*
* Original content:
* @copyright Copyright (c) NetSuite Inc.
* @license https://raw.githubusercontent.com/ryanwinchester/netsuite-php/master/original/NetSuite%20Application%20Developer%20License%20Agreement.txt
* @link http://www.netsuite.com/portal/developers/resources/suitetalk-sample-applications.shtml
*
* generated: 2020-04-10 09:56:55 PM UTC
*/
namespace NetSuite\Classes;
/**
 * NetSuite <b>Customer</b> record.
 *
 * Generated data-transfer class: every public property corresponds to a field
 * of the SuiteTalk Customer record, and {@see Customer::$paramtypesmap} maps
 * each property name to its NetSuite type name (presumably consumed by the
 * toolkit's SOAP (de)serialization layer — see the NetSuite PHP toolkit).
 * Do not edit properties without updating $paramtypesmap in lockstep.
 */
class Customer extends Record {
    /** @var \NetSuite\Classes\RecordRef */
    public $customForm;
    /** @var string */
    public $entityId;
    /** @var string */
    public $altName;
    /** @var boolean */
    public $isPerson;
    /** @var string */
    public $phoneticName;
    /** @var string */
    public $salutation;
    /** @var string */
    public $firstName;
    /** @var string */
    public $middleName;
    /** @var string */
    public $lastName;
    /** @var string */
    public $companyName;
    /** @var \NetSuite\Classes\RecordRef */
    public $entityStatus;
    /** @var \NetSuite\Classes\RecordRef */
    public $parent;
    /** @var string */
    public $phone;
    /** @var string */
    public $fax;
    /** @var string */
    public $email;
    /** @var string */
    public $url;
    /** @var string */
    public $defaultAddress;
    /** @var boolean */
    public $isInactive;
    /** @var \NetSuite\Classes\RecordRef */
    public $category;
    /** @var string */
    public $title;
    /** @var string */
    public $printOnCheckAs;
    /** @var string */
    public $altPhone;
    /** @var string */
    public $homePhone;
    /** @var string */
    public $mobilePhone;
    /** @var string */
    public $altEmail;
    /** @var \NetSuite\Classes\Language */
    public $language;
    /** @var string */
    public $comments;
    /** @var \NetSuite\Classes\CustomerNumberFormat */
    public $numberFormat;
    /** @var \NetSuite\Classes\CustomerNegativeNumberFormat */
    public $negativeNumberFormat;
    /** @var string */
    public $dateCreated;
    /** @var \NetSuite\Classes\RecordRef */
    public $image;
    /** @var \NetSuite\Classes\EmailPreference */
    public $emailPreference;
    /** @var \NetSuite\Classes\RecordRef */
    public $subsidiary;
    /** @var \NetSuite\Classes\RecordRef */
    public $representingSubsidiary;
    /** @var \NetSuite\Classes\RecordRef */
    public $salesRep;
    /** @var \NetSuite\Classes\RecordRef */
    public $territory;
    /** @var string */
    public $contribPct;
    /** @var \NetSuite\Classes\RecordRef */
    public $partner;
    /** @var \NetSuite\Classes\RecordRef */
    public $salesGroup;
    /** @var string */
    public $vatRegNumber;
    /** @var string */
    public $accountNumber;
    /** @var boolean */
    public $taxExempt;
    /** @var \NetSuite\Classes\RecordRef */
    public $terms;
    /** @var float */
    public $creditLimit;
    /** @var \NetSuite\Classes\CustomerCreditHoldOverride */
    public $creditHoldOverride;
    /** @var \NetSuite\Classes\CustomerMonthlyClosing */
    public $monthlyClosing;
    /** @var boolean */
    public $overrideCurrencyFormat;
    /** @var string */
    public $displaySymbol;
    /** @var \NetSuite\Classes\CurrencySymbolPlacement */
    public $symbolPlacement;
    /** @var float */
    public $balance;
    /** @var float */
    public $overdueBalance;
    /** @var integer */
    public $daysOverdue;
    /** @var float */
    public $unbilledOrders;
    /** @var float */
    public $consolUnbilledOrders;
    /** @var float */
    public $consolOverdueBalance;
    /** @var float */
    public $consolDepositBalance;
    /** @var float */
    public $consolBalance;
    /** @var float */
    public $consolAging;
    /** @var float */
    public $consolAging1;
    /** @var float */
    public $consolAging2;
    /** @var float */
    public $consolAging3;
    /** @var float */
    public $consolAging4;
    /** @var integer */
    public $consolDaysOverdue;
    /** @var \NetSuite\Classes\RecordRef */
    public $priceLevel;
    /** @var \NetSuite\Classes\RecordRef */
    public $currency;
    /** @var \NetSuite\Classes\RecordRef */
    public $prefCCProcessor;
    /** @var float */
    public $depositBalance;
    /** @var boolean */
    public $shipComplete;
    /** @var boolean */
    public $taxable;
    /** @var \NetSuite\Classes\RecordRef */
    public $taxItem;
    /** @var string */
    public $resaleNumber;
    /** @var float */
    public $aging;
    /** @var float */
    public $aging1;
    /** @var float */
    public $aging2;
    /** @var float */
    public $aging3;
    /** @var float */
    public $aging4;
    /** @var string */
    public $startDate;
    /** @var \NetSuite\Classes\AlcoholRecipientType */
    public $alcoholRecipientType;
    /** @var string */
    public $endDate;
    /** @var integer */
    public $reminderDays;
    /** @var \NetSuite\Classes\RecordRef */
    public $shippingItem;
    /** @var string */
    public $thirdPartyAcct;
    /** @var string */
    public $thirdPartyZipcode;
    /** @var \NetSuite\Classes\Country */
    public $thirdPartyCountry;
    /** @var boolean */
    public $giveAccess;
    /** @var float */
    public $estimatedBudget;
    /** @var \NetSuite\Classes\RecordRef */
    public $accessRole;
    /** @var boolean */
    public $sendEmail;
    /** @var \NetSuite\Classes\RecordRef */
    public $assignedWebSite;
    /** @var string */
    public $password;
    /** @var string */
    public $password2;
    /** @var boolean */
    public $requirePwdChange;
    /** @var \NetSuite\Classes\RecordRef */
    public $campaignCategory;
    /** @var \NetSuite\Classes\RecordRef */
    public $sourceWebSite;
    /** @var \NetSuite\Classes\RecordRef */
    public $leadSource;
    /** @var \NetSuite\Classes\RecordRef */
    public $receivablesAccount;
    /** @var \NetSuite\Classes\RecordRef */
    public $drAccount;
    /** @var \NetSuite\Classes\RecordRef */
    public $fxAccount;
    /** @var float */
    public $defaultOrderPriority;
    /** @var string */
    public $webLead;
    /** @var string */
    public $referrer;
    /** @var string */
    public $keywords;
    /** @var string */
    public $clickStream;
    /** @var string */
    public $lastPageVisited;
    /** @var integer */
    public $visits;
    /** @var string */
    public $firstVisit;
    /** @var string */
    public $lastVisit;
    /** @var boolean */
    public $billPay;
    /** @var float */
    public $openingBalance;
    /** @var string */
    public $lastModifiedDate;
    /** @var string */
    public $openingBalanceDate;
    /** @var \NetSuite\Classes\RecordRef */
    public $openingBalanceAccount;
    /** @var \NetSuite\Classes\CustomerStage */
    public $stage;
    /** @var boolean */
    public $emailTransactions;
    /** @var boolean */
    public $printTransactions;
    /** @var boolean */
    public $faxTransactions;
    /** @var \NetSuite\Classes\RecordRef */
    public $defaultTaxReg;
    /** @var boolean */
    public $syncPartnerTeams;
    /** @var boolean */
    public $isBudgetApproved;
    /** @var \NetSuite\Classes\GlobalSubscriptionStatus */
    public $globalSubscriptionStatus;
    /** @var \NetSuite\Classes\RecordRef */
    public $salesReadiness;
    /** @var \NetSuite\Classes\CustomerSalesTeamList */
    public $salesTeamList;
    /** @var \NetSuite\Classes\RecordRef */
    public $buyingReason;
    /** @var \NetSuite\Classes\CustomerDownloadList */
    public $downloadList;
    /** @var \NetSuite\Classes\RecordRef */
    public $buyingTimeFrame;
    /** @var \NetSuite\Classes\CustomerAddressbookList */
    public $addressbookList;
    /** @var \NetSuite\Classes\SubscriptionsList */
    public $subscriptionsList;
    /** @var \NetSuite\Classes\ContactAccessRolesList */
    public $contactRolesList;
    /** @var \NetSuite\Classes\CustomerCurrencyList */
    public $currencyList;
    /** @var \NetSuite\Classes\CustomerCreditCardsList */
    public $creditCardsList;
    /** @var \NetSuite\Classes\CustomerPartnersList */
    public $partnersList;
    /** @var \NetSuite\Classes\CustomerGroupPricingList */
    public $groupPricingList;
    /** @var \NetSuite\Classes\CustomerItemPricingList */
    public $itemPricingList;
    /** @var \NetSuite\Classes\CustomerTaxRegistrationList */
    public $taxRegistrationList;
    /** @var \NetSuite\Classes\CustomFieldList */
    public $customFieldList;
    /** @var string */
    public $internalId;
    /** @var string */
    public $externalId;

    // Property-name => NetSuite-type map; must stay in sync with the
    // declarations above (generated together with them).
    static $paramtypesmap = array(
        "customForm" => "RecordRef",
        "entityId" => "string",
        "altName" => "string",
        "isPerson" => "boolean",
        "phoneticName" => "string",
        "salutation" => "string",
        "firstName" => "string",
        "middleName" => "string",
        "lastName" => "string",
        "companyName" => "string",
        "entityStatus" => "RecordRef",
        "parent" => "RecordRef",
        "phone" => "string",
        "fax" => "string",
        "email" => "string",
        "url" => "string",
        "defaultAddress" => "string",
        "isInactive" => "boolean",
        "category" => "RecordRef",
        "title" => "string",
        "printOnCheckAs" => "string",
        "altPhone" => "string",
        "homePhone" => "string",
        "mobilePhone" => "string",
        "altEmail" => "string",
        "language" => "Language",
        "comments" => "string",
        "numberFormat" => "CustomerNumberFormat",
        "negativeNumberFormat" => "CustomerNegativeNumberFormat",
        "dateCreated" => "dateTime",
        "image" => "RecordRef",
        "emailPreference" => "EmailPreference",
        "subsidiary" => "RecordRef",
        "representingSubsidiary" => "RecordRef",
        "salesRep" => "RecordRef",
        "territory" => "RecordRef",
        "contribPct" => "string",
        "partner" => "RecordRef",
        "salesGroup" => "RecordRef",
        "vatRegNumber" => "string",
        "accountNumber" => "string",
        "taxExempt" => "boolean",
        "terms" => "RecordRef",
        "creditLimit" => "float",
        "creditHoldOverride" => "CustomerCreditHoldOverride",
        "monthlyClosing" => "CustomerMonthlyClosing",
        "overrideCurrencyFormat" => "boolean",
        "displaySymbol" => "string",
        "symbolPlacement" => "CurrencySymbolPlacement",
        "balance" => "float",
        "overdueBalance" => "float",
        "daysOverdue" => "integer",
        "unbilledOrders" => "float",
        "consolUnbilledOrders" => "float",
        "consolOverdueBalance" => "float",
        "consolDepositBalance" => "float",
        "consolBalance" => "float",
        "consolAging" => "float",
        "consolAging1" => "float",
        "consolAging2" => "float",
        "consolAging3" => "float",
        "consolAging4" => "float",
        "consolDaysOverdue" => "integer",
        "priceLevel" => "RecordRef",
        "currency" => "RecordRef",
        "prefCCProcessor" => "RecordRef",
        "depositBalance" => "float",
        "shipComplete" => "boolean",
        "taxable" => "boolean",
        "taxItem" => "RecordRef",
        "resaleNumber" => "string",
        "aging" => "float",
        "aging1" => "float",
        "aging2" => "float",
        "aging3" => "float",
        "aging4" => "float",
        "startDate" => "dateTime",
        "alcoholRecipientType" => "AlcoholRecipientType",
        "endDate" => "dateTime",
        "reminderDays" => "integer",
        "shippingItem" => "RecordRef",
        "thirdPartyAcct" => "string",
        "thirdPartyZipcode" => "string",
        "thirdPartyCountry" => "Country",
        "giveAccess" => "boolean",
        "estimatedBudget" => "float",
        "accessRole" => "RecordRef",
        "sendEmail" => "boolean",
        "assignedWebSite" => "RecordRef",
        "password" => "string",
        "password2" => "string",
        "requirePwdChange" => "boolean",
        "campaignCategory" => "RecordRef",
        "sourceWebSite" => "RecordRef",
        "leadSource" => "RecordRef",
        "receivablesAccount" => "RecordRef",
        "drAccount" => "RecordRef",
        "fxAccount" => "RecordRef",
        "defaultOrderPriority" => "float",
        "webLead" => "string",
        "referrer" => "string",
        "keywords" => "string",
        "clickStream" => "string",
        "lastPageVisited" => "string",
        "visits" => "integer",
        "firstVisit" => "dateTime",
        "lastVisit" => "dateTime",
        "billPay" => "boolean",
        "openingBalance" => "float",
        "lastModifiedDate" => "dateTime",
        "openingBalanceDate" => "dateTime",
        "openingBalanceAccount" => "RecordRef",
        "stage" => "CustomerStage",
        "emailTransactions" => "boolean",
        "printTransactions" => "boolean",
        "faxTransactions" => "boolean",
        "defaultTaxReg" => "RecordRef",
        "syncPartnerTeams" => "boolean",
        "isBudgetApproved" => "boolean",
        "globalSubscriptionStatus" => "GlobalSubscriptionStatus",
        "salesReadiness" => "RecordRef",
        "salesTeamList" => "CustomerSalesTeamList",
        "buyingReason" => "RecordRef",
        "downloadList" => "CustomerDownloadList",
        "buyingTimeFrame" => "RecordRef",
        "addressbookList" => "CustomerAddressbookList",
        "subscriptionsList" => "SubscriptionsList",
        "contactRolesList" => "ContactAccessRolesList",
        "currencyList" => "CustomerCurrencyList",
        "creditCardsList" => "CustomerCreditCardsList",
        "partnersList" => "CustomerPartnersList",
        "groupPricingList" => "CustomerGroupPricingList",
        "itemPricingList" => "CustomerItemPricingList",
        "taxRegistrationList" => "CustomerTaxRegistrationList",
        "customFieldList" => "CustomFieldList",
        "internalId" => "string",
        "externalId" => "string",
    );
}
| RyanWinchester/netsuite-php | src/Classes/Customer.php | PHP | apache-2.0 | 15,854 |
package lm.com.framework.encrypt;
import java.io.IOException;
import java.security.SecureRandom;
import javax.crypto.Cipher;
import javax.crypto.SecretKey;
import javax.crypto.SecretKeyFactory;
import javax.crypto.spec.DESKeySpec;
import sun.misc.BASE64Decoder;
import sun.misc.BASE64Encoder;
/**
 * DES encryption/decryption helpers with Base64-encoded ciphertext.
 *
 * <p>SECURITY NOTE(review): DES has a 56-bit key and is considered broken;
 * prefer AES for new code. Kept here because existing callers and stored
 * ciphertext depend on it.</p>
 *
 * <p>Base64 handling was migrated from the JDK-internal
 * {@code sun.misc.BASE64Encoder/Decoder} (removed in JDK 9+) to
 * {@code java.util.Base64}. Decoding uses the MIME decoder, which tolerates
 * the line breaks the legacy encoder inserted, so previously produced
 * ciphertext still decodes; new ciphertext is emitted without line breaks.</p>
 */
public class DESEncrypt {

	/**
	 * JCE transformation name. Note: a bare "DES" resolves to a
	 * provider-default mode/padding (ECB/PKCS5Padding on the Oracle/OpenJDK
	 * providers) — kept for compatibility with existing ciphertext.
	 */
	private final static String DES = "DES";

	/**
	 * DES-encrypts {@code encryptString} with {@code key} and returns the
	 * ciphertext Base64-encoded (no line breaks).
	 *
	 * @param encryptString plaintext; encoded with the platform default
	 *                      charset (NOTE(review): not portable across
	 *                      platforms — confirm callers before changing)
	 * @param key           key material; must be at least 8 bytes (DES)
	 * @return Base64 ciphertext
	 * @throws Exception on any crypto failure (invalid key, padding, ...)
	 */
	public static String encode(String encryptString, String key) throws Exception {
		byte[] bt = encrypt(encryptString.getBytes(), key.getBytes());
		return java.util.Base64.getEncoder().encodeToString(bt);
	}

	/**
	 * Decodes Base64 ciphertext and DES-decrypts it with {@code key}.
	 *
	 * @param decryptString Base64 ciphertext; null/blank input yields ""
	 * @param key           key material; must be at least 8 bytes (DES)
	 * @return recovered plaintext (platform default charset)
	 * @throws Exception on any decode/crypto failure
	 */
	public static String decode(String decryptString, String key) throws IOException, Exception {
		if (decryptString == null || decryptString.trim().isEmpty())
			return "";
		// MIME decoder: lenient about the line breaks the legacy
		// sun.misc.BASE64Encoder inserted every 76 characters.
		byte[] buf = java.util.Base64.getMimeDecoder().decode(decryptString);
		byte[] bt = decrypt(buf, key.getBytes());
		return new String(bt);
	}

	/** Encrypts {@code data} with the given raw DES key bytes. */
	private static byte[] encrypt(byte[] data, byte[] key) throws Exception {
		Cipher cipher = cipherInit(data, key, Cipher.ENCRYPT_MODE);
		return cipher.doFinal(data);
	}

	/** Decrypts {@code data} with the given raw DES key bytes. */
	private static byte[] decrypt(byte[] data, byte[] key) throws Exception {
		Cipher cipher = cipherInit(data, key, Cipher.DECRYPT_MODE);
		return cipher.doFinal(data);
	}

	/**
	 * Builds a DES {@link Cipher} initialized for the given mode.
	 *
	 * @param cipherValue {@link Cipher#ENCRYPT_MODE} or {@link Cipher#DECRYPT_MODE}
	 */
	private static Cipher cipherInit(byte[] data, byte[] key, int cipherValue) throws Exception {
		// Trusted randomness source for key-dependent initialization.
		SecureRandom sr = new SecureRandom();
		// Turn the raw key bytes into a DES key (first 8 bytes are used).
		DESKeySpec dks = new DESKeySpec(key);
		SecretKeyFactory keyFactory = SecretKeyFactory.getInstance(DES);
		SecretKey securekey = keyFactory.generateSecret(dks);
		Cipher cipher = Cipher.getInstance(DES);
		cipher.init(cipherValue, securekey, sr);
		return cipher;
	}
}
| mrluo735/lm.cloudplat | common/lm.com.framework/src/main/java/lm/com/framework/encrypt/DESEncrypt.java | Java | apache-2.0 | 2,267 |
package com.desple.view;
import javax.imageio.ImageIO;
import javax.swing.*;
import java.awt.*;
import java.awt.image.BufferedImage;
import java.io.File;
import java.io.IOException;
/**
 * Swing panel that shows a {@link BufferedImage} scaled into a fixed
 * 512x512 preview area.
 */
public class PreviewImageCanvas extends JPanel {

    /** Fixed edge length (pixels) of the square preview area. */
    private static final int PREVIEW_SIZE = 512;

    // Currently displayed image; null until one is loaded or set.
    private BufferedImage image;

    /** Creates an empty canvas (no image shown until set/loaded). */
    public PreviewImageCanvas() {
        image = null;
    }

    @Override
    public Dimension getPreferredSize() {
        return new Dimension(PREVIEW_SIZE, PREVIEW_SIZE);
    }

    @Override
    public void paintComponent(Graphics g) {
        super.paintComponent(g);
        if (this.image != null) {
            // Always scale into the fixed preview square, ignoring the
            // image's aspect ratio (matches existing behavior).
            g.drawImage(this.image, 0, 0, PREVIEW_SIZE, PREVIEW_SIZE, this);
        }
    }

    /**
     * Loads the image at {@code imageLocation} and repaints.
     *
     * <p>NOTE(review): {@link ImageIO#read} returns null (rather than
     * throwing) when no registered reader understands the file, which
     * silently clears the preview — confirm whether callers expect that.</p>
     *
     * @param imageLocation path of the image file to load
     * @throws IOException if reading the file fails
     */
    public void loadImage(String imageLocation) throws IOException {
        this.image = ImageIO.read(new File(imageLocation));
        repaint();
    }

    /** @return the currently displayed image, or null when none is set */
    public BufferedImage getImage() {
        return this.image;
    }

    /** Replaces the displayed image (null clears it) and repaints. */
    public void setImage(BufferedImage image) {
        this.image = image;
        repaint();
    }
}
| thebillkidy/RandomProjects | FaceRecognition/Java/src/main/java/com/desple/view/PreviewImageCanvas.java | Java | apache-2.0 | 938 |
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rafthttp
import (
"errors"
"fmt"
"io/ioutil"
"net/http"
"path"
"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context"
pioutil "github.com/coreos/etcd/pkg/ioutil"
"github.com/coreos/etcd/pkg/types"
"github.com/coreos/etcd/raft/raftpb"
"github.com/coreos/etcd/snap"
"github.com/coreos/etcd/version"
)
const (
	// connReadLimitByte caps how many bytes a single request-body read may
	// return. 64KB is large enough not to bottleneck throughput, yet small
	// enough that a single read cannot block long enough to trip a read
	// timeout on a slow connection.
	connReadLimitByte = 64 * 1024
)
var (
	// URL path prefixes served by the raft transport handlers below.
	RaftPrefix         = "/raft"
	ProbingPrefix      = path.Join(RaftPrefix, "probing")
	RaftStreamPrefix   = path.Join(RaftPrefix, "stream")
	RaftSnapshotPrefix = path.Join(RaftPrefix, "snapshot")

	// Sentinel errors returned by the cluster-compatibility check.
	errIncompatibleVersion = errors.New("incompatible version")
	errClusterIDMismatch   = errors.New("cluster ID mismatch")
)
// peerGetter looks up the transport Peer for a member ID; callers treat a
// nil result as "unknown member" (see streamHandler.ServeHTTP).
type peerGetter interface {
	Get(id types.ID) Peer
}
// writerToResponse is implemented by errors (returned from Raft.Process)
// that know how to render themselves onto an HTTP response.
type writerToResponse interface {
	WriteTo(w http.ResponseWriter)
}
// pipelineHandler receives raft messages POSTed to RaftPrefix and feeds
// them to the local raft state machine.
type pipelineHandler struct {
	r   Raft
	cid types.ID // local cluster ID, echoed and validated on every request
}
// newPipelineHandler returns a handler for handling raft messages
// from pipeline for RaftPrefix.
//
// The handler reads out the raft message from request body,
// and forwards it to the given raft state machine for processing.
func newPipelineHandler(r Raft, cid types.ID) http.Handler {
	return &pipelineHandler{
		r:   r,
		cid: cid,
	}
}
// ServeHTTP reads one raft message from the POST body and hands it to the
// local raft state machine, replying 204 on success.
func (h *pipelineHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	// Only POST carries raft messages on the pipeline endpoint.
	if r.Method != "POST" {
		w.Header().Set("Allow", "POST")
		http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
		return
	}

	w.Header().Set("X-Etcd-Cluster-ID", h.cid.String())

	// Reject peers from a different cluster or an incompatible version.
	if err := checkClusterCompatibilityFromHeader(r.Header, h.cid); err != nil {
		http.Error(w, err.Error(), http.StatusPreconditionFailed)
		return
	}

	// Limit the data size that could be read from the request body, which ensures that read from
	// connection will not time out accidentally due to possible blocking in underlying implementation.
	limitedr := pioutil.NewLimitedBufferReader(r.Body, connReadLimitByte)
	b, err := ioutil.ReadAll(limitedr)
	if err != nil {
		plog.Errorf("failed to read raft message (%v)", err)
		http.Error(w, "error reading raft message", http.StatusBadRequest)
		return
	}
	var m raftpb.Message
	if err := m.Unmarshal(b); err != nil {
		plog.Errorf("failed to unmarshal raft message (%v)", err)
		http.Error(w, "error unmarshaling raft message", http.StatusBadRequest)
		return
	}
	if err := h.r.Process(context.TODO(), m); err != nil {
		switch v := err.(type) {
		case writerToResponse:
			// The error knows how to render its own HTTP response.
			v.WriteTo(w)
		default:
			plog.Warningf("failed to process raft message (%v)", err)
			http.Error(w, "error processing raft message", http.StatusInternalServerError)
		}
		return
	}

	// Write StatusNoContent header after the message has been processed by
	// raft, which facilitates the client to report MsgSnap status.
	w.WriteHeader(http.StatusNoContent)
}
// snapshotHandler receives snapshot messages POSTed to RaftSnapshotPrefix,
// persists the attached database snapshot, then forwards the message to raft.
type snapshotHandler struct {
	r           Raft
	snapshotter *snap.Snapshotter // persists the incoming DB snapshot body
	cid         types.ID          // local cluster ID, echoed and validated
}
// newSnapshotHandler returns the handler serving RaftSnapshotPrefix.
func newSnapshotHandler(r Raft, snapshotter *snap.Snapshotter, cid types.ID) http.Handler {
	return &snapshotHandler{
		r:           r,
		snapshotter: snapshotter,
		cid:         cid,
	}
}
// ServeHTTP serves HTTP request to receive and process snapshot message.
//
// If request sender dies without closing underlying TCP connection,
// the handler will keep waiting for the request body until TCP keepalive
// finds out that the connection is broken after several minutes.
// This is acceptable because
// 1. snapshot messages sent through other TCP connections could still be
// received and processed.
// 2. this case should happen rarely, so no further optimization is done.
func (h *snapshotHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if r.Method != "POST" {
		w.Header().Set("Allow", "POST")
		http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
		return
	}

	w.Header().Set("X-Etcd-Cluster-ID", h.cid.String())

	if err := checkClusterCompatibilityFromHeader(r.Header, h.cid); err != nil {
		http.Error(w, err.Error(), http.StatusPreconditionFailed)
		return
	}

	dec := &messageDecoder{r: r.Body}
	m, err := dec.decode()
	if err != nil {
		msg := fmt.Sprintf("failed to decode raft message (%v)", err)
		// Use plog.Error (not Errorf): msg is already formatted and may
		// contain '%' from the wrapped error, which Errorf would mangle.
		plog.Error(msg)
		http.Error(w, msg, http.StatusBadRequest)
		return
	}
	if m.Type != raftpb.MsgSnap {
		plog.Errorf("unexpected raft message type %s on snapshot path", m.Type)
		http.Error(w, "wrong raft message type", http.StatusBadRequest)
		return
	}

	// save incoming database snapshot.
	if err := h.snapshotter.SaveDBFrom(r.Body, m.Snapshot.Metadata.Index); err != nil {
		msg := fmt.Sprintf("failed to save KV snapshot (%v)", err)
		plog.Error(msg)
		http.Error(w, msg, http.StatusInternalServerError)
		return
	}
	plog.Infof("received and saved database snapshot [index: %d, from: %s] successfully", m.Snapshot.Metadata.Index, types.ID(m.From))

	if err := h.r.Process(context.TODO(), m); err != nil {
		switch v := err.(type) {
		// Process may return writerToResponse error when doing some
		// additional checks before calling raft.Node.Step.
		case writerToResponse:
			v.WriteTo(w)
		default:
			msg := fmt.Sprintf("failed to process raft message (%v)", err)
			// As above: pre-formatted message, so plog.Warning, not Warningf.
			plog.Warning(msg)
			http.Error(w, msg, http.StatusInternalServerError)
		}
		return
	}

	// Write StatusNoContent header after the message has been processed by
	// raft, which facilitates the client to report MsgSnap status.
	w.WriteHeader(http.StatusNoContent)
}
// streamHandler accepts long-lived GET requests on RaftStreamPrefix and
// attaches the resulting connection to the requesting peer as an outgoing
// stream.
type streamHandler struct {
	peerGetter peerGetter
	r          Raft
	id         types.ID // local member ID (must match the X-Raft-To header)
	cid        types.ID // local cluster ID, echoed and validated
}
// newStreamHandler returns the handler serving RaftStreamPrefix.
func newStreamHandler(peerGetter peerGetter, r Raft, id, cid types.ID) http.Handler {
	return &streamHandler{
		peerGetter: peerGetter,
		r:          r,
		id:         id,
		cid:        cid,
	}
}
// ServeHTTP validates the stream request (cluster, stream type, sender
// identity, intended receiver), then hands the connection over to the
// sender's Peer and blocks until the connection is closed.
func (h *streamHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if r.Method != "GET" {
		w.Header().Set("Allow", "GET")
		http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
		return
	}

	w.Header().Set("X-Server-Version", version.Version)
	w.Header().Set("X-Etcd-Cluster-ID", h.cid.String())

	if err := checkClusterCompatibilityFromHeader(r.Header, h.cid); err != nil {
		http.Error(w, err.Error(), http.StatusPreconditionFailed)
		return
	}

	// The parent path selects the stream protocol variant.
	var t streamType
	switch path.Dir(r.URL.Path) {
	case streamTypeMsgAppV2.endpoint():
		t = streamTypeMsgAppV2
	case streamTypeMessage.endpoint():
		t = streamTypeMessage
	default:
		plog.Debugf("ignored unexpected streaming request path %s", r.URL.Path)
		http.Error(w, "invalid path", http.StatusNotFound)
		return
	}

	// The last path element is the sender's member ID.
	fromStr := path.Base(r.URL.Path)
	from, err := types.IDFromString(fromStr)
	if err != nil {
		plog.Errorf("failed to parse from %s into ID (%v)", fromStr, err)
		http.Error(w, "invalid from", http.StatusNotFound)
		return
	}
	if h.r.IsIDRemoved(uint64(from)) {
		plog.Warningf("rejected the stream from peer %s since it was removed", from)
		http.Error(w, "removed member", http.StatusGone)
		return
	}
	p := h.peerGetter.Get(from)
	if p == nil {
		// This may happen in following cases:
		// 1. user starts a remote peer that belongs to a different cluster
		// with the same cluster ID.
		// 2. local etcd falls behind of the cluster, and cannot recognize
		// the members that joined after its current progress.
		plog.Errorf("failed to find member %s in cluster %s", from, h.cid)
		http.Error(w, "error sender not found", http.StatusNotFound)
		return
	}

	// The sender must have addressed this stream to the local member.
	wto := h.id.String()
	if gto := r.Header.Get("X-Raft-To"); gto != wto {
		plog.Errorf("streaming request ignored (ID mismatch got %s want %s)", gto, wto)
		http.Error(w, "to field mismatch", http.StatusPreconditionFailed)
		return
	}

	w.WriteHeader(http.StatusOK)
	w.(http.Flusher).Flush()

	// Hand the live connection to the peer; block until it is closed so the
	// HTTP handler keeps the underlying connection open for the stream.
	c := newCloseNotifier()
	conn := &outgoingConn{
		t:       t,
		Writer:  w,
		Flusher: w.(http.Flusher),
		Closer:  c,
	}
	p.attachOutgoingConn(conn)
	<-c.closeNotify()
}
// checkClusterCompatibilityFromHeader checks the cluster compatibility of
// the local member from the given header.
// It checks whether the version of local member is compatible with
// the versions in the header, and whether the cluster ID of local member
// matches the one in the header.
// Returns errIncompatibleVersion or errClusterIDMismatch on failure.
func checkClusterCompatibilityFromHeader(header http.Header, cid types.ID) error {
	// NOTE: checkVersionCompability (sic) is defined elsewhere in this package.
	if err := checkVersionCompability(header.Get("X-Server-From"), serverVersion(header), minClusterVersion(header)); err != nil {
		plog.Errorf("request version incompatibility (%v)", err)
		return errIncompatibleVersion
	}
	if gcid := header.Get("X-Etcd-Cluster-ID"); gcid != cid.String() {
		plog.Errorf("request cluster ID mismatch (got %s want %s)", gcid, cid)
		return errClusterIDMismatch
	}
	return nil
}
// closeNotifier is a minimal io.Closer whose Close call can be observed
// through a channel, used to block a stream handler until its connection
// is torn down.
type closeNotifier struct {
	done chan struct{} // closed exactly once by Close
}
// newCloseNotifier returns a ready-to-use closeNotifier.
func newCloseNotifier() *closeNotifier {
	n := new(closeNotifier)
	n.done = make(chan struct{})
	return n
}
// Close signals all closeNotify listeners. It must be called at most once;
// a second call would panic on the already-closed channel.
func (n *closeNotifier) Close() error {
	close(n.done)
	return nil
}

// closeNotify returns a channel that is closed when Close is called.
func (n *closeNotifier) closeNotify() <-chan struct{} { return n.done }
| fasaxc/etcd | rafthttp/http.go | GO | apache-2.0 | 9,699 |
<?php
// Static helper for normalizing and processing PHP file uploads ($_FILES).
class _Upload
{
    // Standardized copy of the $_FILES array, accumulated per field.
    // NOTE(review): this static cache is never reset, so calling
    // standardizeFileUploads() more than once in a request merges the
    // results of all calls — confirm that is intended.
    private static $files = array();
    /**
     * Takes a $_FILES array and standardizes it to be the same regardless of number of uploads
     *
     * Single uploads and multi-uploads (input name="field[]") arrive in
     * different shapes; this flattens both into one consistent structure.
     *
     * @param array $files Files array to standardize
     * @return array Standardized files array, keyed by field name
     */
    public static function standardizeFileUploads($files=array())
    {
        // nothing uploaded: hand back the (empty) input unchanged
        if (!count($files)) {
            return $files;
        }

        // loop through files to standardize
        foreach ($files as $field => $data) {
            if (!isset(self::$files[$field]) || !is_array(self::$files[$field])) {
                self::$files[$field] = array();
            }

            // restrict to the five standard upload keys, in a fixed order
            $data = array(
                'name' => $data['name'],
                'type' => $data['type'],
                'tmp_name' => $data['tmp_name'],
                'size' => $data['size'],
                'error' => $data['error']
            );

            // loop through _FILES to standardize
            foreach ($data as $key => $value) {
                self::buildFileArray($key, $value, self::$files[$field], $field);
            }
        }

        // return our cleaner version
        return self::$files;
    }
    /**
     * Recursively builds an array of files
     *
     * Walks a possibly-nested $_FILES value (arrays for multi-uploads,
     * scalars for single uploads) and writes the scalar leaves into
     * $output under their upload key, adding derived fields alongside.
     *
     * @param string $key Upload key that we're processing ('name', 'size', ...)
     * @param mixed $value Either a string or an array of the value
     * @param array $output The referenced array object for manipulation
     * @param string $path A string for colon-delimited path searching
     *                     (threaded through the recursion; not read here)
     * @return void
     */
    private static function buildFileArray($key, $value, &$output, $path)
    {
        if (is_array($value)) {
            // multi-upload: recurse into each sub-entry, mirroring its key
            foreach ($value as $sub_key => $sub_value) {
                if (!isset($output[$sub_key]) || !is_array($output[$sub_key])) {
                    $output[$sub_key] = array();
                }
                $new_path = (empty($path)) ? $sub_key : $path . ':' . $sub_key;
                self::buildFileArray($key, $sub_value, $output[$sub_key], $new_path);
            }
        } else {
            $output[$key] = $value;

            // derive extra fields from the raw upload values
            if ($key === 'error') {
                // add human-readable error message and a success flag
                $error_message = self::getFriendlyErrorMessage($value);
                $success_status = ($value === UPLOAD_ERR_OK);

                $output['error_message'] = $error_message;
                $output['success'] = $success_status;
            } elseif ($key === 'size') {
                $human_readable_size = File::getHumanSize($value);
                $output['size_human_readable'] = $human_readable_size;
            }
        }
    }
    /**
     * Create friendly error messages for upload issues
     *
     * Maps a PHP UPLOAD_ERR_* constant to a localized message;
     * UPLOAD_ERR_OK maps to an empty string.
     *
     * @param int $error Error int (one of PHP's UPLOAD_ERR_* constants)
     * @return string Localized message, or '' on success
     */
    private static function getFriendlyErrorMessage($error)
    {
        // these errors are PHP-based; strict === keeps non-int input from matching
        if ($error === UPLOAD_ERR_OK) {
            return '';
        } elseif ($error === UPLOAD_ERR_INI_SIZE) {
            return Localization::fetch('upload_error_ini_size');
        } elseif ($error === UPLOAD_ERR_FORM_SIZE) {
            return Localization::fetch('upload_error_form_size');
        } elseif ($error === UPLOAD_ERR_PARTIAL) {
            return Localization::fetch('upload_error_err_partial');
        } elseif ($error === UPLOAD_ERR_NO_FILE) {
            return Localization::fetch('upload_error_no_file');
        } elseif ($error === UPLOAD_ERR_NO_TMP_DIR) {
            return Localization::fetch('upload_error_no_temp_dir');
        } elseif ($error === UPLOAD_ERR_CANT_WRITE) {
            return Localization::fetch('upload_error_cant_write');
        } elseif ($error === UPLOAD_ERR_EXTENSION) {
            return Localization::fetch('upload_error_extension');
        } else {
            // we should never, ever see this
            return Localization::fetch('upload_error_unknown');
        }
    }
    /**
     * Upload file(s)
     *
     * Moves each uploaded file for the given field to $destination and,
     * when the 'resize' request parameter is set, also writes a resized
     * copy into a 'resized' subfolder and returns that path instead.
     *
     * @param string $destination Where the file is going (falls back to the
     *                            'destination' request parameter)
     * @param string $id The field to look at in the files array (falls back
     *                   to the 'id' request parameter)
     * @return array List of ['path' => ..., 'name' => ...] per uploaded file
     */
    public static function uploadBatch($destination = null, $id = null)
    {
        $destination = $destination ?: Request::get('destination');
        $id = $id ?: Request::get('id');

        $files = self::standardizeFileUploads($_FILES);
        $results = array();

        // Resizing configuration
        if ($resize = Request::get('resize')) {
            $width = Request::get('width', null);
            $height = Request::get('height', null);
            $ratio = Request::get('ratio', true);
            $upsize = Request::get('upsize', false);
            $quality = Request::get('quality', '75');
        }

        // If $files[$id][0] exists, it means there's an array of images.
        // If there's not, there's just one. We want to change this to an array.
        // NOTE(review): this assumes single uploads never produce a numeric
        // 0 key of their own — confirm against standardizeFileUploads output.
        if ( ! isset($files[$id][0])) {
            $tmp = $files[$id];
            unset($files[$id]);
            $files[$id][] = $tmp;
        }

        // Process each image
        foreach ($files[$id] as $file) {
            // Image data
            $path = File::upload($file, $destination);
            $name = basename($path);

            // Resize
            if ($resize) {
                $image = \Intervention\Image\Image::make(Path::assemble(BASE_PATH, $path));
                $resize_folder = Path::assemble($image->dirname, 'resized');
                if ( ! Folder::exists($resize_folder)) {
                    Folder::make($resize_folder);
                }
                // the resized copy, not the original, is what gets reported back
                $resize_path = Path::assemble($resize_folder, $image->basename);
                $path = Path::toAsset($resize_path);
                $name = basename($path);
                $image->resize($width, $height, $ratio, $upsize)->save($resize_path, $quality);
            }

            $results[] = compact('path', 'name');
        }

        return $results;
    }
} | kwanpt/blog | _app/core/private_api/_upload.php | PHP | apache-2.0 | 6,075 |
package example.multiview;
import io.db.Connect;
import io.db.ConnectFactory;
import io.db.FormatResultSet;
import io.json.JSONStructureMaker;
import io.parcoord.db.MakeTableModel;
import java.awt.BasicStroke;
import java.awt.BorderLayout;
import java.awt.Color;
import java.awt.Dimension;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import java.io.File;
import java.io.IOException;
import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.Map.Entry;
import javax.swing.BorderFactory;
import javax.swing.Box;
import javax.swing.BoxLayout;
import javax.swing.JButton;
import javax.swing.JCheckBox;
import javax.swing.JFrame;
import javax.swing.JLabel;
import javax.swing.JPanel;
import javax.swing.JScrollPane;
import javax.swing.JSlider;
import javax.swing.JSplitPane;
import javax.swing.JTabbedPane;
import javax.swing.JTable;
import javax.swing.ListSelectionModel;
import javax.swing.SwingUtilities;
import javax.swing.event.ChangeEvent;
import javax.swing.event.ChangeListener;
import javax.swing.plaf.metal.MetalLookAndFeel;
import javax.swing.table.DefaultTableModel;
import javax.swing.table.TableModel;
import javax.swing.table.TableRowSorter;
import model.graph.Edge;
import model.graph.EdgeSetValueMaker;
import model.graph.GraphFilter;
import model.graph.GraphModel;
import model.graph.impl.SymmetricGraphInstance;
import model.matrix.DefaultMatrixTableModel;
import model.matrix.MatrixTableModel;
import model.shared.selection.LinkedGraphMatrixSelectionModelBridge;
import org.apache.log4j.Logger;
import org.apache.log4j.PropertyConfigurator;
import org.codehaus.jackson.JsonNode;
import org.codehaus.jackson.JsonParseException;
import org.codehaus.jackson.map.JsonMappingException;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.node.MissingNode;
import org.codehaus.jackson.node.ObjectNode;
import swingPlus.graph.GraphCellRenderer;
import swingPlus.graph.JGraph;
import swingPlus.graph.force.impl.BarnesHut2DForceCalculator;
import swingPlus.graph.force.impl.EdgeWeightedAttractor;
import swingPlus.matrix.JHeaderRenderer;
import swingPlus.matrix.JMatrix;
import swingPlus.parcoord.JColumnList;
import swingPlus.parcoord.JColumnList2;
import swingPlus.parcoord.JParCoord;
import swingPlus.shared.MyFrame;
import swingPlus.tablelist.ColumnSortControl;
import swingPlus.tablelist.JEditableVarColTable;
import ui.StackedRowTableUI;
import util.Messages;
import util.colour.ColorUtilities;
import util.ui.NewMetalTheme;
import util.ui.VerticalLabelUI;
import example.graph.renderers.node.NodeDegreeGraphCellRenderer;
import example.multiview.renderers.edge.EdgeCountFatEdgeRenderer;
import example.multiview.renderers.matrix.JSONObjHeaderRenderer;
import example.multiview.renderers.matrix.KeyedDataHeaderRenderer;
import example.multiview.renderers.matrix.NumberShadeRenderer;
import example.multiview.renderers.node.JSONNodeTypeGraphRenderer;
import example.multiview.renderers.node.JSONTooltipGraphCellRenderer;
import example.multiview.renderers.node.KeyedDataGraphCellRenderer;
import example.multiview.renderers.node.TableTooltipGraphCellRenderer;
import example.multiview.renderers.node.valuemakers.NodeTotalEdgeWeightValueMaker;
import example.tablelist.renderers.ColourBarCellRenderer;
/**
 * Multi-view visualisation of a publications database: loads people,
 * co-authorship and publications-per-year data via JDBC, then presents
 * linked node-link, matrix, stacked-table and parallel-coordinates views.
 */
public class NapierDBVis {

    static final Logger LOGGER = Logger.getLogger (NapierDBVis.class);
    /**
     * Application entry point: installs the custom Metal theme, configures
     * log4j from the bundled properties, and launches the visualisation.
     *
     * @param args command-line arguments (unused)
     */
    public static void main (final String[] args) {
        //final MetalLookAndFeel lf = new MetalLookAndFeel();
        MetalLookAndFeel.setCurrentTheme (new NewMetalTheme());
        PropertyConfigurator.configure (Messages.makeProperties ("log4j"));
        new NapierDBVis ();
    }
    /**
     * Loads the data models from the database and builds the Swing UI.
     * Work happens in two phases: JDBC loading on the calling thread,
     * then UI construction via SwingUtilities.invokeLater.
     */
    public NapierDBVis () {
        TableModel tableModel = null;
        GraphModel graph = null;
        TableModel listTableModel = null;
        MatrixTableModel matrixModel = null;
        Map<JsonNode, String> nodeTypeMap = null;  // stays null on the DB path; only set by the (disabled) JSON path below

        // Connection settings and SQL text come from bundled .properties files.
        final Properties connectionProperties = Messages.makeProperties ("dbconnect", this.getClass(), false);
        final Properties queryProperties = Messages.makeProperties ("queries", this.getClass(), false);
        final Connect connect = ConnectFactory.getConnect (connectionProperties);
        //ResultSet resultSet = null;
        Statement stmt;
        try {
            stmt = connect.getConnection().createStatement();
            //final ResultSet resultSet = stmt.executeQuery ("Select * from people where peopleid>0;");

            // People table: one row per person, column 0 = primary key.
            final String peopleDataQuery = queryProperties.get ("PeopleData").toString();
            System.err.println (peopleDataQuery);
            final ResultSet peopleDataResultSet = stmt.executeQuery (peopleDataQuery);
            final MakeTableModel mtm2 = new MakeTableModel();
            tableModel = mtm2.makeTable (peopleDataResultSet);

            //final ResultSet resultSet = stmt.executeQuery ("Select * from people where peopleid>0;");
            // Co-authorship pairs used as graph edges.
            final String pubJoinQuery = queryProperties.get ("PublicationJoin").toString();
            System.err.println (pubJoinQuery);
            final ResultSet pubJoinResultSet = stmt.executeQuery (pubJoinQuery);
            //FormatResultSet.getInstance().printResultSet (resultSet);
            final MakeTableModel mtm = new MakeTableModel();
            TableModel tableModel2 = mtm.makeTable (pubJoinResultSet);

            //final DatabaseMetaData dmd = connect.getConnection().getMetaData();
            //final ResultSet resultSet2 = dmd.getProcedures (connect.getConnection().getCatalog(), null, "%");
            //FormatResultSet.getInstance().printResultSet (resultSet2);

            // Publications-per-year counts, pivoted into a year x type table.
            final String pubsByYearQuery = queryProperties.get ("PubsByYear").toString();
            System.err.println (pubsByYearQuery);
            final ResultSet pubsByYearResultSet = stmt.executeQuery (pubsByYearQuery);
            final MakeTableModel mtm3 = new MakeTableModel();
            TableModel tableModel3 = mtm3.makeTable (pubsByYearResultSet);
            listTableModel = makePubByYearTable (tableModel3);

            // Build the co-authorship graph keyed by people PK (col 0), labelled by col 1.
            Map<Object, KeyedData> keyDataMap = makeKeyedDataMap (tableModel, 0, 1);
            graph = makeGraph (keyDataMap, "peopleid", tableModel2);
            matrixModel = new DefaultMatrixTableModel (graph);
        } catch (SQLException e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
            connect.close();
        }
        // NOTE(review): close() runs here and also in the catch block above,
        // so a failed load closes the connection twice — confirm Connect.close()
        // is idempotent.
        connect.close();

        System.err.println (tableModel == null ? "no model" : "tableModel rows: "+tableModel.getRowCount()+", cols: "+tableModel.getColumnCount());

        /*
        try {
            final ObjectMapper objMapper = new ObjectMapper ();
            final JsonNode rootNode = objMapper.readValue (new File (fileName), JsonNode.class);
            LOGGER.info ("rootnode: "+rootNode);
            final JSONStructureMaker structureMaker = new JSONStructureMaker (rootNode);
            graph = structureMaker.makeGraph (new String[] {"people"}, new String[] {"publications", "grants"});
            //graph = structureMaker.makeGraph (new String[] {"grants"}, new String[] {"publications", "people"});
            //graph = structureMaker.makeGraph (new String[] {"publications", "people", "grants"}, new String[] {"people"});
            //tableModel = structureMaker.makeTable ("publications");
            tableModel = structureMaker.makeTable ("people");
            matrixModel = new DefaultMatrixTableModel (graph);
            nodeTypeMap = structureMaker.makeNodeTypeMap (new String[] {"publications", "people", "grants"});
        } catch (JsonParseException e) {
            e.printStackTrace();
        } catch (JsonMappingException e) {
            e.printStackTrace();
        } catch (IOException e) {
            e.printStackTrace();
        }
        */

        // NOTE(review): keyRowMap is only referenced by commented-out code below.
        Map<Object, Integer> keyRowMap = makeKeyRowMap (tableModel, 0);

        // ---- Node-link view: force-directed graph with weighted edges. ----
        final JGraph jgraph = new JGraph (graph);
        final EdgeWeightedAttractor edgeWeighter = new EdgeWeightedAttractor ();
        jgraph.setAttractiveForceCalculator (edgeWeighter);
        jgraph.setShowEdges (true);
        final EdgeSetValueMaker weightedEdgeMaker = new NodeTotalEdgeWeightValueMaker ();
        //final GraphCellRenderer tableTupleRenderer = new TableTupleGraphRenderer (tableModel, keyRowMap);
        final GraphCellRenderer jsonGraphRenderer = new JSONNodeTypeGraphRenderer (nodeTypeMap);
        jgraph.setDefaultNodeRenderer (String.class, new NodeDegreeGraphCellRenderer (10.0));
        jgraph.setDefaultNodeRenderer (JsonNode.class, jsonGraphRenderer);
        jgraph.setDefaultNodeRenderer (ObjectNode.class, jsonGraphRenderer);
        jgraph.setDefaultNodeRenderer (KeyedData.class, new KeyedDataGraphCellRenderer (weightedEdgeMaker));
        jgraph.setDefaultEdgeRenderer (Integer.class, new EdgeCountFatEdgeRenderer ());
        jgraph.setDefaultNodeToolTipRenderer (KeyedData.class, new TableTooltipGraphCellRenderer ());

        // ---- Publications-per-year view: stacked-row table with colour bars. ----
        final JTable pubTable = new JEditableVarColTable (listTableModel);
        //final JTable jtable3 = new JTable (dtm);
        pubTable.setSelectionMode (ListSelectionModel.MULTIPLE_INTERVAL_SELECTION);
        pubTable.setRowSelectionAllowed (true);
        //jt2.setColumnSelectionAllowed (true);
        pubTable.setRowSorter (new TableRowSorter<DefaultTableModel> ((DefaultTableModel)listTableModel));
        final StackedRowTableUI tlui = new StackedRowTableUI ();
        pubTable.setUI (tlui);
        tlui.setRelativeLayout (true);

        // One interpolated colour per data column (column 0 is the year).
        final Color[] columnColours = new Color [pubTable.getColumnCount() - 1];
        for (int n = 0; n < columnColours.length; n++) {
            double perc = (double)n / columnColours.length;
            columnColours[n] = ColorUtilities.mixColours (Color.orange, new Color (0, 128, 255), (float)perc);
        }
        pubTable.getTableHeader().setReorderingAllowed(true);
        pubTable.getTableHeader().setResizingAllowed(false);
        System.err.println ("ptc: "+pubTable.getColumnModel().getColumnCount());
        for (int col = 1; col < pubTable.getColumnCount(); col++) {
            System.err.println ("col: "+col+", ptyc: "+pubTable.getColumnModel().getColumn(col));
            pubTable.getColumnModel().getColumn(col).setCellRenderer (new ColourBarCellRenderer (columnColours [(col - 1) % columnColours.length]));
        }

        // Column chooser for pubTable; row 0 (the year column) stays locked.
        final JColumnList jcl = new JColumnList (pubTable) {
            @Override
            public boolean isCellEditable (final int row, final int column) {
                return super.isCellEditable (row, column) && row > 0;
            }
        };
        //jcl.addTable (pubTable);

        // ---- Matrix view of the same graph model. ----
        final JMatrix jmatrix = new JMatrix ((TableModel) matrixModel);
        //final JHeaderRenderer stringHeader = new JSONObjHeaderRenderer ();
        //final JHeaderRenderer stringHeader2 = new JSONObjHeaderRenderer ();
        final JHeaderRenderer stringHeader = new KeyedDataHeaderRenderer ();
        final JHeaderRenderer stringHeader2 = new KeyedDataHeaderRenderer ();
        jmatrix.getRowHeader().setDefaultRenderer (Object.class, stringHeader);
        jmatrix.getRowHeader().setDefaultRenderer (String.class, stringHeader);
        jmatrix.getColumnHeader().setDefaultRenderer (Object.class, stringHeader2);
        jmatrix.getColumnHeader().setDefaultRenderer (String.class, stringHeader2);
        // column-header labels are drawn rotated
        ((JLabel)stringHeader2).setUI (new VerticalLabelUI (false));
        stringHeader.setSelectionBackground (jmatrix.getRowHeader());
        stringHeader2.setSelectionBackground (jmatrix.getColumnHeader());
        //jmatrix.setDefaultRenderer (HashSet.class, stringHeader);
        jmatrix.setDefaultRenderer (String.class, stringHeader);
        jmatrix.setDefaultRenderer (Integer.class, new NumberShadeRenderer ());

        // ---- Parallel-coordinates view of the people table. ----
        final JTable table = new JParCoord (tableModel);
        table.setSelectionMode (ListSelectionModel.MULTIPLE_INTERVAL_SELECTION);
        table.setRowSelectionAllowed (true);
        table.setAutoCreateRowSorter (true);
        table.setColumnSelectionAllowed (true);
        table.setForeground (Color.lightGray);
        table.setSelectionForeground (Color.orange);
        if (table instanceof JParCoord) {
            ((JParCoord)table).setBrushForegroundColour (Color.gray);
            ((JParCoord)table).setBrushSelectionColour (Color.red);
            ((JParCoord)table).setSelectedStroke (new BasicStroke (2.0f));
            //((JParCoord)table).setBrushing (true);
        }
        table.setGridColor (Color.gray);
        table.setShowVerticalLines (false);
        table.setBorder (BorderFactory.createEmptyBorder (24, 2, 24, 2));
        // NOTE(review): this cast produces an unused local — looks like leftover code.
        if (table.getRowSorter() instanceof TableRowSorter) {
            final TableRowSorter<? extends TableModel> trs = (TableRowSorter<? extends TableModel>)table.getRowSorter();
        }
        table.setAutoResizeMode (JTable.AUTO_RESIZE_OFF);
        /*
        jgraph.setPreferredSize (new Dimension (768, 640));
        table.setPreferredSize (new Dimension (768, 384));
        table.setMinimumSize (new Dimension (256, 128));

        final LinkedGraphMatrixSelectionModelBridge selectionBridge = new LinkedGraphMatrixSelectionModelBridge ();
        selectionBridge.addJGraph (jgraph);
        selectionBridge.addJTable (table);
        selectionBridge.addJTable (jmatrix);
        */

        // ---- Assemble the frame on the EDT. ----
        SwingUtilities.invokeLater (
            new Runnable () {
                @Override
                public void run() {
                    final JFrame jf2 = new MyFrame ("JGraph Demo");
                    jf2.setSize (1024, 768);

                    // Side panel with force-layout controls.
                    final JPanel optionPanel = new JPanel ();
                    optionPanel.setLayout (new BoxLayout (optionPanel, BoxLayout.Y_AXIS));

                    final JSlider llengthSlider = new JSlider (20, 1000, (int)edgeWeighter.getLinkLength());
                    llengthSlider.addChangeListener(
                        new ChangeListener () {
                            @Override
                            public void stateChanged (final ChangeEvent cEvent) {
                                edgeWeighter.setLinkLength (llengthSlider.getValue());
                            }
                        }
                    );

                    final JSlider lstiffSlider = new JSlider (20, 1000, edgeWeighter.getStiffness());
                    lstiffSlider.addChangeListener(
                        new ChangeListener () {
                            @Override
                            public void stateChanged (final ChangeEvent cEvent) {
                                edgeWeighter.setStiffness (lstiffSlider.getValue());
                            }
                        }
                    );

                    // Larger slider value = weaker repulsion attenuation.
                    final JSlider repulseSlider = new JSlider (1, 50, 10);
                    repulseSlider.addChangeListener(
                        new ChangeListener () {
                            @Override
                            public void stateChanged (final ChangeEvent cEvent) {
                                ((BarnesHut2DForceCalculator)jgraph.getRepulsiveForceCalculator()).setAttenuator (3.0 / repulseSlider.getValue());
                            }
                        }
                    );

                    // Toggle filtering of nodes that have no edges.
                    final JCheckBox showSingletons = new JCheckBox ("Show singletons", true);
                    showSingletons.addActionListener (
                        new ActionListener () {
                            @Override
                            public void actionPerformed (final ActionEvent e) {
                                final Object source = e.getSource();
                                if (source instanceof JCheckBox) {
                                    final boolean selected = ((JCheckBox)source).isSelected();
                                    final GraphFilter singletonFilter = new GraphFilter () {
                                        @Override
                                        public boolean includeNode (final Object obj) {
                                            return jgraph.getModel().getEdges(obj).size() > 0 || selected;
                                        }

                                        @Override
                                        public boolean includeEdge (final Edge edge) {
                                            return true;
                                        }
                                    };
                                    jgraph.setGraphFilter (singletonFilter);
                                }
                            }
                        }
                    );

                    final JButton clearSelections = new JButton ("Clear Selections");
                    clearSelections.addActionListener (
                        new ActionListener () {
                            @Override
                            public void actionPerformed (ActionEvent e) {
                                jgraph.getSelectionModel().clearSelection ();
                            }
                        }
                    );

                    // Pauses the force-layout worker thread.
                    final JButton graphFreezer = new JButton ("Freeze Graph");
                    graphFreezer.addActionListener (
                        new ActionListener () {
                            @Override
                            public void actionPerformed (ActionEvent e) {
                                jgraph.pauseWorker();
                            }
                        }
                    );

                    optionPanel.add (new JLabel ("Link Length:"));
                    optionPanel.add (llengthSlider);
                    optionPanel.add (new JLabel ("Link Stiffness:"));
                    optionPanel.add (lstiffSlider);
                    optionPanel.add (new JLabel ("Repulse Strength:"));
                    optionPanel.add (repulseSlider);
                    optionPanel.add (showSingletons);
                    optionPanel.add (clearSelections);
                    optionPanel.add (graphFreezer);

                    // Publications tab: table plus column/sort controls on the left.
                    JPanel listTablePanel = new JPanel (new BorderLayout ());
                    listTablePanel.add (new JScrollPane (pubTable), BorderLayout.CENTER);
                    final Box pubControlPanel = Box.createVerticalBox();
                    final JScrollPane pubTableScrollPane = new JScrollPane (pubControlPanel);
                    pubTableScrollPane.setPreferredSize (new Dimension (168, 400));
                    jcl.getColumnModel().getColumn(1).setWidth (30);
                    listTablePanel.add (pubTableScrollPane, BorderLayout.WEST);
                    JTable columnSorter = new ColumnSortControl (pubTable);
                    pubControlPanel.add (jcl.getTableHeader());
                    pubControlPanel.add (jcl);
                    pubControlPanel.add (columnSorter.getTableHeader());
                    pubControlPanel.add (columnSorter);

                    JScrollPane parCoordsScrollPane = new JScrollPane (table);
                    JScrollPane matrixScrollPane = new JScrollPane (jmatrix);

                    // One tab per linked view.
                    JTabbedPane jtp = new JTabbedPane ();
                    JPanel graphPanel = new JPanel (new BorderLayout ());
                    graphPanel.add (jgraph, BorderLayout.CENTER);
                    graphPanel.add (optionPanel, BorderLayout.WEST);
                    jtp.addTab ("Node-Link", graphPanel);
                    jtp.addTab ("Matrix", matrixScrollPane);
                    jtp.addTab ("Pubs", listTablePanel);
                    jtp.addTab ("||-Coords", parCoordsScrollPane);
                    jtp.setPreferredSize(new Dimension (800, 480));
                    //jf2.getContentPane().add (optionPanel, BorderLayout.EAST);
                    jf2.getContentPane().add (jtp, BorderLayout.CENTER);
                    //jf2.getContentPane().add (tableScrollPane, BorderLayout.SOUTH);
                    jf2.setVisible (true);
                }
            }
        );
    }
    /**
     * Builds a symmetric co-authorship graph from an edge result set whose
     * first two columns are the two endpoint values of each pair. Repeated
     * pairs increment an Integer edge weight instead of adding a new edge.
     * <p>
     * NOTE(review): the {@code nodeSet} parameter is never read — nodes are
     * taken from the edge rows only. Confirm whether that is intended.
     *
     * @param nodeSet unused (see note above)
     * @param edgeSet rows of endpoint pairs; rewound before reading
     * @return the populated graph
     * @throws SQLException on result-set access failure
     */
    public GraphModel makeGraph (final ResultSet nodeSet, final ResultSet edgeSet) throws SQLException {
        edgeSet.beforeFirst();
        final GraphModel graph = new SymmetricGraphInstance ();

        // Look through the rootnode for fields named 'nodeType'
        // Add that nodeTypes' subfields as nodes to a graph
        while (edgeSet.next()) {
            Object author1 = edgeSet.getObject(1);
            Object author2 = edgeSet.getObject(2);
            graph.addNode (author1);
            graph.addNode (author2);

            final Set<Edge> edges = graph.getEdges (author1, author2);
            if (edges.isEmpty()) {
                graph.addEdge (author1, author2, Integer.valueOf (1));
            } else {
                // bump the weight on the existing (single) edge in place
                final Iterator<Edge> edgeIter = edges.iterator();
                final Edge firstEdge = edgeIter.next();
                final Integer val = (Integer)firstEdge.getEdgeObject();
                firstEdge.setEdgeObject (Integer.valueOf (val.intValue() + 1));
                //graph.removeEdge (firstEdge);
                //graph.addEdge (node1, node2, Integer.valueOf (val.intValue() + 1));
            }
        }
        return graph;
    }
    /**
     * Builds a symmetric graph from a nodes table and an edges table of
     * key pairs. Node identity is the value in the nodes table's column 1
     * (looked up via the key in column 0); pairs whose keys are unknown
     * are skipped. Repeated pairs increment the Integer edge weight.
     * <p>
     * NOTE(review): {@code primaryKeyColumn} is never read — column 0 is
     * assumed to hold the primary key. Confirm whether that is intended.
     *
     * @param nodes            node table; col 0 = key, col 1 = node object
     * @param primaryKeyColumn unused (see note above)
     * @param edges            edge table; cols 0 and 1 hold endpoint keys
     * @return the populated graph
     * @throws SQLException declared but not thrown here
     */
    public GraphModel makeGraph (final TableModel nodes, final String primaryKeyColumn, final TableModel edges) throws SQLException {
        final GraphModel graph = new SymmetricGraphInstance ();

        // index the node rows by their column-0 key
        final Map<Object, Integer> primaryKeyRowMap = new HashMap<Object, Integer> ();
        for (int row = 0; row < nodes.getRowCount(); row++) {
            primaryKeyRowMap.put (nodes.getValueAt (row, 0), Integer.valueOf (row));
        }

        // Look through the rootnode for fields named 'nodeType'
        // Add that nodeTypes' subfields as nodes to a graph
        for (int row = 0; row < edges.getRowCount(); row++) {
            Object authorKey1 = edges.getValueAt (row, 0);
            Object authorKey2 = edges.getValueAt (row, 1);
            int authorIndex1 = (primaryKeyRowMap.get(authorKey1) == null ? -1 : primaryKeyRowMap.get(authorKey1).intValue());
            int authorIndex2 = (primaryKeyRowMap.get(authorKey2) == null ? -1 : primaryKeyRowMap.get(authorKey2).intValue());

            if (authorIndex1 >= 0 && authorIndex2 >= 0) {
                Object graphNode1 = nodes.getValueAt (authorIndex1, 1);
                Object graphNode2 = nodes.getValueAt (authorIndex2, 1);
                graph.addNode (graphNode1);
                graph.addNode (graphNode2);

                final Set<Edge> gedges = graph.getEdges (graphNode1, graphNode2);
                if (gedges.isEmpty()) {
                    graph.addEdge (graphNode1, graphNode2, Integer.valueOf (1));
                } else {
                    // bump the weight on the existing edge in place
                    final Iterator<Edge> edgeIter = gedges.iterator();
                    final Edge firstEdge = edgeIter.next();
                    final Integer val = (Integer)firstEdge.getEdgeObject();
                    firstEdge.setEdgeObject (Integer.valueOf (val.intValue() + 1));
                }
            }
        }
        return graph;
    }
    /**
     * Builds a symmetric graph whose nodes are KeyedData objects resolved
     * from the endpoint keys in the edges table (cols 0 and 1). Pairs with
     * a null or unknown key are skipped; repeated pairs increment the
     * Integer edge weight.
     * <p>
     * NOTE(review): {@code primaryKeyColumn} is never read. Confirm.
     *
     * @param keyDataMap       lookup from key to KeyedData node object
     * @param primaryKeyColumn unused (see note above)
     * @param edges            edge table; cols 0 and 1 hold endpoint keys
     * @return the populated graph
     * @throws SQLException declared but not thrown here
     */
    public GraphModel makeGraph (final Map<Object, KeyedData> keyDataMap, final String primaryKeyColumn, final TableModel edges) throws SQLException {
        final GraphModel graph = new SymmetricGraphInstance ();

        // Look through the rootnode for fields named 'nodeType'
        // Add that nodeTypes' subfields as nodes to a graph
        for (int row = 0; row < edges.getRowCount(); row++) {
            Object authorKey1 = edges.getValueAt (row, 0);
            Object authorKey2 = edges.getValueAt (row, 1);

            if (authorKey1 != null && authorKey2 != null) {
                Object graphNode1 = keyDataMap.get (authorKey1);
                Object graphNode2 = keyDataMap.get (authorKey2);

                if (graphNode1 != null && graphNode2 != null) {
                    graph.addNode (graphNode1);
                    graph.addNode (graphNode2);

                    final Set<Edge> gedges = graph.getEdges (graphNode1, graphNode2);
                    if (gedges.isEmpty()) {
                        graph.addEdge (graphNode1, graphNode2, Integer.valueOf (1));
                    } else {
                        // bump the weight on the existing edge in place
                        final Iterator<Edge> edgeIter = gedges.iterator();
                        final Edge firstEdge = edgeIter.next();
                        final Integer val = (Integer)firstEdge.getEdgeObject();
                        firstEdge.setEdgeObject (Integer.valueOf (val.intValue() + 1));
                    }
                }
            }
        }
        return graph;
    }
public Map<Object, Integer> makeKeyRowMap (final TableModel tableModel, final int columnPKIndex) {
final Map<Object, Integer> primaryKeyRowMap = new HashMap<Object, Integer> ();
for (int row = 0; row < tableModel.getRowCount(); row++) {
primaryKeyRowMap.put (tableModel.getValueAt (row, 0), Integer.valueOf (row));
}
return primaryKeyRowMap;
}
public Map<Object, KeyedData> makeKeyedDataMap (final TableModel tableModel, final int columnPKIndex, final int columnLabelIndex) {
final Map<Object, KeyedData> primaryKeyDataMap = new HashMap<Object, KeyedData> ();
for (int row = 0; row < tableModel.getRowCount(); row++) {
primaryKeyDataMap.put (tableModel.getValueAt (row, columnPKIndex), makeKeyedData (tableModel, columnPKIndex, columnLabelIndex, row));
}
return primaryKeyDataMap;
}
public KeyedData makeKeyedData (final TableModel tableModel, final int columnPKIndex, final int columnLabelIndex, final int rowIndex) {
List<Object> data = new ArrayList<Object> ();
for (int n = 0; n < tableModel.getColumnCount(); n++) {
data.add (tableModel.getValueAt (rowIndex, n));
}
KeyedData kd = new KeyedData (tableModel.getValueAt (rowIndex, columnPKIndex), data, columnLabelIndex);
return kd;
}
/**
* can't do pivot queries in ANSI SQL
* @param sqlresult
* @return
*/
public TableModel makePubByYearTable (final TableModel sqlresult) {
DefaultTableModel tm = new DefaultTableModel () {
public Class<?> getColumnClass(int columnIndex) {
if (columnIndex > 0) {
return Long.class;
}
return Integer.class;
}
public boolean isCellEditable (final int row, final int column) {
return false;
}
};
Map<Object, List<Long>> yearsToTypes = new HashMap<Object, List<Long>> ();
Map<Object, Integer> columnTypes = new HashMap<Object, Integer> ();
tm.addColumn ("Year");
int col = 1;
for (int sqlrow = 0; sqlrow < sqlresult.getRowCount(); sqlrow++) {
Object type = sqlresult.getValueAt (sqlrow, 1);
if (columnTypes.get(type) == null) {
columnTypes.put(type, Integer.valueOf(col));
tm.addColumn (type);
col++;
}
}
System.err.println ("cols: "+columnTypes+", "+columnTypes.size());
for (int sqlrow = 0; sqlrow < sqlresult.getRowCount(); sqlrow++) {
Object year = sqlresult.getValueAt (sqlrow, 0);
if (year != null) {
Object type = sqlresult.getValueAt (sqlrow, 1);
Object val = sqlresult.getValueAt (sqlrow, 2);
int colIndex = columnTypes.get(type).intValue();
List<Long> store = yearsToTypes.get (year);
if (store == null) {
Long[] storep = new Long [col - 1];
Arrays.fill (storep, Long.valueOf(0));
List<Long> longs = Arrays.asList (storep);
store = new ArrayList (longs);
//Collections.fill (store, Long.valueOf (0));
yearsToTypes.put (year, store);
}
store.set (colIndex - 1, (Long)val);
}
}
for (Entry<Object, List<Long>> yearEntry : yearsToTypes.entrySet()) {
Object[] rowData = new Object [col];
rowData[0] = yearEntry.getKey();
for (int n = 1; n < col; n++) {
rowData[n] = yearEntry.getValue().get(n-1);
}
tm.addRow(rowData);
}
return tm;
}
}
| martingraham/JSwingPlus | src/example/multiview/NapierDBVis.java | Java | apache-2.0 | 24,478 |
import props from './props';
import './view.html';

/**
 * Polymer element definition for <note-clab>.
 */
class NoteClab {
  /** Registers the element name and its property definitions. */
  beforeRegister() {
    this.is = 'note-clab';
    this.properties = props;
  }

  /**
   * Builds the CSS class string for the note: always 'input-note',
   * plus the note type when one is set (loose check skips null/undefined).
   */
  computeClasses(type) {
    const classes = ['input-note'];
    if (type != null) {
      classes.push(type);
    }
    return classes.join(' ');
  }
}

Polymer(NoteClab);
| contactlab/contactlab-ui-components | src/note/index.js | JavaScript | apache-2.0 | 301 |
package com.umeng.soexample.run.step;

/**
 * Callback used to push step-count updates to the UI.
 * (Comments translated from Chinese.)
 * Created by dylan on 16/9/27.
 */
public interface UpdateUiCallBack {
    /**
     * Update the UI with the latest step count.
     *
     * @param stepCount current number of steps
     */
    void updateUi(int stepCount);
}
| liulei-0911/LLApp | myselfapp/src/main/java/com/umeng/soexample/run/step/UpdateUiCallBack.java | Java | apache-2.0 | 249 |
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: google/protobuf/unittest_preserve_unknown_enum2.proto
#define INTERNAL_SUPPRESS_PROTOBUF_FIELD_DEPRECATION
#include <google/protobuf/unittest_preserve_unknown_enum2.pb.h>
#include <algorithm>
#include <google/protobuf/stubs/common.h>
#include <google/protobuf/stubs/port.h>
#include <google/protobuf/stubs/once.h>
#include <google/protobuf/io/coded_stream.h>
#include <google/protobuf/wire_format_lite_inl.h>
#include <google/protobuf/descriptor.h>
#include <google/protobuf/generated_message_reflection.h>
#include <google/protobuf/reflection_ops.h>
#include <google/protobuf/wire_format.h>
// @@protoc_insertion_point(includes)
namespace proto2_preserve_unknown_enum_unittest {
// Generated by protoc (do not hand-edit): storage for the default instance
// of MyMessage. The extra int fields back the oneof members' storage.
class MyMessageDefaultTypeInternal : public ::google::protobuf::internal::ExplicitlyConstructed<MyMessage> {
public:
  int oneof_e_1_;
  int oneof_e_2_;
} _MyMessage_default_instance_;
namespace protobuf_google_2fprotobuf_2funittest_5fpreserve_5funknown_5fenum2_2eproto {
// Generated reflection/parse tables for this .proto file (do not hand-edit).
namespace {
::google::protobuf::Metadata file_level_metadata[1];
const ::google::protobuf::EnumDescriptor* file_level_enum_descriptors[1];
} // namespace

// Per-field fast-parse table (empty sentinel entry only).
PROTOBUF_CONSTEXPR_VAR ::google::protobuf::internal::ParseTableField
const TableStruct::entries[] = {
  {0, 0, 0, ::google::protobuf::internal::kInvalidMask, 0, 0},
};

PROTOBUF_CONSTEXPR_VAR ::google::protobuf::internal::AuxillaryParseTableField
const TableStruct::aux[] = {
  ::google::protobuf::internal::AuxillaryParseTableField(),
};

PROTOBUF_CONSTEXPR_VAR ::google::protobuf::internal::ParseTable const
TableStruct::schema[] = {
  { NULL, NULL, 0, -1, -1, false },
};

// Field offsets for reflection; ~0u marks absent/oneof-cased entries.
const ::google::protobuf::uint32 TableStruct::offsets[] = {
  GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(MyMessage, _has_bits_),
  GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(MyMessage, _internal_metadata_),
  ~0u,  // no _extensions_
  GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(MyMessage, _oneof_case_[0]),
  ~0u,  // no _weak_field_map_
  GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(MyMessage, e_),
  GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(MyMessage, repeated_e_),
  GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(MyMessage, repeated_packed_e_),
  GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(MyMessage, repeated_packed_unexpected_e_),
  GOOGLE_PROTOBUF_GENERATED_DEFAULT_ONEOF_FIELD_OFFSET((&_MyMessage_default_instance_), oneof_e_1_),
  GOOGLE_PROTOBUF_GENERATED_DEFAULT_ONEOF_FIELD_OFFSET((&_MyMessage_default_instance_), oneof_e_2_),
  GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(MyMessage, o_),
  0,
  ~0u,
  ~0u,
  ~0u,
  ~0u,
  ~0u,
};

static const ::google::protobuf::internal::MigrationSchema schemas[] = {
  { 0, 12, sizeof(MyMessage)},
};

static ::google::protobuf::Message const * const file_default_instances[] = {
  reinterpret_cast<const ::google::protobuf::Message*>(&_MyMessage_default_instance_),
};
namespace {
void protobuf_AssignDescriptors() {
AddDescriptors();
::google::protobuf::MessageFactory* factory = NULL;
AssignDescriptors(
"google/protobuf/unittest_preserve_unknown_enum2.proto", schemas, file_default_instances, TableStruct::offsets, factory,
file_level_metadata, file_level_enum_descriptors, NULL);
}
void protobuf_AssignDescriptorsOnce() {
static GOOGLE_PROTOBUF_DECLARE_ONCE(once);
::google::protobuf::GoogleOnceInit(&once, &protobuf_AssignDescriptors);
}
void protobuf_RegisterTypes(const ::std::string&) GOOGLE_ATTRIBUTE_COLD;
void protobuf_RegisterTypes(const ::std::string&) {
protobuf_AssignDescriptorsOnce();
::google::protobuf::internal::RegisterAllTypes(file_level_metadata, 1);
}
} // namespace
void TableStruct::Shutdown() {
_MyMessage_default_instance_.Shutdown();
delete file_level_metadata[0].reflection;
}
void TableStruct::InitDefaultsImpl() {
GOOGLE_PROTOBUF_VERIFY_VERSION;
::google::protobuf::internal::InitProtobufDefaults();
_MyMessage_default_instance_.DefaultConstruct();
_MyMessage_default_instance_.oneof_e_1_ = 0;
_MyMessage_default_instance_.oneof_e_2_ = 0;
}
void InitDefaults() {
static GOOGLE_PROTOBUF_DECLARE_ONCE(once);
::google::protobuf::GoogleOnceInit(&once, &TableStruct::InitDefaultsImpl);
}
void AddDescriptorsImpl() {
InitDefaults();
static const char descriptor[] = {
"\n5google/protobuf/unittest_preserve_unkn"
"own_enum2.proto\022%proto2_preserve_unknown"
"_enum_unittest\"\270\003\n\tMyMessage\0228\n\001e\030\001 \001(\0162"
"-.proto2_preserve_unknown_enum_unittest."
"MyEnum\022A\n\nrepeated_e\030\002 \003(\0162-.proto2_pres"
"erve_unknown_enum_unittest.MyEnum\022L\n\021rep"
"eated_packed_e\030\003 \003(\0162-.proto2_preserve_u"
"nknown_enum_unittest.MyEnumB\002\020\001\022S\n\034repea"
"ted_packed_unexpected_e\030\004 \003(\0162-.proto2_p"
"reserve_unknown_enum_unittest.MyEnum\022B\n\t"
"oneof_e_1\030\005 \001(\0162-.proto2_preserve_unknow"
"n_enum_unittest.MyEnumH\000\022B\n\toneof_e_2\030\006 "
"\001(\0162-.proto2_preserve_unknown_enum_unitt"
"est.MyEnumH\000B\003\n\001o*#\n\006MyEnum\022\007\n\003FOO\020\000\022\007\n\003"
"BAR\020\001\022\007\n\003BAZ\020\002"
};
::google::protobuf::DescriptorPool::InternalAddGeneratedFile(
descriptor, 574);
::google::protobuf::MessageFactory::InternalRegisterGeneratedFile(
"google/protobuf/unittest_preserve_unknown_enum2.proto", &protobuf_RegisterTypes);
::google::protobuf::internal::OnShutdown(&TableStruct::Shutdown);
}
void AddDescriptors() {
static GOOGLE_PROTOBUF_DECLARE_ONCE(once);
::google::protobuf::GoogleOnceInit(&once, &AddDescriptorsImpl);
}
// Force AddDescriptors() to be called at static initialization time.
struct StaticDescriptorInitializer {
StaticDescriptorInitializer() {
AddDescriptors();
}
} static_descriptor_initializer;
} // namespace protobuf_google_2fprotobuf_2funittest_5fpreserve_5funknown_5fenum2_2eproto
// Returns the reflection descriptor for MyEnum, initializing the file-level
// descriptor tables on first use.
const ::google::protobuf::EnumDescriptor* MyEnum_descriptor() {
  protobuf_google_2fprotobuf_2funittest_5fpreserve_5funknown_5fenum2_2eproto::protobuf_AssignDescriptorsOnce();
  return protobuf_google_2fprotobuf_2funittest_5fpreserve_5funknown_5fenum2_2eproto::file_level_enum_descriptors[0];
}
// Returns true when |value| matches a defined MyEnum constant
// (FOO = 0, BAR = 1, BAZ = 2); any other value is unknown.
bool MyEnum_IsValid(int value) {
  return value >= 0 && value <= 2;
}
// ===================================================================
#if !defined(_MSC_VER) || _MSC_VER >= 1900
// Out-of-line definitions of the in-class field-number constants (required
// when they are ODR-used; old MSVC instead rejects these definitions).
const int MyMessage::kEFieldNumber;
const int MyMessage::kRepeatedEFieldNumber;
const int MyMessage::kRepeatedPackedEFieldNumber;
const int MyMessage::kRepeatedPackedUnexpectedEFieldNumber;
const int MyMessage::kOneofE1FieldNumber;
const int MyMessage::kOneofE2FieldNumber;
#endif  // !defined(_MSC_VER) || _MSC_VER >= 1900
// Default constructor.  Ensures file-level defaults are initialized before
// any non-default instance is built; the default instance itself skips this
// to avoid recursion during static initialization.
MyMessage::MyMessage()
  : ::google::protobuf::Message(), _internal_metadata_(NULL) {
  if (GOOGLE_PREDICT_TRUE(this != internal_default_instance())) {
    protobuf_google_2fprotobuf_2funittest_5fpreserve_5funknown_5fenum2_2eproto::InitDefaults();
  }
  SharedCtor();
  // @@protoc_insertion_point(constructor:proto2_preserve_unknown_enum_unittest.MyMessage)
}

// Copy constructor: copies has-bits, the repeated enum fields, the singular
// enum field, unknown fields, and whichever member of the "o" oneof is set.
MyMessage::MyMessage(const MyMessage& from)
  : ::google::protobuf::Message(),
      _internal_metadata_(NULL),
      _has_bits_(from._has_bits_),
      _cached_size_(0),
      repeated_e_(from.repeated_e_),
      repeated_packed_e_(from.repeated_packed_e_),
      repeated_packed_unexpected_e_(from.repeated_packed_unexpected_e_) {
  _internal_metadata_.MergeFrom(from._internal_metadata_);
  e_ = from.e_;
  clear_has_o();
  switch (from.o_case()) {
    case kOneofE1: {
      set_oneof_e_1(from.oneof_e_1());
      break;
    }
    case kOneofE2: {
      set_oneof_e_2(from.oneof_e_2());
      break;
    }
    case O_NOT_SET: {
      break;
    }
  }
  // @@protoc_insertion_point(copy_constructor:proto2_preserve_unknown_enum_unittest.MyMessage)
}

// Field initialization shared by all constructors.
void MyMessage::SharedCtor() {
  _cached_size_ = 0;
  e_ = 0;
  clear_has_o();
}
MyMessage::~MyMessage() {
  // @@protoc_insertion_point(destructor:proto2_preserve_unknown_enum_unittest.MyMessage)
  SharedDtor();
}

// Shared teardown: releases any active oneof member (enum members need no
// actual deallocation, but clear_o() keeps the oneof case consistent).
void MyMessage::SharedDtor() {
  if (has_o()) {
    clear_o();
  }
}

// Caches a serialized size computed externally; guarded for concurrent
// writers per the generated-code convention.
void MyMessage::SetCachedSize(int size) const {
  GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
  _cached_size_ = size;
  GOOGLE_SAFE_CONCURRENT_WRITES_END();
}
// Returns the reflection descriptor for MyMessage.
const ::google::protobuf::Descriptor* MyMessage::descriptor() {
  protobuf_google_2fprotobuf_2funittest_5fpreserve_5funknown_5fenum2_2eproto::protobuf_AssignDescriptorsOnce();
  return protobuf_google_2fprotobuf_2funittest_5fpreserve_5funknown_5fenum2_2eproto::file_level_metadata[kIndexInFileMessages].descriptor;
}

// Returns the immutable file-scope default instance, initializing it lazily.
const MyMessage& MyMessage::default_instance() {
  protobuf_google_2fprotobuf_2funittest_5fpreserve_5funknown_5fenum2_2eproto::InitDefaults();
  return *internal_default_instance();
}

// Creates a new heap instance; when |arena| is non-NULL the arena takes
// ownership so the object is destroyed with the arena.
MyMessage* MyMessage::New(::google::protobuf::Arena* arena) const {
  MyMessage* n = new MyMessage;
  if (arena != NULL) {
    arena->Own(n);
  }
  return n;
}
// Resets the "o" oneof to O_NOT_SET.  Enum members hold no resources, so
// only the case discriminant needs updating.
void MyMessage::clear_o() {
// @@protoc_insertion_point(one_of_clear_start:proto2_preserve_unknown_enum_unittest.MyMessage)
  switch (o_case()) {
    case kOneofE1: {
      // No need to clear
      break;
    }
    case kOneofE2: {
      // No need to clear
      break;
    }
    case O_NOT_SET: {
      break;
    }
  }
  _oneof_case_[0] = O_NOT_SET;
}

// Restores every field (including unknown fields) to its default state.
void MyMessage::Clear() {
// @@protoc_insertion_point(message_clear_start:proto2_preserve_unknown_enum_unittest.MyMessage)
  repeated_e_.Clear();
  repeated_packed_e_.Clear();
  repeated_packed_unexpected_e_.Clear();
  e_ = 0;
  clear_o();
  _has_bits_.Clear();
  _internal_metadata_.Clear();
}
// Wire-format parser.  For every enum field, values that fail
// MyEnum_IsValid() are preserved in the unknown-field set instead of being
// dropped — this is the "preserve unknown enum" behavior under test.
// Returns false on a malformed stream.
bool MyMessage::MergePartialFromCodedStream(
    ::google::protobuf::io::CodedInputStream* input) {
#define DO_(EXPRESSION) if (!GOOGLE_PREDICT_TRUE(EXPRESSION)) goto failure
  ::google::protobuf::uint32 tag;
  // @@protoc_insertion_point(parse_start:proto2_preserve_unknown_enum_unittest.MyMessage)
  for (;;) {
    ::std::pair< ::google::protobuf::uint32, bool> p = input->ReadTagWithCutoffNoLastTag(127u);
    tag = p.first;
    if (!p.second) goto handle_unusual;
    switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) {
      // optional .proto2_preserve_unknown_enum_unittest.MyEnum e = 1;
      case 1: {
        if (static_cast< ::google::protobuf::uint8>(tag) ==
            static_cast< ::google::protobuf::uint8>(8u)) {
          int value;
          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
                   int, ::google::protobuf::internal::WireFormatLite::TYPE_ENUM>(
                 input, &value)));
          if (::proto2_preserve_unknown_enum_unittest::MyEnum_IsValid(value)) {
            set_e(static_cast< ::proto2_preserve_unknown_enum_unittest::MyEnum >(value));
          } else {
            // Unknown enum value: keep it so reserialization round-trips.
            mutable_unknown_fields()->AddVarint(1, value);
          }
        } else {
          goto handle_unusual;
        }
        break;
      }

      // repeated .proto2_preserve_unknown_enum_unittest.MyEnum repeated_e = 2;
      case 2: {
        if (static_cast< ::google::protobuf::uint8>(tag) ==
            static_cast< ::google::protobuf::uint8>(16u)) {
          // Unpacked encoding: one varint per element.
          int value;
          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
                   int, ::google::protobuf::internal::WireFormatLite::TYPE_ENUM>(
                 input, &value)));
          if (::proto2_preserve_unknown_enum_unittest::MyEnum_IsValid(value)) {
            add_repeated_e(static_cast< ::proto2_preserve_unknown_enum_unittest::MyEnum >(value));
          } else {
            mutable_unknown_fields()->AddVarint(2, value);
          }
        } else if (static_cast< ::google::protobuf::uint8>(tag) ==
                   static_cast< ::google::protobuf::uint8>(18u)) {
          // Packed encoding arriving on an unpacked field is still accepted.
          DO_((::google::protobuf::internal::WireFormat::ReadPackedEnumPreserveUnknowns(
                 input,
                 2,
                 ::proto2_preserve_unknown_enum_unittest::MyEnum_IsValid,
                 mutable_unknown_fields(),
                 this->mutable_repeated_e())));
        } else {
          goto handle_unusual;
        }
        break;
      }

      // repeated .proto2_preserve_unknown_enum_unittest.MyEnum repeated_packed_e = 3 [packed = true];
      case 3: {
        if (static_cast< ::google::protobuf::uint8>(tag) ==
            static_cast< ::google::protobuf::uint8>(26u)) {
          // Packed encoding: length-delimited run of varints.
          ::google::protobuf::uint32 length;
          DO_(input->ReadVarint32(&length));
          ::google::protobuf::io::CodedInputStream::Limit limit = input->PushLimit(length);
          while (input->BytesUntilLimit() > 0) {
            int value;
            DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
                     int, ::google::protobuf::internal::WireFormatLite::TYPE_ENUM>(
                   input, &value)));
            if (::proto2_preserve_unknown_enum_unittest::MyEnum_IsValid(value)) {
              add_repeated_packed_e(static_cast< ::proto2_preserve_unknown_enum_unittest::MyEnum >(value));
            } else {
              mutable_unknown_fields()->AddVarint(3, value);
            }
          }
          input->PopLimit(limit);
        } else if (static_cast< ::google::protobuf::uint8>(tag) ==
                   static_cast< ::google::protobuf::uint8>(24u)) {
          // Unpacked encoding arriving on a packed field is still accepted.
          int value;
          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
                   int, ::google::protobuf::internal::WireFormatLite::TYPE_ENUM>(
                 input, &value)));
          if (::proto2_preserve_unknown_enum_unittest::MyEnum_IsValid(value)) {
            add_repeated_packed_e(static_cast< ::proto2_preserve_unknown_enum_unittest::MyEnum >(value));
          } else {
            mutable_unknown_fields()->AddVarint(3, value);
          }
        } else {
          goto handle_unusual;
        }
        break;
      }

      // repeated .proto2_preserve_unknown_enum_unittest.MyEnum repeated_packed_unexpected_e = 4;
      case 4: {
        if (static_cast< ::google::protobuf::uint8>(tag) ==
            static_cast< ::google::protobuf::uint8>(32u)) {
          int value;
          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
                   int, ::google::protobuf::internal::WireFormatLite::TYPE_ENUM>(
                 input, &value)));
          if (::proto2_preserve_unknown_enum_unittest::MyEnum_IsValid(value)) {
            add_repeated_packed_unexpected_e(static_cast< ::proto2_preserve_unknown_enum_unittest::MyEnum >(value));
          } else {
            mutable_unknown_fields()->AddVarint(4, value);
          }
        } else if (static_cast< ::google::protobuf::uint8>(tag) ==
                   static_cast< ::google::protobuf::uint8>(34u)) {
          DO_((::google::protobuf::internal::WireFormat::ReadPackedEnumPreserveUnknowns(
                 input,
                 4,
                 ::proto2_preserve_unknown_enum_unittest::MyEnum_IsValid,
                 mutable_unknown_fields(),
                 this->mutable_repeated_packed_unexpected_e())));
        } else {
          goto handle_unusual;
        }
        break;
      }

      // optional .proto2_preserve_unknown_enum_unittest.MyEnum oneof_e_1 = 5;
      case 5: {
        if (static_cast< ::google::protobuf::uint8>(tag) ==
            static_cast< ::google::protobuf::uint8>(40u)) {
          int value;
          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
                   int, ::google::protobuf::internal::WireFormatLite::TYPE_ENUM>(
                 input, &value)));
          if (::proto2_preserve_unknown_enum_unittest::MyEnum_IsValid(value)) {
            set_oneof_e_1(static_cast< ::proto2_preserve_unknown_enum_unittest::MyEnum >(value));
          } else {
            mutable_unknown_fields()->AddVarint(5, value);
          }
        } else {
          goto handle_unusual;
        }
        break;
      }

      // optional .proto2_preserve_unknown_enum_unittest.MyEnum oneof_e_2 = 6;
      case 6: {
        if (static_cast< ::google::protobuf::uint8>(tag) ==
            static_cast< ::google::protobuf::uint8>(48u)) {
          int value;
          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
                   int, ::google::protobuf::internal::WireFormatLite::TYPE_ENUM>(
                 input, &value)));
          if (::proto2_preserve_unknown_enum_unittest::MyEnum_IsValid(value)) {
            set_oneof_e_2(static_cast< ::proto2_preserve_unknown_enum_unittest::MyEnum >(value));
          } else {
            mutable_unknown_fields()->AddVarint(6, value);
          }
        } else {
          goto handle_unusual;
        }
        break;
      }

      default: {
      handle_unusual:
        // End of stream / end of group terminates parsing successfully;
        // any other unexpected tag is skipped into the unknown-field set.
        if (tag == 0 ||
            ::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
            ::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) {
          goto success;
        }
        DO_(::google::protobuf::internal::WireFormat::SkipField(
              input, tag, mutable_unknown_fields()));
        break;
      }
    }
  }
success:
  // @@protoc_insertion_point(parse_success:proto2_preserve_unknown_enum_unittest.MyMessage)
  return true;
failure:
  // @@protoc_insertion_point(parse_failure:proto2_preserve_unknown_enum_unittest.MyMessage)
  return false;
#undef DO_
}
// Serializes all set fields to |output| using sizes previously computed by
// ByteSizeLong() (the packed field's length prefix comes from the cached
// byte size).  Unknown fields — including preserved unknown enum values —
// are written last.
void MyMessage::SerializeWithCachedSizes(
    ::google::protobuf::io::CodedOutputStream* output) const {
  // @@protoc_insertion_point(serialize_start:proto2_preserve_unknown_enum_unittest.MyMessage)
  ::google::protobuf::uint32 cached_has_bits = 0;
  (void) cached_has_bits;

  cached_has_bits = _has_bits_[0];
  // optional .proto2_preserve_unknown_enum_unittest.MyEnum e = 1;
  if (cached_has_bits & 0x00000001u) {
    ::google::protobuf::internal::WireFormatLite::WriteEnum(
      1, this->e(), output);
  }

  // repeated .proto2_preserve_unknown_enum_unittest.MyEnum repeated_e = 2;
  for (int i = 0, n = this->repeated_e_size(); i < n; i++) {
    ::google::protobuf::internal::WireFormatLite::WriteEnum(
      2, this->repeated_e(i), output);
  }

  // repeated .proto2_preserve_unknown_enum_unittest.MyEnum repeated_packed_e = 3 [packed = true];
  if (this->repeated_packed_e_size() > 0) {
    ::google::protobuf::internal::WireFormatLite::WriteTag(
      3,
      ::google::protobuf::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED,
      output);
    output->WriteVarint32(_repeated_packed_e_cached_byte_size_);
  }
  for (int i = 0, n = this->repeated_packed_e_size(); i < n; i++) {
    ::google::protobuf::internal::WireFormatLite::WriteEnumNoTag(
      this->repeated_packed_e(i), output);
  }

  // repeated .proto2_preserve_unknown_enum_unittest.MyEnum repeated_packed_unexpected_e = 4;
  for (int i = 0, n = this->repeated_packed_unexpected_e_size(); i < n; i++) {
    ::google::protobuf::internal::WireFormatLite::WriteEnum(
      4, this->repeated_packed_unexpected_e(i), output);
  }

  // Exactly one (or none) of the "o" oneof members is serialized.
  switch (o_case()) {
    case kOneofE1:
      ::google::protobuf::internal::WireFormatLite::WriteEnum(
        5, this->oneof_e_1(), output);
      break;
    case kOneofE2:
      ::google::protobuf::internal::WireFormatLite::WriteEnum(
        6, this->oneof_e_2(), output);
      break;
    default: ;
  }
  if (_internal_metadata_.have_unknown_fields()) {
    ::google::protobuf::internal::WireFormat::SerializeUnknownFields(
        unknown_fields(), output);
  }
  // @@protoc_insertion_point(serialize_end:proto2_preserve_unknown_enum_unittest.MyMessage)
}
// Same wire output as SerializeWithCachedSizes(), but written directly into
// a pre-sized flat buffer; returns the pointer one past the last byte.
::google::protobuf::uint8* MyMessage::InternalSerializeWithCachedSizesToArray(
    bool deterministic, ::google::protobuf::uint8* target) const {
  // @@protoc_insertion_point(serialize_to_array_start:proto2_preserve_unknown_enum_unittest.MyMessage)
  ::google::protobuf::uint32 cached_has_bits = 0;
  (void) cached_has_bits;

  cached_has_bits = _has_bits_[0];
  // optional .proto2_preserve_unknown_enum_unittest.MyEnum e = 1;
  if (cached_has_bits & 0x00000001u) {
    target = ::google::protobuf::internal::WireFormatLite::WriteEnumToArray(
      1, this->e(), target);
  }

  // repeated .proto2_preserve_unknown_enum_unittest.MyEnum repeated_e = 2;
  target = ::google::protobuf::internal::WireFormatLite::WriteEnumToArray(
    2, this->repeated_e_, target);

  // repeated .proto2_preserve_unknown_enum_unittest.MyEnum repeated_packed_e = 3 [packed = true];
  if (this->repeated_packed_e_size() > 0) {
    target = ::google::protobuf::internal::WireFormatLite::WriteTagToArray(
      3,
      ::google::protobuf::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED,
      target);
    target = ::google::protobuf::io::CodedOutputStream::WriteVarint32ToArray( _repeated_packed_e_cached_byte_size_, target);
    target = ::google::protobuf::internal::WireFormatLite::WriteEnumNoTagToArray(
      this->repeated_packed_e_, target);
  }

  // repeated .proto2_preserve_unknown_enum_unittest.MyEnum repeated_packed_unexpected_e = 4;
  target = ::google::protobuf::internal::WireFormatLite::WriteEnumToArray(
    4, this->repeated_packed_unexpected_e_, target);

  switch (o_case()) {
    case kOneofE1:
      target = ::google::protobuf::internal::WireFormatLite::WriteEnumToArray(
        5, this->oneof_e_1(), target);
      break;
    case kOneofE2:
      target = ::google::protobuf::internal::WireFormatLite::WriteEnumToArray(
        6, this->oneof_e_2(), target);
      break;
    default: ;
  }
  if (_internal_metadata_.have_unknown_fields()) {
    target = ::google::protobuf::internal::WireFormat::SerializeUnknownFieldsToArray(
        unknown_fields(), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:proto2_preserve_unknown_enum_unittest.MyMessage)
  return target;
}
// Computes the exact serialized size in bytes.  Also refreshes the cached
// total size and the packed field's cached byte size (both consumed later
// by SerializeWithCachedSizes); the writes are guarded for concurrency.
size_t MyMessage::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:proto2_preserve_unknown_enum_unittest.MyMessage)
  size_t total_size = 0;

  if (_internal_metadata_.have_unknown_fields()) {
    total_size +=
      ::google::protobuf::internal::WireFormat::ComputeUnknownFieldsSize(
        unknown_fields());
  }
  // repeated .proto2_preserve_unknown_enum_unittest.MyEnum repeated_e = 2;
  {
    size_t data_size = 0;
    unsigned int count = this->repeated_e_size();
    for (unsigned int i = 0; i < count; i++) {
      data_size += ::google::protobuf::internal::WireFormatLite::EnumSize(
        this->repeated_e(i));
    }
    // Unpacked: one 1-byte tag per element plus the varint payloads.
    total_size += (1UL * count) + data_size;
  }

  // repeated .proto2_preserve_unknown_enum_unittest.MyEnum repeated_packed_e = 3 [packed = true];
  {
    size_t data_size = 0;
    unsigned int count = this->repeated_packed_e_size();
    for (unsigned int i = 0; i < count; i++) {
      data_size += ::google::protobuf::internal::WireFormatLite::EnumSize(
        this->repeated_packed_e(i));
    }
    // Packed: one tag plus a length prefix, then the raw varint payloads.
    if (data_size > 0) {
      total_size += 1 +
        ::google::protobuf::internal::WireFormatLite::Int32Size(data_size);
    }
    int cached_size = ::google::protobuf::internal::ToCachedSize(data_size);
    GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
    _repeated_packed_e_cached_byte_size_ = cached_size;
    GOOGLE_SAFE_CONCURRENT_WRITES_END();
    total_size += data_size;
  }

  // repeated .proto2_preserve_unknown_enum_unittest.MyEnum repeated_packed_unexpected_e = 4;
  {
    size_t data_size = 0;
    unsigned int count = this->repeated_packed_unexpected_e_size();
    for (unsigned int i = 0; i < count; i++) {
      data_size += ::google::protobuf::internal::WireFormatLite::EnumSize(
        this->repeated_packed_unexpected_e(i));
    }
    total_size += (1UL * count) + data_size;
  }

  // optional .proto2_preserve_unknown_enum_unittest.MyEnum e = 1;
  if (has_e()) {
    total_size += 1 +
      ::google::protobuf::internal::WireFormatLite::EnumSize(this->e());
  }

  switch (o_case()) {
    // optional .proto2_preserve_unknown_enum_unittest.MyEnum oneof_e_1 = 5;
    case kOneofE1: {
      total_size += 1 +
        ::google::protobuf::internal::WireFormatLite::EnumSize(this->oneof_e_1());
      break;
    }
    // optional .proto2_preserve_unknown_enum_unittest.MyEnum oneof_e_2 = 6;
    case kOneofE2: {
      total_size += 1 +
        ::google::protobuf::internal::WireFormatLite::EnumSize(this->oneof_e_2());
      break;
    }
    case O_NOT_SET: {
      break;
    }
  }
  int cached_size = ::google::protobuf::internal::ToCachedSize(total_size);
  GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
  _cached_size_ = cached_size;
  GOOGLE_SAFE_CONCURRENT_WRITES_END();
  return total_size;
}
// Generalized merge: downcasts when |from| is actually a MyMessage,
// otherwise falls back to reflection-based merging.
void MyMessage::MergeFrom(const ::google::protobuf::Message& from) {
// @@protoc_insertion_point(generalized_merge_from_start:proto2_preserve_unknown_enum_unittest.MyMessage)
  GOOGLE_DCHECK_NE(&from, this);
  const MyMessage* source =
      ::google::protobuf::internal::DynamicCastToGenerated<const MyMessage>(
          &from);
  if (source == NULL) {
  // @@protoc_insertion_point(generalized_merge_from_cast_fail:proto2_preserve_unknown_enum_unittest.MyMessage)
    ::google::protobuf::internal::ReflectionOps::Merge(from, this);
  } else {
  // @@protoc_insertion_point(generalized_merge_from_cast_success:proto2_preserve_unknown_enum_unittest.MyMessage)
    MergeFrom(*source);
  }
}

// Type-safe merge: appends repeated fields, overwrites set singular fields,
// and adopts whichever oneof member is set in |from|.
void MyMessage::MergeFrom(const MyMessage& from) {
// @@protoc_insertion_point(class_specific_merge_from_start:proto2_preserve_unknown_enum_unittest.MyMessage)
  GOOGLE_DCHECK_NE(&from, this);
  _internal_metadata_.MergeFrom(from._internal_metadata_);
  ::google::protobuf::uint32 cached_has_bits = 0;
  (void) cached_has_bits;

  repeated_e_.MergeFrom(from.repeated_e_);
  repeated_packed_e_.MergeFrom(from.repeated_packed_e_);
  repeated_packed_unexpected_e_.MergeFrom(from.repeated_packed_unexpected_e_);
  if (from.has_e()) {
    set_e(from.e());
  }
  switch (from.o_case()) {
    case kOneofE1: {
      set_oneof_e_1(from.oneof_e_1());
      break;
    }
    case kOneofE2: {
      set_oneof_e_2(from.oneof_e_2());
      break;
    }
    case O_NOT_SET: {
      break;
    }
  }
}
// Copy = Clear + Merge; self-assignment is a no-op.
void MyMessage::CopyFrom(const ::google::protobuf::Message& from) {
// @@protoc_insertion_point(generalized_copy_from_start:proto2_preserve_unknown_enum_unittest.MyMessage)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}

void MyMessage::CopyFrom(const MyMessage& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:proto2_preserve_unknown_enum_unittest.MyMessage)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}

// No required fields in this message, so any instance is initialized.
bool MyMessage::IsInitialized() const {
  return true;
}

void MyMessage::Swap(MyMessage* other) {
  if (other == this) return;
  InternalSwap(other);
}
// Member-wise swap of all field storage, oneof state, has-bits, unknown
// fields, and the cached size.
void MyMessage::InternalSwap(MyMessage* other) {
  repeated_e_.InternalSwap(&other->repeated_e_);
  repeated_packed_e_.InternalSwap(&other->repeated_packed_e_);
  repeated_packed_unexpected_e_.InternalSwap(&other->repeated_packed_unexpected_e_);
  std::swap(e_, other->e_);
  std::swap(o_, other->o_);
  std::swap(_oneof_case_[0], other->_oneof_case_[0]);
  std::swap(_has_bits_[0], other->_has_bits_[0]);
  _internal_metadata_.Swap(&other->_internal_metadata_);
  std::swap(_cached_size_, other->_cached_size_);
}

::google::protobuf::Metadata MyMessage::GetMetadata() const {
  protobuf_google_2fprotobuf_2funittest_5fpreserve_5funknown_5fenum2_2eproto::protobuf_AssignDescriptorsOnce();
  return protobuf_google_2fprotobuf_2funittest_5fpreserve_5funknown_5fenum2_2eproto::file_level_metadata[kIndexInFileMessages];
}
#if PROTOBUF_INLINE_NOT_IN_HEADERS
// Out-of-line definitions of the per-field accessors (generated here instead
// of in the header when PROTOBUF_INLINE_NOT_IN_HEADERS is set).  All setters
// assert that the enum value is one of the declared constants.
// MyMessage

// optional .proto2_preserve_unknown_enum_unittest.MyEnum e = 1;
bool MyMessage::has_e() const {
  return (_has_bits_[0] & 0x00000001u) != 0;
}
void MyMessage::set_has_e() {
  _has_bits_[0] |= 0x00000001u;
}
void MyMessage::clear_has_e() {
  _has_bits_[0] &= ~0x00000001u;
}
void MyMessage::clear_e() {
  e_ = 0;
  clear_has_e();
}
::proto2_preserve_unknown_enum_unittest::MyEnum MyMessage::e() const {
  // @@protoc_insertion_point(field_get:proto2_preserve_unknown_enum_unittest.MyMessage.e)
  return static_cast< ::proto2_preserve_unknown_enum_unittest::MyEnum >(e_);
}
void MyMessage::set_e(::proto2_preserve_unknown_enum_unittest::MyEnum value) {
  assert(::proto2_preserve_unknown_enum_unittest::MyEnum_IsValid(value));
  set_has_e();
  e_ = value;
  // @@protoc_insertion_point(field_set:proto2_preserve_unknown_enum_unittest.MyMessage.e)
}

// repeated .proto2_preserve_unknown_enum_unittest.MyEnum repeated_e = 2;
int MyMessage::repeated_e_size() const {
  return repeated_e_.size();
}
void MyMessage::clear_repeated_e() {
  repeated_e_.Clear();
}
::proto2_preserve_unknown_enum_unittest::MyEnum MyMessage::repeated_e(int index) const {
  // @@protoc_insertion_point(field_get:proto2_preserve_unknown_enum_unittest.MyMessage.repeated_e)
  return static_cast< ::proto2_preserve_unknown_enum_unittest::MyEnum >(repeated_e_.Get(index));
}
void MyMessage::set_repeated_e(int index, ::proto2_preserve_unknown_enum_unittest::MyEnum value) {
  assert(::proto2_preserve_unknown_enum_unittest::MyEnum_IsValid(value));
  repeated_e_.Set(index, value);
  // @@protoc_insertion_point(field_set:proto2_preserve_unknown_enum_unittest.MyMessage.repeated_e)
}
void MyMessage::add_repeated_e(::proto2_preserve_unknown_enum_unittest::MyEnum value) {
  assert(::proto2_preserve_unknown_enum_unittest::MyEnum_IsValid(value));
  repeated_e_.Add(value);
  // @@protoc_insertion_point(field_add:proto2_preserve_unknown_enum_unittest.MyMessage.repeated_e)
}
const ::google::protobuf::RepeatedField<int>&
MyMessage::repeated_e() const {
  // @@protoc_insertion_point(field_list:proto2_preserve_unknown_enum_unittest.MyMessage.repeated_e)
  return repeated_e_;
}
::google::protobuf::RepeatedField<int>*
MyMessage::mutable_repeated_e() {
  // @@protoc_insertion_point(field_mutable_list:proto2_preserve_unknown_enum_unittest.MyMessage.repeated_e)
  return &repeated_e_;
}

// repeated .proto2_preserve_unknown_enum_unittest.MyEnum repeated_packed_e = 3 [packed = true];
int MyMessage::repeated_packed_e_size() const {
  return repeated_packed_e_.size();
}
void MyMessage::clear_repeated_packed_e() {
  repeated_packed_e_.Clear();
}
::proto2_preserve_unknown_enum_unittest::MyEnum MyMessage::repeated_packed_e(int index) const {
  // @@protoc_insertion_point(field_get:proto2_preserve_unknown_enum_unittest.MyMessage.repeated_packed_e)
  return static_cast< ::proto2_preserve_unknown_enum_unittest::MyEnum >(repeated_packed_e_.Get(index));
}
void MyMessage::set_repeated_packed_e(int index, ::proto2_preserve_unknown_enum_unittest::MyEnum value) {
  assert(::proto2_preserve_unknown_enum_unittest::MyEnum_IsValid(value));
  repeated_packed_e_.Set(index, value);
  // @@protoc_insertion_point(field_set:proto2_preserve_unknown_enum_unittest.MyMessage.repeated_packed_e)
}
void MyMessage::add_repeated_packed_e(::proto2_preserve_unknown_enum_unittest::MyEnum value) {
  assert(::proto2_preserve_unknown_enum_unittest::MyEnum_IsValid(value));
  repeated_packed_e_.Add(value);
  // @@protoc_insertion_point(field_add:proto2_preserve_unknown_enum_unittest.MyMessage.repeated_packed_e)
}
const ::google::protobuf::RepeatedField<int>&
MyMessage::repeated_packed_e() const {
  // @@protoc_insertion_point(field_list:proto2_preserve_unknown_enum_unittest.MyMessage.repeated_packed_e)
  return repeated_packed_e_;
}
::google::protobuf::RepeatedField<int>*
MyMessage::mutable_repeated_packed_e() {
  // @@protoc_insertion_point(field_mutable_list:proto2_preserve_unknown_enum_unittest.MyMessage.repeated_packed_e)
  return &repeated_packed_e_;
}

// repeated .proto2_preserve_unknown_enum_unittest.MyEnum repeated_packed_unexpected_e = 4;
int MyMessage::repeated_packed_unexpected_e_size() const {
  return repeated_packed_unexpected_e_.size();
}
void MyMessage::clear_repeated_packed_unexpected_e() {
  repeated_packed_unexpected_e_.Clear();
}
::proto2_preserve_unknown_enum_unittest::MyEnum MyMessage::repeated_packed_unexpected_e(int index) const {
  // @@protoc_insertion_point(field_get:proto2_preserve_unknown_enum_unittest.MyMessage.repeated_packed_unexpected_e)
  return static_cast< ::proto2_preserve_unknown_enum_unittest::MyEnum >(repeated_packed_unexpected_e_.Get(index));
}
void MyMessage::set_repeated_packed_unexpected_e(int index, ::proto2_preserve_unknown_enum_unittest::MyEnum value) {
  assert(::proto2_preserve_unknown_enum_unittest::MyEnum_IsValid(value));
  repeated_packed_unexpected_e_.Set(index, value);
  // @@protoc_insertion_point(field_set:proto2_preserve_unknown_enum_unittest.MyMessage.repeated_packed_unexpected_e)
}
void MyMessage::add_repeated_packed_unexpected_e(::proto2_preserve_unknown_enum_unittest::MyEnum value) {
  assert(::proto2_preserve_unknown_enum_unittest::MyEnum_IsValid(value));
  repeated_packed_unexpected_e_.Add(value);
  // @@protoc_insertion_point(field_add:proto2_preserve_unknown_enum_unittest.MyMessage.repeated_packed_unexpected_e)
}
const ::google::protobuf::RepeatedField<int>&
MyMessage::repeated_packed_unexpected_e() const {
  // @@protoc_insertion_point(field_list:proto2_preserve_unknown_enum_unittest.MyMessage.repeated_packed_unexpected_e)
  return repeated_packed_unexpected_e_;
}
::google::protobuf::RepeatedField<int>*
MyMessage::mutable_repeated_packed_unexpected_e() {
  // @@protoc_insertion_point(field_mutable_list:proto2_preserve_unknown_enum_unittest.MyMessage.repeated_packed_unexpected_e)
  return &repeated_packed_unexpected_e_;
}

// optional .proto2_preserve_unknown_enum_unittest.MyEnum oneof_e_1 = 5;
bool MyMessage::has_oneof_e_1() const {
  return o_case() == kOneofE1;
}
void MyMessage::set_has_oneof_e_1() {
  _oneof_case_[0] = kOneofE1;
}
void MyMessage::clear_oneof_e_1() {
  if (has_oneof_e_1()) {
    o_.oneof_e_1_ = 0;
    clear_has_o();
  }
}
::proto2_preserve_unknown_enum_unittest::MyEnum MyMessage::oneof_e_1() const {
  // @@protoc_insertion_point(field_get:proto2_preserve_unknown_enum_unittest.MyMessage.oneof_e_1)
  if (has_oneof_e_1()) {
    return static_cast< ::proto2_preserve_unknown_enum_unittest::MyEnum >(o_.oneof_e_1_);
  }
  return static_cast< ::proto2_preserve_unknown_enum_unittest::MyEnum >(0);
}
void MyMessage::set_oneof_e_1(::proto2_preserve_unknown_enum_unittest::MyEnum value) {
  assert(::proto2_preserve_unknown_enum_unittest::MyEnum_IsValid(value));
  // Setting one oneof member clears any previously-set member first.
  if (!has_oneof_e_1()) {
    clear_o();
    set_has_oneof_e_1();
  }
  o_.oneof_e_1_ = value;
  // @@protoc_insertion_point(field_set:proto2_preserve_unknown_enum_unittest.MyMessage.oneof_e_1)
}

// optional .proto2_preserve_unknown_enum_unittest.MyEnum oneof_e_2 = 6;
bool MyMessage::has_oneof_e_2() const {
  return o_case() == kOneofE2;
}
void MyMessage::set_has_oneof_e_2() {
  _oneof_case_[0] = kOneofE2;
}
void MyMessage::clear_oneof_e_2() {
  if (has_oneof_e_2()) {
    o_.oneof_e_2_ = 0;
    clear_has_o();
  }
}
::proto2_preserve_unknown_enum_unittest::MyEnum MyMessage::oneof_e_2() const {
  // @@protoc_insertion_point(field_get:proto2_preserve_unknown_enum_unittest.MyMessage.oneof_e_2)
  if (has_oneof_e_2()) {
    return static_cast< ::proto2_preserve_unknown_enum_unittest::MyEnum >(o_.oneof_e_2_);
  }
  return static_cast< ::proto2_preserve_unknown_enum_unittest::MyEnum >(0);
}
void MyMessage::set_oneof_e_2(::proto2_preserve_unknown_enum_unittest::MyEnum value) {
  assert(::proto2_preserve_unknown_enum_unittest::MyEnum_IsValid(value));
  if (!has_oneof_e_2()) {
    clear_o();
    set_has_oneof_e_2();
  }
  o_.oneof_e_2_ = value;
  // @@protoc_insertion_point(field_set:proto2_preserve_unknown_enum_unittest.MyMessage.oneof_e_2)
}

// Oneof-case helpers for "o".
bool MyMessage::has_o() const {
  return o_case() != O_NOT_SET;
}
void MyMessage::clear_has_o() {
  _oneof_case_[0] = O_NOT_SET;
}
MyMessage::OCase MyMessage::o_case() const {
  return MyMessage::OCase(_oneof_case_[0]);
}
#endif  // PROTOBUF_INLINE_NOT_IN_HEADERS
// @@protoc_insertion_point(namespace_scope)
} // namespace proto2_preserve_unknown_enum_unittest
// @@protoc_insertion_point(global_scope)
| dbHunter/bson_rtdb | 3rd/protobuf/src/google/protobuf/unittest_preserve_unknown_enum2.pb.cc | C++ | apache-2.0 | 35,954 |
/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
*
* Copyright 2016 Oracle and/or its affiliates. All rights reserved.
*
* Oracle and Java are registered trademarks of Oracle and/or its affiliates.
* Other names may be trademarks of their respective owners.
*
* The contents of this file are subject to the terms of either the GNU
* General Public License Version 2 only ("GPL") or the Common
* Development and Distribution License("CDDL") (collectively, the
* "License"). You may not use this file except in compliance with the
* License. You can obtain a copy of the License at
* http://www.netbeans.org/cddl-gplv2.html
* or nbbuild/licenses/CDDL-GPL-2-CP. See the License for the
* specific language governing permissions and limitations under the
* License. When distributing the software, include this License Header
* Notice in each file and include the License file at
* nbbuild/licenses/CDDL-GPL-2-CP. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the GPL Version 2 section of the License file that
* accompanied this code. If applicable, add the following below the
* License Header, with the fields enclosed by brackets [] replaced by
* your own identifying information:
* "Portions Copyrighted [year] [name of copyright owner]"
*
* If you wish your version of this file to be governed by only the CDDL
* or only the GPL Version 2, indicate your decision by adding
* "[Contributor] elects to include this software in this distribution
* under the [CDDL or GPL Version 2] license." If you do not indicate a
* single choice of license, a recipient has the option to distribute
* your version of this file under either the CDDL, the GPL Version 2 or
* to extend the choice of license to its licensees as provided above.
* However, if you add GPL Version 2 code and therefore, elected the GPL
* Version 2 license, then the option applies only if the new code is
* made subject to such option by the copyright holder.
*
* Contributor(s):
*
* Portions Copyrighted 2016 Sun Microsystems, Inc.
*/
package beans;
import java.io.Serializable;
import java.util.Collection;
import java.util.Objects;
import javax.persistence.Basic;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.NamedQueries;
import javax.persistence.NamedQuery;
import javax.persistence.OneToMany;
import javax.persistence.Table;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlTransient;
/**
*
* @author marc.gareta
*/
@Entity
@Table(name = "TIPO_SERVICIO", catalog = "", schema = "APP")
@XmlRootElement
@NamedQueries({
    @NamedQuery(name = "TIPO_SERVICIO.findAll", query = "SELECT t FROM TipoServicio t"),
    @NamedQuery(name = "TIPO_SERVICIO.findAllNombre", query = "SELECT t.nombre FROM TipoServicio t"),
    @NamedQuery(name = "TIPO_SERVICIO.findById", query = "SELECT t FROM TipoServicio t WHERE t.id = :id"),
    @NamedQuery(name = "TIPO_SERVICIO.findByNombre", query = "SELECT t FROM TipoServicio t WHERE t.nombre = :nombre"),
    @NamedQuery(name = "TIPO_SERVICIO.deleteAll", query = "DELETE FROM TipoServicio t")})
public class TipoServicio implements Serializable {

    private static final long serialVersionUID = 1L;

    /** Surrogate primary key, generated by the database. */
    @Id
    @GeneratedValue(strategy = GenerationType.IDENTITY)
    @Basic(optional = false)
    @Column(nullable = false)
    private Integer id;

    /** Display name of the service type (max 100 characters). */
    @Column(length = 100)
    private String nombre;

    /** Incident reports that reference this service type. */
    @OneToMany(mappedBy = "tipoServicio")
    private Collection<ParteIncidencia> parteIncidenciaCollection;

    /** No-arg constructor required by JPA. */
    public TipoServicio() {
    }

    public TipoServicio(Integer id) {
        this.id = id;
    }

    public TipoServicio(String nombre) {
        this.nombre = nombre;
    }

    public Integer getId() {
        return id;
    }

    public void setId(Integer id) {
        this.id = id;
    }

    public String getNombre() {
        return nombre;
    }

    public void setNombre(String nombre) {
        this.nombre = nombre;
    }

    /** Excluded from XML serialization to avoid cycles through ParteIncidencia. */
    @XmlTransient
    public Collection<ParteIncidencia> getParteIncidenciaCollection() {
        return parteIncidenciaCollection;
    }

    public void setParteIncidenciaCollection(Collection<ParteIncidencia> parteIncidenciaCollection) {
        this.parteIncidenciaCollection = parteIncidenciaCollection;
    }

    @Override
    public int hashCode() {
        // Identity-based hash; 0 while the entity is transient (id == null).
        return Objects.hashCode(id);
    }

    @Override
    public boolean equals(Object object) {
        // NOTE: identity-based equality won't work reliably until the id field is set.
        if (!(object instanceof TipoServicio)) {
            return false;
        }
        TipoServicio other = (TipoServicio) object;
        return Objects.equals(this.id, other.id);
    }

    @Override
    public String toString() {
        return "beans.TipoServicio[ id=" + id + " ]";
    }

}
| MGareta/BBVA | src/java/beans/TipoServicio.java | Java | apache-2.0 | 5,309 |
namespace Snippets3.Serialization
{
    using NServiceBus;

    /// <summary>
    /// Documentation snippet showing how to enable the binary serializer
    /// on an NServiceBus v3 endpoint configuration. The #region markers
    /// delimit the text extracted into the published docs; keep them intact.
    /// </summary>
    public class BinarySerializerUsage
    {
        public void Simple()
        {
            #region BinarySerialization
            Configure configure = Configure.With();
            configure.BinarySerializer();
            #endregion
        }
    }
}
// Model describing one tax-rate record: effective date, rate value,
// free-form remark, and audit fields (creator/updater + timestamps).
Ext.define('TaxRate', {
    extend: 'Ext.data.Model',
    fields: [{name: "id"},
        {name: "date",type: 'date',dateFormat: 'Y-m-d'},
        {name: "rate"},
        {name: "remark"},
        {name: "create_time",type: 'date',dateFormat: 'timestamp'},
        {name: "update_time",type: 'date',dateFormat: 'timestamp'},
        {name: "creater"},
        {name: "updater"}]
});
// Store backing the tax-rate grid; loads JSON from the ERP endpoint.
var taxRateStore = Ext.create('Ext.data.Store', {
    model: 'TaxRate',
    proxy: {
        type: 'ajax',
        reader: 'json',
        url: homePath+'/public/erp/setting_tax/gettaxrate/option/data'
    }
});
// Grid editing plugin; a single click starts editing a cell.
// NOTE(review): the variable is named "RowEditing" but this is actually a
// CellEditing plugin — confirm which editing mode is intended.
var taxRateRowEditing = Ext.create('Ext.grid.plugin.CellEditing', {
    clicksToEdit: 1
});
// Modal window for managing tax rates: list, add, delete and save changes.
// Fix: removed a dead `var selection = Ext.getCmp('taxGrid')...` lookup in
// the save handler — it queried a different grid and its result was unused.
var taxRateWin = Ext.create('Ext.window.Window', {
    title: '税率管理',
    border: 0,
    height: 300,
    width: 600,
    modal: true,
    constrain: true,
    closeAction: 'hide',
    layout: 'fit',
    tools: [{
        type: 'refresh',
        tooltip: 'Refresh',
        scope: this,
        handler: function(){taxRateStore.reload();}
    }],
    items: [{
        xtype: 'gridpanel',
        id: 'taxRateGrid',
        columnLines: true,
        store: taxRateStore,
        selType: 'checkboxmodel',
        tbar: [{
            // Hidden field carrying the id of the tax these rates belong to;
            // submitted together with the change set below.
            xtype: 'hiddenfield',
            id: 'tax_id_to_rate'
        }, {
            text: '添加税率',
            iconCls: 'icon-add',
            scope: this,
            handler: function(){
                // Insert a fresh record at the top and start editing it.
                taxRateRowEditing.cancelEdit();
                var r = Ext.create('TaxRate', {
                    date: Ext.util.Format.date(new Date(), 'Y-m-d'),
                    rate: 1
                });
                taxRateStore.insert(0, r);
                taxRateRowEditing.startEdit(0, 0);
            }
        }, {
            text: '删除税率',
            iconCls: 'icon-delete',
            scope: this,
            handler: function(){
                // Remove the checked rows from the store (persisted on save).
                var selection = Ext.getCmp('taxRateGrid').getView().getSelectionModel().getSelection();
                if(selection.length > 0){
                    taxRateStore.remove(selection);
                }else{
                    Ext.MessageBox.alert('错误', '没有选择删除对象!');
                }
            }
        }, {
            text: '保存修改',
            iconCls: 'icon-save',
            scope: this,
            handler: function(){
                var updateRecords = taxRateStore.getUpdatedRecords();
                var insertRecords = taxRateStore.getNewRecords();
                var deleteRecords = taxRateStore.getRemovedRecords();
                // Only submit when something actually changed.
                if(updateRecords.length + insertRecords.length + deleteRecords.length > 0){
                    var changeRows = {
                        updated: [],
                        inserted: [],
                        deleted: []
                    };
                    for(var i = 0; i < updateRecords.length; i++){
                        changeRows.updated.push(updateRecords[i].data);
                    }
                    for(var i = 0; i < insertRecords.length; i++){
                        changeRows.inserted.push(insertRecords[i].data);
                    }
                    for(var i = 0; i < deleteRecords.length; i++){
                        changeRows.deleted.push(deleteRecords[i].data);
                    }
                    Ext.MessageBox.confirm('确认', '确定保存修改内容?', function(button, text){
                        if(button == 'yes'){
                            var json = Ext.JSON.encode(changeRows);
                            Ext.Msg.wait('提交中,请稍后...', '提示');
                            Ext.Ajax.request({
                                url: homePath+'/public/erp/setting_tax/edittaxrate',
                                params: {json: json, tax_id: Ext.getCmp('tax_id_to_rate').value},
                                method: 'POST',
                                success: function(response, options) {
                                    var data = Ext.JSON.decode(response.responseText);
                                    if(data.success){
                                        Ext.MessageBox.alert('提示', data.info);
                                        taxRateStore.reload();
                                        taxStore.reload();
                                    }else{
                                        Ext.MessageBox.alert('错误', data.info);
                                    }
                                },
                                failure: function(response){
                                    Ext.MessageBox.alert('错误', '保存提交失败');
                                }
                            });
                        }
                    });
                }else{
                    Ext.MessageBox.alert('提示', '没有修改任何数据!');
                }
            }
        }, '->', {
            text: '刷新',
            iconCls: 'icon-refresh',
            handler: function(){
                taxRateStore.reload();
            }
        }],
        plugins: taxRateRowEditing,
        columns: [{
            xtype: 'rownumberer'
        }, {
            text: 'ID',
            dataIndex: 'id',
            hidden: true,
            flex: 1
        }, {
            text: '生效日期',
            dataIndex: 'date',
            renderer: Ext.util.Format.dateRenderer('Y-m-d'),
            editor: {
                xtype: 'datefield',
                editable: false,
                format: 'Y-m-d'
            },
            flex: 3
        }, {
            text: '税率',
            dataIndex: 'rate',
            editor: 'numberfield',
            flex: 2
        }, {
            text: '备注',
            dataIndex: 'remark',
            editor: 'textfield',
            flex: 5
        }, {
            text: '创建人',
            hidden: true,
            dataIndex: 'creater',
            flex: 2
        }, {
            text: '创建时间',
            hidden: true,
            dataIndex: 'create_time',
            renderer : Ext.util.Format.dateRenderer('Y-m-d H:i:s'),
            flex: 3
        }, {
            text: '更新人',
            hidden: true,
            dataIndex: 'updater',
            flex: 2
        }, {
            text: '更新时间',
            hidden: true,
            dataIndex: 'update_time',
            renderer : Ext.util.Format.dateRenderer('Y-m-d H:i:s'),
            flex: 3
        }]
    }]
});
package ch.bfh.swos.bookapp.jpa.model;
import javax.persistence.*;
import java.io.Serializable;
import java.util.Date;
import static javax.persistence.GenerationType.IDENTITY;
import static javax.persistence.TemporalType.DATE;
/**
* Entity implementation class for Entity: Book
*
*/
/**
 * JPA entity representing a book, linked many-to-one to its {@link Author}.
 */
@Entity
public class Book implements Serializable {

    private static final long serialVersionUID = 1L;

    /** Surrogate primary key, generated by the database. */
    @Id
    @GeneratedValue(strategy = IDENTITY)
    private Long id;

    /** Business identifier of the book. */
    private String bookId;

    /** Title of the book. */
    private String title;

    /** Release date (date portion only). */
    @Temporal(DATE)
    private Date releaseDate;

    /** Author this book belongs to. */
    @ManyToOne
    private Author author;

    /** No-arg constructor required by JPA. */
    public Book() {
    }

    public Long getId() {
        return id;
    }

    public void setId(Long id) {
        this.id = id;
    }

    public String getBookId() {
        return bookId;
    }

    public void setBookId(String bookId) {
        this.bookId = bookId;
    }

    public String getTitle() {
        return title;
    }

    public void setTitle(String title) {
        this.title = title;
    }

    public Date getReleaseDate() {
        return releaseDate;
    }

    public void setReleaseDate(Date releaseDate) {
        this.releaseDate = releaseDate;
    }

    public Author getAuthor() {
        return author;
    }

    public void setAuthor(Author author) {
        this.author = author;
    }

}
| rvillars/bookapp-cqrs | ch.bfh.swos.bookapp.jpa/src/main/java/ch/bfh/swos/bookapp/jpa/model/Book.java | Java | apache-2.0 | 1,241 |
<?php
namespace App\Util;
use Illuminate\Support\Facades\DB;
class Access
{
    /**
     * List all permission rows of an app, joined with the user records.
     *
     * @return array|int empty array when $userid may not manage permissions,
     *                   -3 when no app with code $appcode exists,
     *                   otherwise the joined user_app/users rows.
     */
    public static function listPerm($userid, $appcode)
    {
        if (self::can_editPerm($userid, $appcode) == false) return [];
        $app = DB::table('apps')->where('code', $appcode)->first();
        if ($app == null) return -3;
        return DB::table('user_app')
            ->join('users', 'user_app.userid', '=', 'users.id')
            ->where('user_app.appid', $app->id)->get();
    }

    /**
     * Remove a user from an app.
     *
     * @return int 0 on success, -1 access denied, -3 app does not exist,
     *             -4 the owner cannot be deleted.
     */
    public static function deletePerm($userid, $otherid, $appcode)
    {
        $app = DB::table('apps')->where('code', $appcode)->first();
        if ($app == null) return -3;
        if ($otherid == $app->ownerid) return -4;
        // BUGFIX: can_editPerm() looks the app up by *code*; the original
        // passed $app->id here, so the permission check could never succeed.
        if (self::can_editPerm($userid, $appcode)) {
            DB::table('user_app')->where('appid', $app->id)->where('userid', $otherid)->delete();
            return 0;
        }
        return -1;
    }

    /**
     * Add a user to an app and/or set their permission flags.
     *
     * For each of $can_perm / $can_struct / $can_report: null leaves the flag
     * untouched; otherwise the value (0 = unset, 1 = set) is applied.
     *
     * @return int 0 success, -1 access denied, -3 app does not exist,
     *             -4 cannot change the owner's permissions, -5 unknown user.
     */
    public static function setPerm($userid, $otheruser, $appcode, $can_perm, $can_struct, $can_report)
    {
        // Both the acting user and the target user must exist.
        if (DB::table('users')->where('id', $otheruser)->count() + DB::table('users')->where('id', $userid)->count() != 2)
            return -5;
        $app = DB::table('apps')->where('code', $appcode)->first();
        if ($app == null) return -3;
        if (self::can_editPerm($userid, $appcode)) {
            $perm = DB::table('user_app')->where('appid', $app->id)->where('userid', $otheruser)->first();
            if ($perm == null) {
                // First contact: owners get full rights, others report-only.
                if ($app->ownerid == $otheruser)
                    DB::table('user_app')->insert(
                        ['appid' => $app->id, 'userid' => $otheruser, 'can_perm' => 1, 'can_struct' => 1, 'can_report' => 1]
                    );
                else
                    DB::table('user_app')->insert(
                        ['appid' => $app->id, 'userid' => $otheruser, 'can_perm' => 0, 'can_struct' => 0, 'can_report' => 1]
                    );
            } else {
                if ($app->ownerid == $otheruser) {
                    return -4;
                }
            }
            // Apply only the flags the caller actually provided.
            $permrecord = [];
            if ($can_perm != null) $permrecord['can_perm'] = $can_perm;
            if ($can_struct != null) $permrecord['can_struct'] = $can_struct;
            if ($can_report != null) $permrecord['can_report'] = $can_report;
            if (count($permrecord) != 0) {
                DB::table('user_app')->where('appid', $app->id)->where('userid', $otheruser)->update($permrecord);
            }
            return 0;
        }
        // FIX: removed a stray debug call `abort(500, "sdfdf")` that made this
        // access-denied branch unreachable.
        return -1;
    }

    /**
     * True when the user does not exist or is flagged as banned.
     */
    public static function isBanned($id){
        $user = DB::table('users')->where('id', $id)->first();
        if ($user == null) return true;
        return (bool) $user->banned;
    }

    /**
     * Whether $userid may manage permissions of the app identified by code.
     * Returns -3 (truthy-ish sentinel kept for backward compatibility) when
     * the app does not exist; otherwise a boolean.
     */
    public static function can_editPerm($userid, $appcode)
    {
        $app = DB::table('apps')->where('code', $appcode)->first();
        if ($app == null) return -3;
        if (self::isBanned($app->ownerid)) return false;
        // The owner always has full access.
        if ($app->ownerid == $userid) return true;
        $perm = DB::table('user_app')->where('appid', $app->id)->where('userid', $userid)->first();
        if ($perm == null) return false;
        if ($perm->can_perm == 1) return true;
        return false;
    }

    /**
     * Whether $userid may edit the app's structure.
     */
    public static function can_editStruct($userid, $appcode)
    {
        $app = DB::table('apps')->where('code', $appcode)->first();
        if ($app == null) return -3;
        if (self::isBanned($app->ownerid)) return false;
        if ($app->ownerid == $userid) return true;
        $perm = DB::table('user_app')->where('appid', $app->id)->where('userid', $userid)->first();
        if ($perm == null) return false;
        if ($perm->can_struct == 1) return true;
        return false;
    }

    /**
     * Whether $userid may view the app (owner or any user_app row).
     */
    public static function can_view($userid, $appcode)
    {
        $app = DB::table('apps')->where('code', $appcode)->first();
        if ($app == null) return -3;
        if (self::isBanned($app->ownerid)) return false;
        if ($app->ownerid == $userid) return true;
        $perm = DB::table('user_app')->where('appid', $app->id)->where('userid', $userid)->first();
        if ($perm == null) return false;
        return true;
    }

    /**
     * Whether $userid may edit the app's reports.
     */
    public static function can_editReport($userid, $appcode)
    {
        $app = DB::table('apps')->where('code', $appcode)->first();
        if ($app == null) return -3;
        if (self::isBanned($app->ownerid)) return false;
        if ($app->ownerid == $userid) return true;
        $perm = DB::table('user_app')->where('appid', $app->id)->where('userid', $userid)->first();
        if ($perm == null) return false;
        if ($perm->can_report == 1) return true;
        return false;
    }
}
| meotrics/meotrics | dashboard/app/Util/Access.php | PHP | apache-2.0 | 4,937 |
/*
* Copyright 2010-2011 Nabeel Mukhtar
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.google.code.linkedinapi.schema.impl;
import java.io.Serializable;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlType;
import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter;
import com.google.code.linkedinapi.schema.Adapter1;
import com.google.code.linkedinapi.schema.DateOfBirth;
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "", propOrder = {
    "year",
    "month",
    "day"
})
@XmlRootElement(name = "date-of-birth")
public class DateOfBirthImpl
    implements Serializable, DateOfBirth
{

    private static final long serialVersionUID = 2461660169443089969L;

    /** Year component of the date of birth. */
    @XmlElement(required = true, type = String.class)
    @XmlJavaTypeAdapter(Adapter1 .class)
    protected Long year;

    /** Month component of the date of birth. */
    @XmlElement(required = true, type = String.class)
    @XmlJavaTypeAdapter(Adapter1 .class)
    protected Long month;

    /** Day component of the date of birth. */
    @XmlElement(required = true, type = String.class)
    @XmlJavaTypeAdapter(Adapter1 .class)
    protected Long day;

    /** @return the year, or null if unset */
    public Long getYear() {
        return year;
    }

    public void setYear(Long value) {
        year = value;
    }

    /** @return the month, or null if unset */
    public Long getMonth() {
        return month;
    }

    public void setMonth(Long value) {
        month = value;
    }

    /** @return the day, or null if unset */
    public Long getDay() {
        return day;
    }

    public void setDay(Long value) {
        day = value;
    }

}
| shisoft/LinkedIn-J | core/src/main/java/com/google/code/linkedinapi/schema/impl/DateOfBirthImpl.java | Java | apache-2.0 | 2,214 |
package excelcom.api;
import com.sun.jna.platform.win32.COM.COMException;
import com.sun.jna.platform.win32.COM.COMLateBindingObject;
import com.sun.jna.platform.win32.COM.IDispatch;
import com.sun.jna.platform.win32.OaIdl;
import com.sun.jna.platform.win32.OleAuto;
import com.sun.jna.platform.win32.Variant;
import static com.sun.jna.platform.win32.Variant.VT_NULL;
/**
* Represents a Range
*/
/**
 * Thin late-binding COM wrapper around an Excel {@code Range} object:
 * value access, colors (interior/font/border), comments and Find/FindNext.
 */
class Range extends COMLateBindingObject {

    Range(IDispatch iDispatch) throws COMException {
        super(iDispatch);
    }

    /** Raw cell value(s) of the range as a COM VARIANT. */
    Variant.VARIANT getValue() {
        return this.invoke("Value");
    }

    /** 1-based row index of the range's first cell. */
    int getRow() {
        return this.invoke("Row").intValue();
    }

    /** 1-based column index of the range's first cell. */
    int getColumn() {
        return this.invoke("Column").intValue();
    }

    void setInteriorColor(ExcelColor color) {
        new CellPane(this.getAutomationProperty("Interior", this)).setColorIndex(color);
    }

    ExcelColor getInteriorColor() {
        return ExcelColor.getColor(new CellPane(this.getAutomationProperty("Interior", this)).getColorIndex());
    }

    void setFontColor(ExcelColor color) {
        new CellPane(this.getAutomationProperty("Font", this)).setColorIndex(color);
    }

    ExcelColor getFontColor() {
        return ExcelColor.getColor(new CellPane(this.getAutomationProperty("Font", this)).getColorIndex());
    }

    void setBorderColor(ExcelColor color) {
        new CellPane(this.getAutomationProperty("Borders", this)).setColorIndex(color);
    }

    ExcelColor getBorderColor() {
        return ExcelColor.getColor(new CellPane(this.getAutomationProperty("Borders", this)).getColorIndex());
    }

    /** Replaces any existing comment on the range with the given text. */
    void setComment(String comment) {
        this.invokeNoReply("ClearComments");
        this.invoke("AddComment", new Variant.VARIANT(comment));
    }

    /** Text of the range's comment. */
    String getComment() {
        return new COMLateBindingObject(this.getAutomationProperty("Comment")) {
            private String getText() {
                return this.invoke("Text").stringValue();
            }
        }.getText();
    }

    /**
     * Invokes Excel's Find with the given option VARIANTs.
     *
     * @return the first match, or null when nothing was found
     */
    FindResult find(Variant.VARIANT[] options) {
        IDispatch find = (IDispatch) this.invoke("Find", options).getValue();
        if (find == null) {
            return null;
        }
        return new FindResult(find, this);
    }

    /** Continues a search started by {@link #find(Variant.VARIANT[])}. */
    FindResult findNext(FindResult previous) {
        return new FindResult(this.getAutomationProperty("FindNext", this, previous.toVariant()), this);
    }

    /**
     * Helper over a sub-object of the range (Interior, Borders or Font)
     * exposing its ColorIndex property.
     */
    private class CellPane extends COMLateBindingObject {

        CellPane(IDispatch iDispatch) {
            super(iDispatch);
        }

        void setColorIndex(ExcelColor color) {
            this.setProperty("ColorIndex", color.getIndex());
        }

        int getColorIndex() {
            // FIX: fetch the property once and reuse the VARIANT; the original
            // issued a second COM round-trip to read the value it had already
            // retrieved for the null check.
            Variant.VARIANT colorIndex = this.invoke("ColorIndex");
            if (colorIndex.getVarType().intValue() == VT_NULL) {
                throw new NullPointerException("return type of colorindex is null. Maybe multiple colors in range?");
            }
            return colorIndex.intValue();
        }
    }
}
| lprc/excelcom | src/main/java/excelcom/api/Range.java | Java | apache-2.0 | 3,146 |
/*
* Copyright 2015 Google Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.cloud.bigtable.hbase;
import static com.google.cloud.bigtable.hbase.test_env.SharedTestEnvRule.COLUMN_FAMILY;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.junit.Assert;
import org.junit.Test;
@SuppressWarnings("deprecation")
public class TestCreateTable extends AbstractTestCreateTable {

    /** Creates a table with the shared test column family. */
    @Override
    protected void createTable(TableName tableName) throws IOException {
        getConnection().getAdmin().createTable(createDescriptor(tableName));
    }

    /** Creates a pre-split table from a start/end key and split count. */
    @Override
    protected void createTable(TableName tableName, byte[] start, byte[] end, int splitCount)
        throws IOException {
        getConnection().getAdmin().createTable(createDescriptor(tableName), start, end, splitCount);
    }

    /** Creates a table pre-split at the given boundary keys. */
    @Override
    protected void createTable(TableName tableName, byte[][] ranges) throws IOException {
        getConnection().getAdmin().createTable(createDescriptor(tableName), ranges);
    }

    /** Builds a descriptor containing only the shared test column family. */
    private HTableDescriptor createDescriptor(TableName tableName) {
        return new HTableDescriptor(tableName).addFamily(new HColumnDescriptor(COLUMN_FAMILY));
    }

    @Override
    protected List<HRegionLocation> getRegions(TableName tableName) throws Exception {
        return getConnection().getRegionLocator(tableName).getAllRegionLocations();
    }

    /** A freshly created, unsplit table should have exactly one region. */
    @Test
    public void testGetRegions() throws Exception {
        TableName tableName = sharedTestEnv.newTestTableName();
        getConnection().getAdmin().createTable(createDescriptor(tableName));
        List<RegionInfo> regions = getConnection().getAdmin().getRegions(tableName);
        Assert.assertEquals(1, regions.size());
    }

    @Override
    protected boolean asyncGetRegions(TableName tableName) throws Exception {
        // Simplified: the `== 1 ? true : false` ternary was redundant.
        return getConnection().getAdmin().getRegions(tableName).size() == 1;
    }

    @Override
    protected boolean isTableEnabled(TableName tableName) throws Exception {
        return getConnection().getAdmin().isTableEnabled(tableName);
    }

    @Override
    protected void disableTable(TableName tableName) throws Exception {
        getConnection().getAdmin().disableTable(tableName);
    }

    @Override
    protected void adminDeleteTable(TableName tableName) throws Exception {
        getConnection().getAdmin().deleteTable(tableName);
    }

    @Override
    protected boolean tableExists(TableName tableName) throws Exception {
        return getConnection().getAdmin().tableExists(tableName);
    }
}
| sduskis/cloud-bigtable-client | bigtable-hbase-2.x-parent/bigtable-hbase-2.x-integration-tests/src/test/java/com/google/cloud/bigtable/hbase/TestCreateTable.java | Java | apache-2.0 | 3,209 |
// Copyright 2004, 2005 The Apache Software Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package org.apache.tapestry.binding;
import org.apache.hivemind.Location;
import org.apache.tapestry.BindingException;
import org.apache.tapestry.IActionListener;
import org.apache.tapestry.IComponent;
import org.apache.tapestry.IRequestCycle;
import org.apache.tapestry.PageRedirectException;
import org.apache.tapestry.RedirectException;
import org.apache.tapestry.coerce.ValueConverter;
import org.apache.tapestry.listener.ListenerMap;
/**
* Test for {@link org.apache.tapestry.binding.ListenerMethodBinding}.
*
* @author Howard M. Lewis Ship
* @since 4.0
*/
public class TestListenerMethodBinding extends BindingTestCase
{
    // Happy path: triggering the binding looks up the "foo" listener on the
    // component's ListenerMap and delegates to it.
    public void testInvokeListener()
    {
        IComponent component = newComponent();
        ListenerMap map = newListenerMap();
        IActionListener listener = newListener();
        Location l = newLocation();
        IComponent sourceComponent = newComponent();
        IRequestCycle cycle = newCycle();
        ValueConverter vc = newValueConverter();

        // Record phase: expect the listener lookup and the delegated call.
        trainGetListener(component, map, listener);
        listener.actionTriggered(sourceComponent, cycle);

        replayControls();

        ListenerMethodBinding b = new ListenerMethodBinding("param", vc, l, component, "foo");

        // The binding's object is the binding itself.
        assertSame(b, b.getObject());
        assertSame(component, b.getComponent());

        b.actionTriggered(sourceComponent, cycle);

        verifyControls();
    }

    // toString() should include description, component id, method name and
    // location; only the bracketed description part is compared.
    public void testToString()
    {
        IComponent component = newComponent();
        Location l = newLocation();
        ValueConverter vc = newValueConverter();

        trainGetExtendedId(component, "Fred/barney");

        replayControls();

        ListenerMethodBinding b = new ListenerMethodBinding("param", vc, l, component, "foo");

        String toString = b.toString();

        // Extract the text between '[' and the trailing ']'.
        String description = toString.substring(toString.indexOf('[') + 1, toString.length() - 1);

        assertEquals(
                "param, component=Fred/barney, methodName=foo, location=classpath:/org/apache/tapestry/binding/TestListenerMethodBinding, line 1",
                description);

        verifyControls();
    }

    // PageRedirectException thrown by the listener must propagate unwrapped.
    public void testInvokeAndPageRedirect()
    {
        IComponent component = newComponent();
        ListenerMap map = newListenerMap();
        IActionListener listener = newListener();
        Location l = newLocation();
        ValueConverter vc = newValueConverter();
        IComponent sourceComponent = newComponent();
        IRequestCycle cycle = newCycle();

        trainGetListener(component, map, listener);

        listener.actionTriggered(sourceComponent, cycle);
        Throwable t = new PageRedirectException("TargetPage");
        setThrowable(listener, t);

        replayControls();

        ListenerMethodBinding b = new ListenerMethodBinding("param", vc, l, component, "foo");

        try
        {
            b.actionTriggered(sourceComponent, cycle);
            unreachable();
        }
        catch (PageRedirectException ex)
        {
            // Must be the very same exception instance, not a wrapper.
            assertSame(t, ex);
        }

        verifyControls();
    }

    // RedirectException thrown by the listener must propagate unwrapped too.
    public void testInvokeAndRedirect()
    {
        IComponent component = newComponent();
        ListenerMap map = newListenerMap();
        IActionListener listener = newListener();
        Location l = newLocation();
        ValueConverter vc = newValueConverter();
        IComponent sourceComponent = newComponent();
        IRequestCycle cycle = newCycle();

        trainGetListener(component, map, listener);

        listener.actionTriggered(sourceComponent, cycle);
        Throwable t = new RedirectException("http://foo.bar");
        setThrowable(listener, t);

        replayControls();

        ListenerMethodBinding b = new ListenerMethodBinding("param", vc, l, component, "foo");

        try
        {
            b.actionTriggered(sourceComponent, cycle);
            unreachable();
        }
        catch (RedirectException ex)
        {
            assertSame(t, ex);
        }

        verifyControls();
    }

    // Any other listener failure is wrapped in a BindingException that keeps
    // the component, location and binding for error reporting.
    public void testInvokeListenerFailure()
    {
        IComponent component = newComponent();
        ListenerMap map = newListenerMap();
        IActionListener listener = newListener();
        Location l = newLocation();
        ValueConverter vc = newValueConverter();
        IComponent sourceComponent = newComponent();
        IRequestCycle cycle = newCycle();

        trainGetListener(component, map, listener);

        listener.actionTriggered(sourceComponent, cycle);
        Throwable t = new RuntimeException("Failure.");
        setThrowable(listener, t);

        trainGetExtendedId(component, "Fred/barney");

        replayControls();

        ListenerMethodBinding b = new ListenerMethodBinding("param", vc, l, component, "foo");

        try
        {
            b.actionTriggered(sourceComponent, cycle);
            unreachable();
        }
        catch (BindingException ex)
        {
            assertEquals(
                    "Exception invoking listener method foo of component Fred/barney: Failure.",
                    ex.getMessage());
            assertSame(component, ex.getComponent());
            assertSame(l, ex.getLocation());
            assertSame(b, ex.getBinding());
        }

        verifyControls();
    }

    // Records the expectation chain: component.getListeners() -> map,
    // then map.getListener("foo") -> listener.
    private void trainGetListener(IComponent component, ListenerMap lm, IActionListener listener)
    {
        trainGetListeners(component, lm);
        trainGetListener(lm, "foo", listener);
    }

    protected IRequestCycle newCycle()
    {
        return (IRequestCycle) newMock(IRequestCycle.class);
    }

    private void trainGetListener(ListenerMap map, String methodName, IActionListener listener)
    {
        map.getListener(methodName);
        setReturnValue(map, listener);
    }

    private void trainGetListeners(IComponent component, ListenerMap lm)
    {
        component.getListeners();
        setReturnValue(component,lm);
    }

    private ListenerMap newListenerMap()
    {
        return (ListenerMap) newMock(ListenerMap.class);
    }

    private IActionListener newListener()
    {
        return (IActionListener) newMock(IActionListener.class);
    }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.sysml.lops.compile;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.mapred.SequenceFileInputFormat;
import org.apache.sysml.api.DMLScript;
import org.apache.sysml.api.DMLScript.RUNTIME_PLATFORM;
import org.apache.sysml.conf.ConfigurationManager;
import org.apache.sysml.conf.DMLConfig;
import org.apache.sysml.hops.AggBinaryOp;
import org.apache.sysml.hops.BinaryOp;
import org.apache.sysml.hops.Hop.FileFormatTypes;
import org.apache.sysml.hops.HopsException;
import org.apache.sysml.hops.OptimizerUtils;
import org.apache.sysml.lops.AppendM;
import org.apache.sysml.lops.BinaryM;
import org.apache.sysml.lops.CombineBinary;
import org.apache.sysml.lops.Data;
import org.apache.sysml.lops.Data.OperationTypes;
import org.apache.sysml.lops.FunctionCallCP;
import org.apache.sysml.lops.Lop;
import org.apache.sysml.lops.Lop.Type;
import org.apache.sysml.lops.LopProperties.ExecLocation;
import org.apache.sysml.lops.LopProperties.ExecType;
import org.apache.sysml.lops.LopsException;
import org.apache.sysml.lops.MapMult;
import org.apache.sysml.lops.OutputParameters;
import org.apache.sysml.lops.OutputParameters.Format;
import org.apache.sysml.lops.PMMJ;
import org.apache.sysml.lops.ParameterizedBuiltin;
import org.apache.sysml.lops.PickByCount;
import org.apache.sysml.lops.SortKeys;
import org.apache.sysml.lops.Unary;
import org.apache.sysml.parser.DataExpression;
import org.apache.sysml.parser.Expression;
import org.apache.sysml.parser.Expression.DataType;
import org.apache.sysml.parser.ParameterizedBuiltinFunctionExpression;
import org.apache.sysml.parser.StatementBlock;
import org.apache.sysml.runtime.DMLRuntimeException;
import org.apache.sysml.runtime.controlprogram.parfor.ProgramConverter;
import org.apache.sysml.runtime.controlprogram.parfor.util.IDSequence;
import org.apache.sysml.runtime.instructions.CPInstructionParser;
import org.apache.sysml.runtime.instructions.Instruction;
import org.apache.sysml.runtime.instructions.Instruction.INSTRUCTION_TYPE;
import org.apache.sysml.runtime.instructions.InstructionParser;
import org.apache.sysml.runtime.instructions.MRJobInstruction;
import org.apache.sysml.runtime.instructions.SPInstructionParser;
import org.apache.sysml.runtime.instructions.cp.CPInstruction;
import org.apache.sysml.runtime.instructions.cp.CPInstruction.CPINSTRUCTION_TYPE;
import org.apache.sysml.runtime.instructions.cp.VariableCPInstruction;
import org.apache.sysml.runtime.matrix.MatrixCharacteristics;
import org.apache.sysml.runtime.matrix.data.InputInfo;
import org.apache.sysml.runtime.matrix.data.OutputInfo;
import org.apache.sysml.runtime.matrix.sort.PickFromCompactInputFormat;
/**
*
* Class to maintain a DAG of lops and compile it into
* runtime instructions, incl piggybacking into jobs.
*
* @param <N> the class parameter has no effect and is
* only kept for documentation purposes.
*/
public class Dag<N extends Lop>
{
// Commons-logging logger for tracing grouping/piggybacking decisions.
private static final Log LOG = LogFactory.getLog(Dag.class.getName());
// Return codes of getChildAlignment(): whether a MapAndReduce child was
// found and whether it breaks alignment (consumed by canEliminateLop).
private static final int CHILD_BREAKS_ALIGNMENT = 2;
private static final int CHILD_DOES_NOT_BREAK_ALIGNMENT = 1;
private static final int MRCHILD_NOT_FOUND = 0;
private static final int MR_CHILD_FOUND_BREAKS_ALIGNMENT = 4;
private static final int MR_CHILD_FOUND_DOES_NOT_BREAK_ALIGNMENT = 5;
// Global sequences for unique temp-file suffixes and temp variable names.
private static IDSequence job_id = null;
private static IDSequence var_index = null;
// Number of reducers for MR jobs, taken from the DML configuration.
private int total_reducers = -1;
// Scratch-space root and the lazily built per-process file-path prefix.
private String scratch = "";
private String scratchFilePath = null;
// Accumulated distributed-cache memory footprint of lops piggybacked onto
// the current GMR mapper (see computeFootprintInMapper/checkMemoryLimits).
private double gmrMapperFootprint = 0;
static {
	job_id = new IDSequence();
	var_index = new IDSequence();
}
// hash set for all nodes in dag
// NOTE(review): despite the comment above this is an ArrayList, so
// addNode() pays an O(n) contains() check per insertion.
private ArrayList<Lop> nodes = null;
/*
 * Hashmap to translates the nodes in the DAG to a sequence of numbers
 * key: Lop ID
 * value: Sequence Number (0 ... |DAG|)
 *
 * This map is primarily used in performing DFS on the DAG, and subsequently in performing ancestor-descendant checks.
 */
private HashMap<Long, Integer> IDMap = null;
/**
 * Compilation output associated with a single lop: the HDFS file name and
 * symbol-table variable it maps to, its output format, and the instruction
 * lists to run before a MR instruction, after it, and at the very end.
 */
private static class NodeOutput {
	String fileName;
	String varName;
	OutputInfo outInfo;
	// instructions emitted before the corresponding MR instruction
	ArrayList<Instruction> preInstructions = new ArrayList<Instruction>();
	// instructions emitted after the corresponding MR instruction
	ArrayList<Instruction> postInstructions = new ArrayList<Instruction>();
	// cleanup/final instructions emitted last
	ArrayList<Instruction> lastInstructions = new ArrayList<Instruction>();
	NodeOutput() {
		// scalar fields default to null; lists are initialized inline above
	}
	public String getFileName() {
		return fileName;
	}
	public void setFileName(String fileName) {
		this.fileName = fileName;
	}
	public String getVarName() {
		return varName;
	}
	public void setVarName(String varName) {
		this.varName = varName;
	}
	public OutputInfo getOutInfo() {
		return outInfo;
	}
	public void setOutInfo(OutputInfo outInfo) {
		this.outInfo = outInfo;
	}
	public ArrayList<Instruction> getPreInstructions() {
		return preInstructions;
	}
	public void addPreInstruction(Instruction inst) {
		preInstructions.add(inst);
	}
	public ArrayList<Instruction> getPostInstructions() {
		return postInstructions;
	}
	public void addPostInstruction(Instruction inst) {
		postInstructions.add(inst);
	}
	public ArrayList<Instruction> getLastInstructions() {
		return lastInstructions;
	}
	public void addLastInstruction(Instruction inst) {
		lastInstructions.add(inst);
	}
}
/**
 * Creates an empty DAG and picks up the default reducer degree from the
 * current DML configuration.
 */
public Dag()
{
	//internal data structures
	IDMap = new HashMap<Long, Integer>();
	nodes = new ArrayList<Lop>();
	//default parallelism from the dml config
	total_reducers = ConfigurationManager.getNumReducers();
}
///////
// filename handling
/**
 * Returns the per-process scratch-space path prefix, constructing and
 * caching it on first use.
 */
private String getFilePath() {
	if( scratchFilePath == null ) {
		StringBuilder sb = new StringBuilder();
		sb.append(scratch);
		sb.append(Lop.FILE_SEPARATOR);
		sb.append(Lop.PROCESS_PREFIX);
		sb.append(DMLScript.getUUID());
		sb.append(Lop.FILE_SEPARATOR);
		sb.append(Lop.FILE_SEPARATOR);
		sb.append(ProgramConverter.CP_ROOT_THREAD_ID);
		sb.append(Lop.FILE_SEPARATOR);
		scratchFilePath = sb.toString();
	}
	return scratchFilePath;
}
/** Returns a fresh "temp&lt;id&gt;" suffix from the global job-id sequence. */
public static String getNextUniqueFilenameSuffix() {
	long id = job_id.getNextID();
	return "temp" + id;
}
/** Returns a fresh scratch-space file name (path prefix + unique suffix). */
public String getNextUniqueFilename() {
	String prefix = getFilePath();
	String suffix = getNextUniqueFilenameSuffix();
	return prefix + suffix;
}
/**
 * Returns a fresh internal variable name, prefixed by the matrix or frame
 * naming convention depending on the given data type.
 */
public static String getNextUniqueVarname(DataType dt) {
	String prefix = (dt == DataType.MATRIX) ?
		Lop.MATRIX_VAR_NAME_PREFIX : Lop.FRAME_VAR_NAME_PREFIX;
	return prefix + var_index.getNextID();
}
///////
// Dag modifications
/**
 * Method to add a node to the DAG.
 *
 * @param node low-level operator
 * @return true if node was not already present, false if it was.
 */
public boolean addNode(Lop node) {
	//linear duplicate check (nodes is an ArrayList)
	boolean isNew = !nodes.contains(node);
	if( isNew )
		nodes.add(node);
	return isNew;
}
/**
 * Compiles this DAG into a list of runtime instructions, including
 * piggybacking of operations into MR jobs.
 *
 * @param sb statement block
 * @param config dml configuration
 * @return list of instructions
 * @throws LopsException if LopsException occurs
 * @throws IOException if IOException occurs
 * @throws DMLRuntimeException if DMLRuntimeException occurs
 */
public ArrayList<Instruction> getJobs(StatementBlock sb, DMLConfig config)
		throws LopsException, IOException, DMLRuntimeException {
	//pick up number of reducers and scratch space from the config
	if( config != null ) {
		total_reducers = config.getIntValue(DMLConfig.NUM_REDUCERS);
		scratch = config.getTextValue(DMLConfig.SCRATCH_SPACE) + "/";
	}
	//snapshot all nodes into a working list (needed for ordering)
	ArrayList<Lop> nodeList = new ArrayList<Lop>(nodes);
	//topological sort in strict order:
	// (1) all nodes of level i appear before nodes of level i+1,
	// (2) nodes within a level are ordered by creation ID
	doTopologicalSort_strict_order(nodeList);
	//greedy grouping of operations into jobs/instructions
	return doGreedyGrouping(sb, nodeList);
}
/**
 * Emits rmvar instructions for transient-read matrix variables that are
 * overwritten by a transient write in this statement block and whose old
 * value is no longer referenced, so the stale variable can be removed.
 *
 * Two passes: first collect CANDIDATE transient reads (those that do not
 * feed a write), then confirm those that are actually re-written under the
 * same label by a transient write whose input is not itself a candidate.
 *
 * @param sb statement block (method is a no-op if null)
 * @param nodeV all lops of the DAG
 * @param inst list receiving the generated remove instructions
 * @throws DMLRuntimeException if instruction preparation fails
 */
private static void deleteUpdatedTransientReadVariables(StatementBlock sb, ArrayList<Lop> nodeV,
		ArrayList<Instruction> inst) throws DMLRuntimeException {
	if ( sb == null )
		return;
	if( LOG.isTraceEnabled() )
		LOG.trace("In delete updated variables");
	// CANDIDATE list of variables which could have been updated in this statement block
	HashMap<String, Lop> labelNodeMapping = new HashMap<String, Lop>();
	// ACTUAL list of variables whose value is updated, AND the old value of the variable
	// is no longer accessible/used.
	HashSet<String> updatedLabels = new HashSet<String>();
	// maps each updated label to its write lop (used for source-location info)
	HashMap<String, Lop> updatedLabelsLineNum = new HashMap<String, Lop>();
	// first capture all transient read variables
	for ( Lop node : nodeV ) {
		if (node.getExecLocation() == ExecLocation.Data
				&& ((Data) node).isTransient()
				&& ((Data) node).getOperationType() == OperationTypes.READ
				&& ((Data) node).getDataType() == DataType.MATRIX) {
			// "node" is considered as updated ONLY IF the old value is not used any more
			// So, make sure that this READ node does not feed into any (transient/persistent) WRITE
			boolean hasWriteParent=false;
			for(Lop p : node.getOutputs()) {
				if(p.getExecLocation() == ExecLocation.Data) {
					// if the "p" is of type Data, then it has to be a WRITE
					hasWriteParent = true;
					break;
				}
			}
			if ( !hasWriteParent ) {
				// node has no parent of type WRITE, so this is a CANDIDATE variable
				// add it to labelNodeMapping so that it is considered in further processing
				labelNodeMapping.put(node.getOutputParameters().getLabel(), node);
			}
		}
	}
	// capture updated transient write variables
	for ( Lop node : nodeV ) {
		if (node.getExecLocation() == ExecLocation.Data
				&& ((Data) node).isTransient()
				&& ((Data) node).getOperationType() == OperationTypes.WRITE
				&& ((Data) node).getDataType() == DataType.MATRIX
				&& labelNodeMapping.containsKey(node.getOutputParameters().getLabel()) // check to make sure corresponding (i.e., with the same label/name) transient read is present
				&& !labelNodeMapping.containsValue(node.getInputs().get(0)) // check to avoid cases where transient read feeds into a transient write
				) {
			updatedLabels.add(node.getOutputParameters().getLabel());
			updatedLabelsLineNum.put(node.getOutputParameters().getLabel(), node);
		}
	}
	// generate RM instructions for the confirmed updates
	Instruction rm_inst = null;
	for ( String label : updatedLabels )
	{
		rm_inst = VariableCPInstruction.prepareRemoveInstruction(label);
		rm_inst.setLocation(updatedLabelsLineNum.get(label));
		if( LOG.isTraceEnabled() )
			LOG.trace(rm_inst.toString());
		inst.add(rm_inst);
	}
}
/**
 * Generates rmvar instructions for variables that are live on entry to the
 * given statement block but not live on exit (RULE 1, currently required
 * for specific cases of external functions). A second rule for block-local
 * variables (in KILL, neither live-in nor live-out) existed historically
 * but is disabled.
 *
 * @param sb statement block (method is a no-op if null)
 * @param deleteInst list receiving the generated remove instructions
 * @throws DMLRuntimeException if instruction preparation fails
 */
private static void generateRemoveInstructions(StatementBlock sb, ArrayList<Instruction> deleteInst)
		throws DMLRuntimeException {
	if ( sb == null )
		return;
	if( LOG.isTraceEnabled() )
		LOG.trace("In generateRemoveInstructions()");
	//RULE 1: live-in but not live-out => emit rmvar/rmfilevar
	for (String varName : sb.liveIn().getVariableNames()) {
		if (sb.liveOut().containsVariable(varName))
			continue;
		Instruction inst = VariableCPInstruction.prepareRemoveInstruction(varName);
		inst.setLocation(sb.getEndLine(), sb.getEndLine(), -1, -1);
		deleteInst.add(inst);
		if( LOG.isTraceEnabled() )
			LOG.trace(" Adding " + inst.toString());
	}
}
/**
 * Allocates {@code size} empty node lists -- one per job type, plus an
 * additional one for exec nodes.
 */
private static ArrayList<ArrayList<Lop>> createNodeVectors(int size) {
	ArrayList<ArrayList<Lop>> arr = new ArrayList<ArrayList<Lop>>(size);
	for( int i=0; i<size; i++ )
		arr.add(new ArrayList<Lop>());
	return arr;
}
/** Empties every per-job node list in place (the lists themselves are reused). */
private static void clearNodeVectors(ArrayList<ArrayList<Lop>> arr) {
	for( int i=0; i<arr.size(); i++ )
		arr.get(i).clear();
}
/**
 * Checks that all nodes in the given list are compatible with job type
 * {@code jt}, i.e., each node's compatible-jobs bitmask contains the job's
 * base bit.
 *
 * NOTE(review): the {@code from} and {@code to} parameters are unused --
 * the whole list is checked rather than the [from, to) sublist the callers
 * compute; presumably a conservative over-check, confirm before relying on
 * range semantics.
 *
 * @param nodes list of low-level operators
 * @param jt job type
 * @param from start index (currently unused)
 * @param to end index (currently unused)
 * @return true if every node in the list is compatible with jt
 * @throws LopsException declared for callers; not thrown here
 */
private static boolean isCompatible(ArrayList<Lop> nodes, JobType jt, int from, int to)
	throws LopsException
{
	int base = jt.getBase();
	for ( Lop node : nodes ) {
		if ((node.getCompatibleJobs() & base) == 0) {
			if( LOG.isTraceEnabled() )
				LOG.trace("Not compatible "+ node.toString());
			return false;
		}
	}
	return true;
}
/**
 * Function that determines if the two input nodes can be executed together
 * in at least one job (their compatible-jobs bitmasks intersect).
 *
 * @param node1 low-level operator 1
 * @param node2 low-level operator 2
 * @return true if nodes can be executed together
 */
private static boolean isCompatible(Lop node1, Lop node2) {
	boolean disjoint = (node1.getCompatibleJobs() & node2.getCompatibleJobs()) <= 0;
	return !disjoint;
}
/**
 * Function that checks if the given node executes in the job specified by jt.
 *
 * @param node low-level operator
 * @param jt job type
 * @return true if node executes in the specified job type
 */
private static boolean isCompatible(Lop node, JobType jt) {
	//GMRCELL piggybacks onto plain GMR for this check
	JobType effective = (jt == JobType.GMRCELL) ? JobType.GMR : jt;
	return ((node.getCompatibleJobs() & effective.getBase()) > 0);
}
/**
 * Adds {@code node} (and, transitively, its relevant children) to the
 * job-specific node vectors in {@code arr}:
 * <ul>
 * <li>If the node defines a MR job, it is added to the vector of the
 *     matching {@link JobType} (GMR lops with non-blocked inputs go to
 *     GMRCELL; group lops fed by DATAGEN are folded into the DATAGEN job),
 *     followed by a compatibility check of the added nodes.</li>
 * <li>If {@code eliminate} is set (only for 'group' lops), the node is
 *     queued directly on GMR/GMRCELL without pulling in children.</li>
 * <li>Otherwise the node is appended to at most ONE job vector that
 *     already contains one of its direct children.</li>
 * </ul>
 *
 * @param node low-level operator to place
 * @param arr per-job-type node vectors (index = JobType id)
 * @param execNodes nodes selected for execution in the current iteration
 * @param eliminate true if node is an eliminated aligner lop
 * @throws LopsException if no job type matches or a compatibility check fails
 */
private void addNodeByJobType(Lop node, ArrayList<ArrayList<Lop>> arr,
		ArrayList<Lop> execNodes, boolean eliminate) throws LopsException {
	if (!eliminate) {
		// Check if this lop defines a MR job.
		if ( node.definesMRJob() ) {
			// find the corresponding JobType
			JobType jt = JobType.findJobTypeFromLop(node);
			if ( jt == null ) {
				throw new LopsException(node.printErrorLocation() + "No matching JobType is found for a the lop type: " + node.getType() + " \n");
			}
			// Add "node" to corresponding job vector
			if ( jt == JobType.GMR ) {
				if ( node.hasNonBlockedInputs() ) {
					// cell inputs => route to the GMRCELL variant
					int gmrcell_index = JobType.GMRCELL.getId();
					arr.get(gmrcell_index).add(node);
					int from = arr.get(gmrcell_index).size();
					addChildren(node, arr.get(gmrcell_index), execNodes);
					int to = arr.get(gmrcell_index).size();
					if (!isCompatible(arr.get(gmrcell_index),JobType.GMR, from, to)) // check against GMR only, not against GMRCELL
						throw new LopsException(node.printErrorLocation() + "Error during compatibility check \n");
				}
				else {
					// if "node" (in this case, a group lop) has any inputs from RAND
					// then add it to RAND job. Otherwise, create a GMR job
					if (hasChildNode(node, arr.get(JobType.DATAGEN.getId()) )) {
						arr.get(JobType.DATAGEN.getId()).add(node);
						// we should NOT call 'addChildren' because appropriate
						// child nodes would have got added to RAND job already
					} else {
						int gmr_index = JobType.GMR.getId();
						arr.get(gmr_index).add(node);
						int from = arr.get(gmr_index).size();
						addChildren(node, arr.get(gmr_index), execNodes);
						int to = arr.get(gmr_index).size();
						if (!isCompatible(arr.get(gmr_index),JobType.GMR, from, to))
							throw new LopsException(node.printErrorLocation() + "Error during compatibility check \n");
					}
				}
			}
			else {
				// non-GMR job-defining lop: route directly by its job type
				int index = jt.getId();
				arr.get(index).add(node);
				int from = arr.get(index).size();
				addChildren(node, arr.get(index), execNodes);
				int to = arr.get(index).size();
				// check if all added nodes are compatible with current job
				if (!isCompatible(arr.get(index), jt, from, to)) {
					throw new LopsException(
							"Unexpected error in addNodeByType.");
				}
			}
			return;
		}
	}
	if ( eliminate ) {
		// Eliminated lops are directly added to GMR queue.
		// Note that eliminate flag is set only for 'group' lops
		if ( node.hasNonBlockedInputs() )
			arr.get(JobType.GMRCELL.getId()).add(node);
		else
			arr.get(JobType.GMR.getId()).add(node);
		return;
	}
	/*
	 * If this lop does not define a job, check if it uses the output of any
	 * specialized job. i.e., if this lop has a child node in any of the
	 * job-specific vector, then add it to the vector. Note: This lop must
	 * be added to ONLY ONE of the job-specific vectors.
	 */
	int numAdded = 0;
	for ( JobType j : JobType.values() ) {
		if ( j.getId() > 0 && hasDirectChildNode(node, arr.get(j.getId()))) {
			if (isCompatible(node, j)) {
				arr.get(j.getId()).add(node);
				numAdded += 1;
			}
		}
	}
	if (numAdded > 1) {
		throw new LopsException("Unexpected error in addNodeByJobType(): A given lop can ONLY be added to a single job vector (numAdded = " + numAdded + ")." );
	}
}
/*
 * Removes the given node from every job-specific node vector; invoked
 * from removeNodesForNextIteration(). Job id 0 is skipped, matching the
 * addNodeByJobType() convention.
 */
private static void removeNodeByJobType(Lop node, ArrayList<ArrayList<Lop>> arr) {
	for ( JobType jt : JobType.values() ) {
		if ( jt.getId() <= 0 )
			continue;
		arr.get(jt.getId()).remove(node);
	}
}
/**
 * As some jobs only write one output, all operations in the mapper need to
 * be redone and cannot be marked as finished.
 *
 * If the input of a MMCJ job (which must have executed in a Mapper) is used
 * by multiple lops, that input -- and every map-side node feeding it -- is
 * removed from finishedNodes so it gets recomputed in a later round.
 *
 * @param execNodes list of exec low-level operators
 * @param jobNodes list of job low-level operators
 * @param finishedNodes list of finished low-level operators
 * @throws LopsException if LopsException occurs
 */
private void handleSingleOutputJobs(ArrayList<Lop> execNodes,
		ArrayList<ArrayList<Lop>> jobNodes, ArrayList<Lop> finishedNodes)
		throws LopsException {
	/*
	 * If the input of a MMCJ/MMRJ job (must have executed in a Mapper) is used
	 * by multiple lops then we should mark it as not-finished.
	 */
	ArrayList<Lop> nodesWithUnfinishedOutputs = new ArrayList<Lop>();
	// parallel arrays: job index and the lop type that defines that job
	int[] jobIndices = {JobType.MMCJ.getId()};
	Lop.Type[] lopTypes = { Lop.Type.MMCJ};
	// TODO: SortByValue should be treated similar to MMCJ, since it can
	// only sort one file now
	for ( int jobi=0; jobi < jobIndices.length; jobi++ ) {
		int jindex = jobIndices[jobi];
		if (!jobNodes.get(jindex).isEmpty()) {
			ArrayList<Lop> vec = jobNodes.get(jindex);
			// first find all nodes with more than one parent that is not finished.
			for (int i = 0; i < vec.size(); i++) {
				Lop node = vec.get(i);
				if (node.getExecLocation() == ExecLocation.MapOrReduce
						|| node.getExecLocation() == ExecLocation.Map) {
					Lop MRparent = getParentNode(node, execNodes, ExecLocation.MapAndReduce);
					if ( MRparent != null && MRparent.getType() == lopTypes[jobi]) {
						int numParents = node.getOutputs().size();
						if (numParents > 1) {
							// NOTE(review): a node may be added once per unfinished
							// output; duplicates are harmless since only contains()
							// is used on the list below
							for (int j = 0; j < numParents; j++) {
								if (!finishedNodes.contains(node.getOutputs()
										.get(j)))
									nodesWithUnfinishedOutputs.add(node);
							}
						}
					}
				}
			}
			// need to redo all nodes in nodesWithOutput as well as their children
			for ( Lop node : vec ) {
				if (node.getExecLocation() == ExecLocation.MapOrReduce
						|| node.getExecLocation() == ExecLocation.Map) {
					if (nodesWithUnfinishedOutputs.contains(node))
						finishedNodes.remove(node);
					if (hasParentNode(node, nodesWithUnfinishedOutputs))
						finishedNodes.remove(node);
				}
			}
		}
	}
}
/**
 * Method to check if a lop can be eliminated from checking. Only "aligner"
 * lops (such as group) qualify; elimination is decided by the alignment of
 * the child executing with ExecLocation.MapAndReduce.
 *
 * @param node low-level operator
 * @param execNodes list of exec nodes
 * @return true if lop can be eliminated
 */
private static boolean canEliminateLop(Lop node, ArrayList<Lop> execNodes) {
	// this function can only eliminate "aligner" lops such a group
	if (!node.isAligner())
		return false;
	// find the child whose execLoc = 'MapAndReduce' and map its alignment code
	switch( getChildAlignment(node, execNodes, ExecLocation.MapAndReduce) ) {
		case CHILD_DOES_NOT_BREAK_ALIGNMENT:
		case MR_CHILD_FOUND_DOES_NOT_BREAK_ALIGNMENT:
			return true;
		case CHILD_BREAKS_ALIGNMENT:
		case MRCHILD_NOT_FOUND:
		case MR_CHILD_FOUND_BREAKS_ALIGNMENT:
			return false;
		default:
			throw new RuntimeException("Should not happen. \n");
	}
}
/**
 * Method to generate createvar instructions, which create new entries in
 * the symbol table. One instruction is generated for every lop that is
 * (1) of type Data, (2) persistent, (3) a matrix or frame, and (4) a read.
 *
 * Transient reads needn't be considered here since the previous program
 * block would already create appropriate entries in the symbol table.
 *
 * @param nodes_v list of nodes
 * @param inst list of instructions
 * @throws LopsException if LopsException occurs
 * @throws IOException if IOException occurs
 */
private static void generateInstructionsForInputVariables(ArrayList<Lop> nodes_v, ArrayList<Instruction> inst) throws LopsException, IOException {
	for(Lop n : nodes_v) {
		//only persistent, non-literal matrix/frame reads qualify
		if( n.getExecLocation() != ExecLocation.Data )
			continue;
		Data dnode = (Data) n;
		if( dnode.isTransient() || dnode.getOperationType() != OperationTypes.READ )
			continue;
		if( n.getDataType() != DataType.MATRIX && n.getDataType() != DataType.FRAME )
			continue;
		if( dnode.isLiteral() )
			continue;
		try {
			CPInstruction currInstr = CPInstructionParser.parseSingleInstruction(n.getInstructions());
			currInstr.setLocation(n);
			inst.add(currInstr);
		}
		catch (DMLRuntimeException e) {
			throw new LopsException(n.printErrorLocation() + "error generating instructions from input variables in Dag -- \n", e);
		}
	}
}
/**
 * Determine whether to send <code>node</code> to MR or to process it in the
 * control program. It is sent to MR when:
 *
 * 1) its input lop gets processed in MR, so <code>node</code> can be
 *    piggybacked onto the same job; or
 *
 * 2) the exec type of the write lop itself is MR, i.e., its memory
 *    estimate exceeds the memory budget.
 *
 * @param node low-level operator
 * @return true if lop should be sent to MR
 */
private static boolean sendWriteLopToMR(Lop node)
{
	if ( DMLScript.rtplatform == RUNTIME_PLATFORM.SINGLE_NODE )
		return false;
	Lop in = node.getInputs().get(0);
	Format nodeFormat = node.getOutputParameters().getFormat();
	//special case: a transient read feeding into exactly one persistent
	//blocked write -- the temporary HDFS file is moved to the persistent
	//location instead of copied, so no MR job is needed
	boolean isFileMoveCase = in.getExecLocation() == ExecLocation.Data
			&& in.getOutputs().size() == 1
			&& !((Data)node).isTransient()
			&& ((Data)in).isTransient()
			&& ((Data)in).getOutputParameters().isBlocked()
			&& node.getOutputParameters().isBlocked();
	if( isFileMoveCase )
		return false;
	//send write lop to MR if (1) it is marked with exec type MR (based on its
	//memory estimate), or (2) the input lop is in MR and the write format
	//allows packing into the same job (csv write is a separate MR job type)
	if( node.getExecType() == ExecType.MR )
		return true;
	return (in.getExecType() == ExecType.MR && nodeFormat != Format.CSV);
}
/**
 * Computes the memory footprint required to execute <code>node</code> in the
 * mapper. It is used only for those nodes that use inputs from distributed
 * cache. The returned value is utilized in limiting the number of
 * instructions piggybacked onto a single GMR mapper.
 *
 * @param node low-level operator
 * @return memory footprint (0.0 for nodes without distributed-cache inputs)
 */
private static double computeFootprintInMapper(Lop node) {
	// Memory limits must be checked only for nodes that use distributed cache
	if ( ! node.usesDistributedCache() )
		return 0.0; // default behavior
	OutputParameters in1dims = node.getInputs().get(0).getOutputParameters();
	OutputParameters in2dims = node.getInputs().get(1).getOutputParameters();
	if ( node instanceof MapMult ) {
		int dcInputIndex = node.distributedCacheInputIndex()[0];
		return AggBinaryOp.getMapmmMemEstimate(
			in1dims.getNumRows(), in1dims.getNumCols(), in1dims.getRowsInBlock(), in1dims.getColsInBlock(), in1dims.getNnz(),
			in2dims.getNumRows(), in2dims.getNumCols(), in2dims.getRowsInBlock(), in2dims.getColsInBlock(), in2dims.getNnz(),
			dcInputIndex, false);
	}
	else if ( node instanceof PMMJ ) {
		int dcInputIndex = node.distributedCacheInputIndex()[0];
		return AggBinaryOp.getMapmmMemEstimate(
			in1dims.getNumRows(), 1, in1dims.getRowsInBlock(), in1dims.getColsInBlock(), in1dims.getNnz(),
			in2dims.getNumRows(), in2dims.getNumCols(), in2dims.getRowsInBlock(), in2dims.getColsInBlock(), in2dims.getNnz(),
			dcInputIndex, true);
	}
	else if ( node instanceof AppendM || node instanceof BinaryM ) {
		//identical estimate for map-side append and map-side binary ops
		return BinaryOp.footprintInMapper(
			in1dims.getNumRows(), in1dims.getNumCols(),
			in2dims.getNumRows(), in2dims.getNumCols(),
			in1dims.getRowsInBlock(), in1dims.getColsInBlock());
	}
	// default behavior for all other lop types
	return 0.0;
}
/**
 * Determines if <code>node</code> can be executed in the current round of MR
 * jobs or must be queued for later rounds. If the total estimated footprint
 * (<code>node</code> plus previously added GMR nodes) fits within the mapper
 * memory budget, returns <code>true</code>; otherwise <code>false</code>.
 *
 * @param node low-level operator
 * @param footprintInMapper mapper footprint
 * @return true if node can be executed in current round of jobs
 */
private static boolean checkMemoryLimits(Lop node, double footprintInMapper) {
	// Memory limits must be checked only for nodes that use distributed cache
	if ( ! node.usesDistributedCache() )
		return true; // default behavior
	double memBudget = Math.min(AggBinaryOp.MAPMULT_MEM_MULTIPLIER, BinaryOp.APPEND_MEM_MULTIPLIER)
			* OptimizerUtils.getRemoteMemBudgetMap(true);
	return (footprintInMapper <= memBudget);
}
/**
* Method to group a vector of sorted lops.
*
* @param sb statement block
* @param node_v list of low-level operators
* @return list of instructions
* @throws LopsException if LopsException occurs
* @throws IOException if IOException occurs
* @throws DMLRuntimeException if DMLRuntimeException occurs
*/
private ArrayList<Instruction> doGreedyGrouping(StatementBlock sb, ArrayList<Lop> node_v)
throws LopsException, IOException, DMLRuntimeException
{
if( LOG.isTraceEnabled() )
LOG.trace("Grouping DAG ============");
// nodes to be executed in current iteration
ArrayList<Lop> execNodes = new ArrayList<Lop>();
// nodes that have already been processed
ArrayList<Lop> finishedNodes = new ArrayList<Lop>();
// nodes that are queued for the following iteration
ArrayList<Lop> queuedNodes = new ArrayList<Lop>();
ArrayList<ArrayList<Lop>> jobNodes = createNodeVectors(JobType.getNumJobTypes());
// list of instructions
ArrayList<Instruction> inst = new ArrayList<Instruction>();
//ArrayList<Instruction> preWriteDeleteInst = new ArrayList<Instruction>();
ArrayList<Instruction> writeInst = new ArrayList<Instruction>();
ArrayList<Instruction> deleteInst = new ArrayList<Instruction>();
ArrayList<Instruction> endOfBlockInst = new ArrayList<Instruction>();
// remove files for transient reads that are updated.
deleteUpdatedTransientReadVariables(sb, node_v, writeInst);
generateRemoveInstructions(sb, endOfBlockInst);
generateInstructionsForInputVariables(node_v, inst);
boolean done = false;
String indent = " ";
while (!done) {
if( LOG.isTraceEnabled() )
LOG.trace("Grouping nodes in DAG");
execNodes.clear();
queuedNodes.clear();
clearNodeVectors(jobNodes);
gmrMapperFootprint=0;
for ( Lop node : node_v ) {
// finished nodes don't need to be processed
if (finishedNodes.contains(node))
continue;
if( LOG.isTraceEnabled() )
LOG.trace("Processing node (" + node.getID()
+ ") " + node.toString() + " exec nodes size is " + execNodes.size());
//if node defines MR job, make sure it is compatible with all
//its children nodes in execNodes
if(node.definesMRJob() && !compatibleWithChildrenInExecNodes(execNodes, node))
{
if( LOG.isTraceEnabled() )
LOG.trace(indent + "Queueing node "
+ node.toString() + " (code 1)");
queuedNodes.add(node);
removeNodesForNextIteration(node, finishedNodes, execNodes, queuedNodes, jobNodes);
continue;
}
// if child is queued, this node will be processed in the later
// iteration
if (hasChildNode(node,queuedNodes)) {
if( LOG.isTraceEnabled() )
LOG.trace(indent + "Queueing node "
+ node.toString() + " (code 2)");
queuedNodes.add(node);
// if node has more than two inputs,
// remove children that will be needed in a future
// iterations
// may also have to remove parent nodes of these children
removeNodesForNextIteration(node, finishedNodes, execNodes,
queuedNodes, jobNodes);
continue;
}
// if inputs come from different jobs, then queue
if ( node.getInputs().size() >= 2) {
int jobid = Integer.MIN_VALUE;
boolean queueit = false;
for(int idx=0; idx < node.getInputs().size(); idx++) {
int input_jobid = jobType(node.getInputs().get(idx), jobNodes);
if (input_jobid != -1) {
if ( jobid == Integer.MIN_VALUE )
jobid = input_jobid;
else if ( jobid != input_jobid ) {
queueit = true;
break;
}
}
}
if ( queueit ) {
if( LOG.isTraceEnabled() )
LOG.trace(indent + "Queueing node " + node.toString() + " (code 3)");
queuedNodes.add(node);
removeNodesForNextIteration(node, finishedNodes, execNodes, queuedNodes, jobNodes);
continue;
}
}
// See if this lop can be eliminated
// This check is for "aligner" lops (e.g., group)
boolean eliminate = false;
eliminate = canEliminateLop(node, execNodes);
if (eliminate) {
if( LOG.isTraceEnabled() )
LOG.trace(indent + "Adding -"+ node.toString());
execNodes.add(node);
finishedNodes.add(node);
addNodeByJobType(node, jobNodes, execNodes, eliminate);
continue;
}
// If the node defines a MR Job then make sure none of its
// children that defines a MR Job are present in execNodes
if (node.definesMRJob()) {
if (hasMRJobChildNode(node, execNodes)) {
// "node" must NOT be queued when node=group and the child that defines job is Rand
// this is because "group" can be pushed into the "Rand" job.
if (! (node.getType() == Lop.Type.Grouping && checkDataGenAsChildNode(node,execNodes)) ) {
if( LOG.isTraceEnabled() )
LOG.trace(indent + "Queueing node " + node.toString() + " (code 4)");
queuedNodes.add(node);
removeNodesForNextIteration(node, finishedNodes,
execNodes, queuedNodes, jobNodes);
continue;
}
}
}
// if "node" has more than one input, and has a descendant lop
// in execNodes that is of type RecordReader
// then all its inputs must be ancestors of RecordReader. If
// not, queue "node"
if (node.getInputs().size() > 1
&& hasChildNode(node, execNodes, ExecLocation.RecordReader)) {
// get the actual RecordReader lop
Lop rr_node = getChildNode(node, execNodes, ExecLocation.RecordReader);
// all inputs of "node" must be ancestors of rr_node
boolean queue_it = false;
for (Lop n : node.getInputs()) {
// each input should be ancestor of RecordReader lop
if (!n.equals(rr_node) && !isChild(rr_node, n, IDMap)) {
queue_it = true; // i.e., "node" must be queued
break;
}
}
if (queue_it) {
// queue node
if( LOG.isTraceEnabled() )
LOG.trace(indent + "Queueing -" + node.toString() + " (code 5)");
queuedNodes.add(node);
// TODO: does this have to be modified to handle
// recordreader lops?
removeNodesForNextIteration(node, finishedNodes,
execNodes, queuedNodes, jobNodes);
continue;
} else {
// nothing here.. subsequent checks have to be performed
// on "node"
;
}
}
// data node, always add if child not queued
// only write nodes are kept in execnodes
if (node.getExecLocation() == ExecLocation.Data) {
Data dnode = (Data) node;
boolean dnode_queued = false;
if ( dnode.getOperationType() == OperationTypes.READ ) {
if( LOG.isTraceEnabled() )
LOG.trace(indent + "Adding Data -"+ node.toString());
// TODO: avoid readScalar instruction, and read it on-demand just like the way Matrices are read in control program
if ( node.getDataType() == DataType.SCALAR
//TODO: LEO check the following condition is still needed
&& node.getOutputParameters().getFile_name() != null ) {
// this lop corresponds to reading a scalar from HDFS file
// add it to execNodes so that "readScalar" instruction gets generated
execNodes.add(node);
// note: no need to add it to any job vector
}
}
else if (dnode.getOperationType() == OperationTypes.WRITE) {
// Skip the transient write <code>node</code> if the input is a
// transient read with the same variable name. i.e., a dummy copy.
// Hence, <code>node</code> can be avoided.
// TODO: this case should ideally be handled in the language layer
// prior to the construction of Hops Dag
Lop input = dnode.getInputs().get(0);
if ( dnode.isTransient()
&& input.getExecLocation() == ExecLocation.Data
&& ((Data)input).isTransient()
&& dnode.getOutputParameters().getLabel().equals(input.getOutputParameters().getLabel()) ) {
// do nothing, <code>node</code> must not processed any further.
;
}
else if ( execNodes.contains(input) && !isCompatible(node, input) && sendWriteLopToMR(node)) {
// input is in execNodes but it is not compatible with write lop. So, queue the write lop.
if( LOG.isTraceEnabled() )
LOG.trace(indent + "Queueing -" + node.toString());
queuedNodes.add(node);
dnode_queued = true;
}
else {
if( LOG.isTraceEnabled() )
LOG.trace(indent + "Adding Data -"+ node.toString());
execNodes.add(node);
if ( sendWriteLopToMR(node) ) {
addNodeByJobType(node, jobNodes, execNodes, false);
}
}
}
if (!dnode_queued)
finishedNodes.add(node);
continue;
}
// map or reduce node, can always be piggybacked with parent
if (node.getExecLocation() == ExecLocation.MapOrReduce) {
if( LOG.isTraceEnabled() )
LOG.trace(indent + "Adding -"+ node.toString());
execNodes.add(node);
finishedNodes.add(node);
addNodeByJobType(node, jobNodes, execNodes, false);
continue;
}
// RecordReader node, add, if no parent needs reduce, else queue
if (node.getExecLocation() == ExecLocation.RecordReader) {
// "node" should not have any children in
// execNodes .. it has to be the first one in the job!
if (!hasChildNode(node, execNodes, ExecLocation.Map)
&& !hasChildNode(node, execNodes,
ExecLocation.MapAndReduce)) {
if( LOG.isTraceEnabled() )
LOG.trace(indent + "Adding -"+ node.toString());
execNodes.add(node);
finishedNodes.add(node);
addNodeByJobType(node, jobNodes, execNodes, false);
} else {
if( LOG.isTraceEnabled() )
LOG.trace(indent + "Queueing -"+ node.toString() + " (code 6)");
queuedNodes.add(node);
removeNodesForNextIteration(node, finishedNodes,
execNodes, queuedNodes, jobNodes);
}
continue;
}
// map node, add, if no parent needs reduce, else queue
if (node.getExecLocation() == ExecLocation.Map) {
boolean queueThisNode = false;
int subcode = -1;
if ( node.usesDistributedCache() ) {
// if an input to <code>node</code> comes from distributed cache
// then that input must get executed in one of the previous jobs.
int[] dcInputIndexes = node.distributedCacheInputIndex();
for( int dcInputIndex : dcInputIndexes ){
Lop dcInput = node.getInputs().get(dcInputIndex-1);
if ( (dcInput.getType() != Lop.Type.Data && dcInput.getExecType()==ExecType.MR)
&& execNodes.contains(dcInput) )
{
queueThisNode = true;
subcode = 1;
}
}
// Limit the number of distributed cache inputs based on the available memory in mappers
double memsize = computeFootprintInMapper(node);
//gmrMapperFootprint += computeFootprintInMapper(node);
if ( gmrMapperFootprint>0 && !checkMemoryLimits(node, gmrMapperFootprint+memsize ) ) {
queueThisNode = true;
subcode = 2;
}
if(!queueThisNode)
gmrMapperFootprint += memsize;
}
if (!queueThisNode && !hasChildNode(node, execNodes,ExecLocation.MapAndReduce)&& !hasMRJobChildNode(node, execNodes)) {
if( LOG.isTraceEnabled() )
LOG.trace(indent + "Adding -"+ node.toString());
execNodes.add(node);
finishedNodes.add(node);
addNodeByJobType(node, jobNodes, execNodes, false);
} else {
if( LOG.isTraceEnabled() )
LOG.trace(indent + "Queueing -"+ node.toString() + " (code 7 - " + "subcode " + subcode + ")");
queuedNodes.add(node);
removeNodesForNextIteration(node, finishedNodes,
execNodes, queuedNodes, jobNodes);
}
continue;
}
// reduce node, make sure no parent needs reduce, else queue
if (node.getExecLocation() == ExecLocation.MapAndReduce) {
// TODO: statiko -- keep the middle condition
// discuss about having a lop that is MapAndReduce but does
// not define a job
if( LOG.isTraceEnabled() )
LOG.trace(indent + "Adding -"+ node.toString());
execNodes.add(node);
finishedNodes.add(node);
addNodeByJobType(node, jobNodes, execNodes, eliminate);
continue;
}
// aligned reduce, make sure a parent that is reduce exists
if (node.getExecLocation() == ExecLocation.Reduce) {
if ( compatibleWithChildrenInExecNodes(execNodes, node) &&
(hasChildNode(node, execNodes, ExecLocation.MapAndReduce)
|| hasChildNode(node, execNodes, ExecLocation.Map) ) )
{
if( LOG.isTraceEnabled() )
LOG.trace(indent + "Adding -"+ node.toString());
execNodes.add(node);
finishedNodes.add(node);
addNodeByJobType(node, jobNodes, execNodes, false);
} else {
if( LOG.isTraceEnabled() )
LOG.trace(indent + "Queueing -"+ node.toString() + " (code 8)");
queuedNodes.add(node);
removeNodesForNextIteration(node, finishedNodes,
execNodes, queuedNodes, jobNodes);
}
continue;
}
// add Scalar to execNodes if it has no child in exec nodes
// that will be executed in a MR job.
if (node.getExecLocation() == ExecLocation.ControlProgram) {
for ( Lop lop : node.getInputs() ) {
if (execNodes.contains(lop)
&& !(lop.getExecLocation() == ExecLocation.Data)
&& !(lop.getExecLocation() == ExecLocation.ControlProgram)) {
if( LOG.isTraceEnabled() )
LOG.trace(indent + "Queueing -"+ node.toString() + " (code 9)");
queuedNodes.add(node);
removeNodesForNextIteration(node, finishedNodes,
execNodes, queuedNodes, jobNodes);
break;
}
}
if (queuedNodes.contains(node))
continue;
if( LOG.isTraceEnabled() )
LOG.trace(indent + "Adding - scalar"+ node.toString());
execNodes.add(node);
addNodeByJobType(node, jobNodes, execNodes, false);
finishedNodes.add(node);
continue;
}
}
// no work to do
if ( execNodes.isEmpty() ) {
if( !queuedNodes.isEmpty() )
{
//System.err.println("Queued nodes should be 0");
throw new LopsException("Queued nodes should not be 0 at this point \n");
}
if( LOG.isTraceEnabled() )
LOG.trace("All done! queuedNodes = "+ queuedNodes.size());
done = true;
} else {
// work to do
if( LOG.isTraceEnabled() )
LOG.trace("Generating jobs for group -- Node count="+ execNodes.size());
// first process scalar instructions
generateControlProgramJobs(execNodes, inst, writeInst, deleteInst);
// copy unassigned lops in execnodes to gmrnodes
for (int i = 0; i < execNodes.size(); i++) {
Lop node = execNodes.get(i);
if (jobType(node, jobNodes) == -1) {
if ( isCompatible(node, JobType.GMR) ) {
if ( node.hasNonBlockedInputs() ) {
jobNodes.get(JobType.GMRCELL.getId()).add(node);
addChildren(node, jobNodes.get(JobType.GMRCELL.getId()), execNodes);
}
else {
jobNodes.get(JobType.GMR.getId()).add(node);
addChildren(node, jobNodes.get(JobType.GMR.getId()), execNodes);
}
}
else {
if( LOG.isTraceEnabled() )
LOG.trace(indent + "Queueing -" + node.toString() + " (code 10)");
execNodes.remove(i);
finishedNodes.remove(node);
queuedNodes.add(node);
removeNodesForNextIteration(node, finishedNodes,
execNodes, queuedNodes, jobNodes);
}
}
}
// next generate MR instructions
if (!execNodes.isEmpty())
generateMRJobs(execNodes, inst, writeInst, deleteInst, jobNodes);
handleSingleOutputJobs(execNodes, jobNodes, finishedNodes);
}
}
// add write and delete inst at the very end.
//inst.addAll(preWriteDeleteInst);
inst.addAll(writeInst);
inst.addAll(deleteInst);
inst.addAll(endOfBlockInst);
return inst;
}
private boolean compatibleWithChildrenInExecNodes(ArrayList<Lop> execNodes, Lop node) {
for( Lop tmpNode : execNodes ) {
// for lops that execute in control program, compatibleJobs property is set to LopProperties.INVALID
// we should not consider such lops in this check
if (isChild(tmpNode, node, IDMap)
&& tmpNode.getExecLocation() != ExecLocation.ControlProgram
//&& tmpNode.getCompatibleJobs() != LopProperties.INVALID
&& (tmpNode.getCompatibleJobs() & node.getCompatibleJobs()) == 0)
return false;
}
return true;
}
/**
* Exclude rmvar instruction for varname from deleteInst, if exists
*
* @param varName variable name
* @param deleteInst list of instructions
*/
private static void excludeRemoveInstruction(String varName, ArrayList<Instruction> deleteInst) {
//for(Instruction inst : deleteInst) {
for(int i=0; i < deleteInst.size(); i++) {
Instruction inst = deleteInst.get(i);
if ((inst.getType() == INSTRUCTION_TYPE.CONTROL_PROGRAM || inst.getType() == INSTRUCTION_TYPE.SPARK)
&& ((CPInstruction)inst).getCPInstructionType() == CPINSTRUCTION_TYPE.Variable
&& ((VariableCPInstruction)inst).isRemoveVariable(varName) ) {
deleteInst.remove(i);
}
}
}
/**
* Generate rmvar instructions for the inputs, if their consumer count becomes zero.
*
* @param node low-level operator
* @param inst list of instructions
* @param delteInst list of instructions
* @throws DMLRuntimeException if DMLRuntimeException occurs
*/
private void processConsumersForInputs(Lop node, ArrayList<Instruction> inst, ArrayList<Instruction> delteInst) throws DMLRuntimeException {
// reduce the consumer count for all input lops
// if the count becomes zero, then then variable associated w/ input can be removed
for(Lop in : node.getInputs() ) {
if(DMLScript.ENABLE_DEBUG_MODE) {
processConsumers(in, inst, delteInst, node);
}
else {
processConsumers(in, inst, delteInst, null);
}
}
}
private static void processConsumers(Lop node, ArrayList<Instruction> inst, ArrayList<Instruction> deleteInst, Lop locationInfo) throws DMLRuntimeException {
// reduce the consumer count for all input lops
// if the count becomes zero, then then variable associated w/ input can be removed
if ( node.removeConsumer() == 0 ) {
if ( node.getExecLocation() == ExecLocation.Data && ((Data)node).isLiteral() ) {
return;
}
String label = node.getOutputParameters().getLabel();
Instruction currInstr = VariableCPInstruction.prepareRemoveInstruction(label);
if (locationInfo != null)
currInstr.setLocation(locationInfo);
else
currInstr.setLocation(node);
inst.add(currInstr);
excludeRemoveInstruction(label, deleteInst);
}
}
/**
* Method to generate instructions that are executed in Control Program. At
* this point, this DAG has no dependencies on the MR dag. ie. none of the
* inputs are outputs of MR jobs
*
* @param execNodes list of low-level operators
* @param inst list of instructions
* @param writeInst list of write instructions
* @param deleteInst list of delete instructions
* @throws LopsException if LopsException occurs
* @throws DMLRuntimeException if DMLRuntimeException occurs
*/
	private void generateControlProgramJobs(ArrayList<Lop> execNodes,
			ArrayList<Instruction> inst, ArrayList<Instruction> writeInst, ArrayList<Instruction> deleteInst) throws LopsException, DMLRuntimeException {
		// nodes to be deleted from execnodes
		ArrayList<Lop> markedNodes = new ArrayList<Lop>();
		// variable names to be deleted
		ArrayList<String> var_deletions = new ArrayList<String>();
		// maps each deferred variable deletion to the lop carrying its line-number info
		HashMap<String, Lop> var_deletionsLineNum = new HashMap<String, Lop>();
		// per-node flag: whether rmvar instructions may be generated for the node's inputs
		boolean doRmVar = false;
		for (int i = 0; i < execNodes.size(); i++) {
			Lop node = execNodes.get(i);
			doRmVar = false;
			// mark input scalar read nodes for deletion
			// TODO: statiko -- check if this condition ever evaluated to TRUE
			if (node.getExecLocation() == ExecLocation.Data
					&& ((Data) node).getOperationType() == Data.OperationTypes.READ
					&& ((Data) node).getDataType() == DataType.SCALAR
					&& node.getOutputParameters().getFile_name() == null ) {
				markedNodes.add(node);
				continue;
			}
			// output scalar instructions and mark nodes for deletion
			if (node.getExecLocation() == ExecLocation.ControlProgram) {
				if (node.getDataType() == DataType.SCALAR) {
					// Output from lops with SCALAR data type must
					// go into Temporary Variables (Var0, Var1, etc.)
					NodeOutput out = setupNodeOutputs(node, ExecType.CP, false, false);
					inst.addAll(out.getPreInstructions()); // dummy
					deleteInst.addAll(out.getLastInstructions());
				} else {
					// Output from lops with non-SCALAR data type must
					// go into Temporary Files (temp0, temp1, etc.)
					NodeOutput out = setupNodeOutputs(node, ExecType.CP, false, false);
					inst.addAll(out.getPreInstructions());
					// if a transient write consumes this output, defer its cleanup:
					// the variable must survive until the end of the program block
					boolean hasTransientWriteParent = false;
					for ( Lop parent : node.getOutputs() ) {
						if ( parent.getExecLocation() == ExecLocation.Data
								&& ((Data)parent).getOperationType() == Data.OperationTypes.WRITE
								&& ((Data)parent).isTransient() ) {
							hasTransientWriteParent = true;
							break;
						}
					}
					if ( !hasTransientWriteParent ) {
						deleteInst.addAll(out.getLastInstructions());
					}
					else {
						// record the deferred deletion together with its line-number source
						var_deletions.add(node.getOutputParameters().getLabel());
						var_deletionsLineNum.put(node.getOutputParameters().getLabel(), node);
					}
				}
				// build the CP instruction string for this lop
				String inst_string = "";
				// Lops with arbitrary number of inputs (ParameterizedBuiltin, GroupedAggregate, DataGen)
				// are handled separately, by simply passing ONLY the output variable to getInstructions()
				if (node.getType() == Lop.Type.ParameterizedBuiltin
						|| node.getType() == Lop.Type.GroupedAgg
						|| node.getType() == Lop.Type.DataGen ){
					inst_string = node.getInstructions(node.getOutputParameters().getLabel());
				}
				// Lops with arbitrary number of inputs and outputs are handled
				// separately as well by passing arrays of inputs and outputs
				else if ( node.getType() == Lop.Type.FunctionCallCP )
				{
					String[] inputs = new String[node.getInputs().size()];
					String[] outputs = new String[node.getOutputs().size()];
					int count = 0;
					for( Lop in : node.getInputs() )
						inputs[count++] = in.getOutputParameters().getLabel();
					count = 0;
					for( Lop out : node.getOutputs() )
					{
						outputs[count++] = out.getOutputParameters().getLabel();
					}
					inst_string = node.getInstructions(inputs, outputs);
				}
				else if (node.getType() == Lop.Type.MULTIPLE_CP) { // ie, MultipleCP class
					inst_string = node.getInstructions(node.getOutputParameters().getLabel());
				}
				else {
					// fixed-arity lops: dispatch to the getInstructions() overload
					// matching the number of inputs (0 through 7); beyond that,
					// fall back to the array-based overload
					if ( node.getInputs().isEmpty() ) {
						// currently, such a case exists only for Rand lop
						inst_string = node.getInstructions(node.getOutputParameters().getLabel());
					}
					else if (node.getInputs().size() == 1) {
						inst_string = node.getInstructions(node.getInputs()
								.get(0).getOutputParameters().getLabel(),
								node.getOutputParameters().getLabel());
					}
					else if (node.getInputs().size() == 2) {
						inst_string = node.getInstructions(
								node.getInputs().get(0).getOutputParameters().getLabel(),
								node.getInputs().get(1).getOutputParameters().getLabel(),
								node.getOutputParameters().getLabel());
					}
					else if (node.getInputs().size() == 3 || node.getType() == Type.Ternary) {
						inst_string = node.getInstructions(
								node.getInputs().get(0).getOutputParameters().getLabel(),
								node.getInputs().get(1).getOutputParameters().getLabel(),
								node.getInputs().get(2).getOutputParameters().getLabel(),
								node.getOutputParameters().getLabel());
					}
					else if (node.getInputs().size() == 4) {
						inst_string = node.getInstructions(
								node.getInputs().get(0).getOutputParameters().getLabel(),
								node.getInputs().get(1).getOutputParameters().getLabel(),
								node.getInputs().get(2).getOutputParameters().getLabel(),
								node.getInputs().get(3).getOutputParameters().getLabel(),
								node.getOutputParameters().getLabel());
					}
					else if (node.getInputs().size() == 5) {
						inst_string = node.getInstructions(
								node.getInputs().get(0).getOutputParameters().getLabel(),
								node.getInputs().get(1).getOutputParameters().getLabel(),
								node.getInputs().get(2).getOutputParameters().getLabel(),
								node.getInputs().get(3).getOutputParameters().getLabel(),
								node.getInputs().get(4).getOutputParameters().getLabel(),
								node.getOutputParameters().getLabel());
					}
					else if (node.getInputs().size() == 6) {
						inst_string = node.getInstructions(
								node.getInputs().get(0).getOutputParameters().getLabel(),
								node.getInputs().get(1).getOutputParameters().getLabel(),
								node.getInputs().get(2).getOutputParameters().getLabel(),
								node.getInputs().get(3).getOutputParameters().getLabel(),
								node.getInputs().get(4).getOutputParameters().getLabel(),
								node.getInputs().get(5).getOutputParameters().getLabel(),
								node.getOutputParameters().getLabel());
					}
					else if (node.getInputs().size() == 7) {
						inst_string = node.getInstructions(
								node.getInputs().get(0).getOutputParameters().getLabel(),
								node.getInputs().get(1).getOutputParameters().getLabel(),
								node.getInputs().get(2).getOutputParameters().getLabel(),
								node.getInputs().get(3).getOutputParameters().getLabel(),
								node.getInputs().get(4).getOutputParameters().getLabel(),
								node.getInputs().get(5).getOutputParameters().getLabel(),
								node.getInputs().get(6).getOutputParameters().getLabel(),
								node.getOutputParameters().getLabel());
					}
					else {
						String[] inputs = new String[node.getInputs().size()];
						for( int j=0; j<node.getInputs().size(); j++ )
							inputs[j] = node.getInputs().get(j).getOutputParameters().getLabel();
						inst_string = node.getInstructions(inputs,
								node.getOutputParameters().getLabel());
					}
				}
				// parse the instruction and attach line-number info for error reporting
				try {
					if( LOG.isTraceEnabled() )
						LOG.trace("Generating instruction - "+ inst_string);
					Instruction currInstr = InstructionParser.parseSingleInstruction(inst_string);
					if(currInstr == null) {
						throw new LopsException("Error parsing the instruction:" + inst_string);
					}
					// fall back to an output/input lop for location info if the node has none
					if (node._beginLine != 0)
						currInstr.setLocation(node);
					else if ( !node.getOutputs().isEmpty() )
						currInstr.setLocation(node.getOutputs().get(0));
					else if ( !node.getInputs().isEmpty() )
						currInstr.setLocation(node.getInputs().get(0));
					inst.add(currInstr);
				} catch (Exception e) {
					throw new LopsException(node.printErrorLocation() + "Problem generating simple inst - "
							+ inst_string, e);
				}
				markedNodes.add(node);
				doRmVar = true;
				//continue;
			}
			else if (node.getExecLocation() == ExecLocation.Data ) {
				Data dnode = (Data)node;
				Data.OperationTypes op = dnode.getOperationType();
				if ( op == Data.OperationTypes.WRITE ) {
					NodeOutput out = null;
					if ( sendWriteLopToMR(node) ) {
						// In this case, Data WRITE lop goes into MR, and
						// we don't have to do anything here
						doRmVar = false;
					}
					else {
						out = setupNodeOutputs(node, ExecType.CP, false, false);
						if ( dnode.getDataType() == DataType.SCALAR ) {
							// processing is same for both transient and persistent scalar writes
							writeInst.addAll(out.getLastInstructions());
							//inst.addAll(out.getLastInstructions());
							doRmVar = false;
						}
						else {
							// setupNodeOutputs() handles both transient and persistent matrix writes
							if ( dnode.isTransient() ) {
								//inst.addAll(out.getPreInstructions()); // dummy ?
								deleteInst.addAll(out.getLastInstructions());
								doRmVar = false;
							}
							else {
								// In case of persistent write lop, write instruction will be generated
								// and that instruction must be added to <code>inst</code> so that it gets
								// executed immediately. If it is added to <code>deleteInst</code> then it
								// gets executed at the end of program block's execution
								inst.addAll(out.getLastInstructions());
								doRmVar = true;
							}
						}
						markedNodes.add(node);
						//continue;
					}
				}
				else {
					// generate a temp label to hold the value that is read from HDFS
					if ( node.getDataType() == DataType.SCALAR ) {
						node.getOutputParameters().setLabel(Lop.SCALAR_VAR_NAME_PREFIX + var_index.getNextID());
						String io_inst = node.getInstructions(node.getOutputParameters().getLabel(),
								node.getOutputParameters().getFile_name());
						CPInstruction currInstr = CPInstructionParser.parseSingleInstruction(io_inst);
						currInstr.setLocation(node);
						inst.add(currInstr);
						// schedule cleanup of the temp variable at end of block
						Instruction tempInstr = VariableCPInstruction.prepareRemoveInstruction(node.getOutputParameters().getLabel());
						tempInstr.setLocation(node);
						deleteInst.add(tempInstr);
					}
					else {
						throw new LopsException("Matrix READs are not handled in CP yet!");
					}
					markedNodes.add(node);
					doRmVar = true;
					//continue;
				}
			}
			// see if rmvar instructions can be generated for node's inputs
			if(doRmVar)
				processConsumersForInputs(node, inst, deleteInst);
			doRmVar = false;
		}
		// emit the deferred variable deletions recorded above
		for ( String var : var_deletions ) {
			Instruction rmInst = VariableCPInstruction.prepareRemoveInstruction(var);
			if( LOG.isTraceEnabled() )
				LOG.trace("  Adding var_deletions: " + rmInst.toString());
			rmInst.setLocation(var_deletionsLineNum.get(var));
			deleteInst.add(rmInst);
		}
		// delete all marked nodes
		for ( Lop node : markedNodes ) {
			execNodes.remove(node);
		}
	}
/**
* Method to remove all child nodes of a queued node that should be executed
* in a following iteration.
*
* @param node low-level operator
* @param finishedNodes list of finished nodes
* @param execNodes list of exec nodes
* @param queuedNodes list of queued nodes
* @param jobvec list of lists of low-level operators
* @throws LopsException if LopsException occurs
*/
	private void removeNodesForNextIteration(Lop node, ArrayList<Lop> finishedNodes,
			ArrayList<Lop> execNodes, ArrayList<Lop> queuedNodes,
			ArrayList<ArrayList<Lop>> jobvec) throws LopsException {
		// only queued nodes with multiple inputs need to be handled.
		if (node.getInputs().size() == 1)
			return;
		//if all children are queued, then there is nothing to do.
		boolean allQueued = true;
		for( Lop input : node.getInputs() ) {
			if( !queuedNodes.contains(input) ) {
				allQueued = false;
				break;
			}
		}
		if ( allQueued )
			return;
		if( LOG.isTraceEnabled() )
			LOG.trace(" Before remove nodes for next iteration -- size of execNodes " + execNodes.size());
		// Determine if <code>node</code> has inputs from the same job or multiple jobs
		int jobid = Integer.MIN_VALUE;
		boolean inputs_in_same_job = true;
		for( Lop input : node.getInputs() ) {
			int input_jobid = jobType(input, jobvec);
			if ( jobid == Integer.MIN_VALUE )
				jobid = input_jobid;
			else if ( jobid != input_jobid ) {
				inputs_in_same_job = false;
				break;
			}
		}
		// Determine if there exist any unassigned inputs to <code>node</code>
		// Evaluate only those lops that execute in MR.
		boolean unassigned_inputs = false;
		for( Lop input : node.getInputs() ) {
			//if ( input.getExecLocation() != ExecLocation.ControlProgram && jobType(input, jobvec) == -1 ) {
			if ( input.getExecType() == ExecType.MR && !execNodes.contains(input)) { //jobType(input, jobvec) == -1 ) {
				unassigned_inputs = true;
				break;
			}
		}
		// Determine if any node's children are queued
		boolean child_queued = false;
		for( Lop input : node.getInputs() ) {
			if (queuedNodes.contains(input) ) {
				child_queued = true;
				break;
			}
		}
		if (LOG.isTraceEnabled()) {
			LOG.trace(" Property Flags:");
			LOG.trace(" Inputs in same job: " + inputs_in_same_job);
			LOG.trace(" Unassigned inputs: " + unassigned_inputs);
			LOG.trace(" Child queued: " + child_queued);
		}
		// Evaluate each lop in <code>execNodes</code> for removal.
		// Add lops to be removed to <code>markedNodes</code>.
		ArrayList<Lop> markedNodes = new ArrayList<Lop>();
		for (Lop tmpNode : execNodes ) {
			if (LOG.isTraceEnabled()) {
				LOG.trace(" Checking for removal (" + tmpNode.getID() + ") " + tmpNode.toString());
			}
			// if tmpNode is not a descendant of 'node', then there is no advantage in removing tmpNode for later iterations.
			if(!isChild(tmpNode, node, IDMap))
				continue;
			// handle group input lops
			if(node.getInputs().contains(tmpNode) && tmpNode.isAligner()) {
				markedNodes.add(tmpNode);
				if( LOG.isTraceEnabled() )
					LOG.trace(" Removing for next iteration (code 1): (" + tmpNode.getID() + ") " + tmpNode.toString());
			}
			//if (child_queued) {
			// if one of the children are queued,
			// remove some child nodes on other leg that may be needed later on.
			// For e.g. Group lop.
			if (!hasOtherQueuedParentNode(tmpNode, queuedNodes, node)
					&& branchHasNoOtherUnExecutedParents(tmpNode, node, execNodes, finishedNodes)) {
				boolean queueit = false;
				int code = -1;
				// decide removability based on the exec location of the queued node;
				// NOTE: <code>code</code> is assigned unconditionally per case and is
				// used only for trace output
				switch(node.getExecLocation()) {
				case Map:
					if(branchCanBePiggyBackedMap(tmpNode, node, execNodes, queuedNodes, markedNodes))
						queueit = true;
					code=2;
					break;
				case MapAndReduce:
					if(branchCanBePiggyBackedMapAndReduce(tmpNode, node, execNodes, queuedNodes)&& !tmpNode.definesMRJob())
						queueit = true;
					code=3;
					break;
				case Reduce:
					if(branchCanBePiggyBackedReduce(tmpNode, node, execNodes, queuedNodes))
						queueit = true;
					code=4;
					break;
				default:
					//do nothing
				}
				if(queueit) {
					if( LOG.isTraceEnabled() )
						LOG.trace(" Removing for next iteration (code " + code + "): (" + tmpNode.getID() + ") " + tmpNode.toString());
					markedNodes.add(tmpNode);
				}
			}
			/*
			 * "node" has no other queued children.
			 * 
			 * If inputs are in the same job and "node" is of type
			 * MapAndReduce, then remove nodes of all types other than
			 * Reduce, MapAndReduce, and the ones that define a MR job as
			 * they can be piggybacked later.
			 * 
			 * e.g: A=Rand, B=Rand, C=A%*%B Here, both inputs of MMCJ lop
			 * come from Rand job, and they should not be removed.
			 * 
			 * Other examples: -- MMCJ whose children are of type
			 * MapAndReduce (say GMR) -- Inputs coming from two different
			 * jobs .. GMR & REBLOCK
			 */
			//boolean himr = hasOtherMapAndReduceParentNode(tmpNode, execNodes,node);
			//boolean bcbp = branchCanBePiggyBackedMapAndReduce(tmpNode, node, execNodes, finishedNodes);
			//System.out.println(" .. " + inputs_in_same_job + "," + himr + "," + bcbp);
			if ((inputs_in_same_job || unassigned_inputs)
					&& node.getExecLocation() == ExecLocation.MapAndReduce
					&& !hasOtherMapAndReduceParentNode(tmpNode, execNodes,node) // don't remove since it already piggybacked with a MapReduce node
					&& branchCanBePiggyBackedMapAndReduce(tmpNode, node, execNodes, queuedNodes)
					&& !tmpNode.definesMRJob()) {
				if( LOG.isTraceEnabled() )
					LOG.trace(" Removing for next iteration (code 5): ("+ tmpNode.getID() + ") " + tmpNode.toString());
				markedNodes.add(tmpNode);
			}
		} // for i
		// we also need to delete all parent nodes of marked nodes
		for ( Lop enode : execNodes ) {
			if( LOG.isTraceEnabled() ) {
				LOG.trace(" Checking for removal - ("
						+ enode.getID() + ") " + enode.toString());
			}
			if (hasChildNode(enode, markedNodes) && !markedNodes.contains(enode)) {
				markedNodes.add(enode);
				if( LOG.isTraceEnabled() )
					LOG.trace(" Removing for next iteration (code 6) (" + enode.getID() + ") " + enode.toString());
			}
		}
		// move marked nodes back to the queued list -- but only if at least
		// one node would remain in execNodes, otherwise no progress is made
		if ( execNodes.size() != markedNodes.size() ) {
			// delete marked nodes from finishedNodes and execNodes
			// add to queued nodes
			for(Lop n : markedNodes) {
				// release the distributed-cache memory budget reserved for n
				if ( n.usesDistributedCache() )
					gmrMapperFootprint -= computeFootprintInMapper(n);
				finishedNodes.remove(n);
				execNodes.remove(n);
				removeNodeByJobType(n, jobvec);
				queuedNodes.add(n);
			}
		}
	}
private boolean branchCanBePiggyBackedReduce(Lop tmpNode, Lop node, ArrayList<Lop> execNodes, ArrayList<Lop> queuedNodes) {
if(node.getExecLocation() != ExecLocation.Reduce)
return false;
// if tmpNode is descendant of any queued child of node, then branch can not be piggybacked
for(Lop ni : node.getInputs()) {
if(queuedNodes.contains(ni) && isChild(tmpNode, ni, IDMap))
return false;
}
for( Lop n : execNodes ) {
if(n.equals(node))
continue;
if(n.equals(tmpNode) && n.getExecLocation() != ExecLocation.Map && n.getExecLocation() != ExecLocation.MapOrReduce)
return false;
// check if n is on the branch tmpNode->*->node
if(isChild(n, node, IDMap) && isChild(tmpNode, n, IDMap)) {
if(!node.getInputs().contains(tmpNode) // redundant
&& n.getExecLocation() != ExecLocation.Map && n.getExecLocation() != ExecLocation.MapOrReduce)
return false;
}
}
return true;
}
private boolean branchCanBePiggyBackedMap(Lop tmpNode, Lop node, ArrayList<Lop> execNodes, ArrayList<Lop> queuedNodes, ArrayList<Lop> markedNodes) {
if(node.getExecLocation() != ExecLocation.Map)
return false;
// if tmpNode is descendant of any queued child of node, then branch can not be piggybacked
for(Lop ni : node.getInputs()) {
if(queuedNodes != null && queuedNodes.contains(ni) && isChild(tmpNode, ni, IDMap))
return false;
}
// since node.location=Map: only Map & MapOrReduce lops must be considered
if( tmpNode.definesMRJob() || (tmpNode.getExecLocation() != ExecLocation.Map && tmpNode.getExecLocation() != ExecLocation.MapOrReduce))
return false;
// if there exist a node "dcInput" that is
// -- a) parent of tmpNode, and b) feeds into "node" via distributed cache
// then, tmpNode should not be removed.
// "dcInput" must be executed prior to "node", and removal of tmpNode does not make that happen.
if(node.usesDistributedCache() ) {
for(int dcInputIndex : node.distributedCacheInputIndex()) {
Lop dcInput = node.getInputs().get(dcInputIndex-1);
if(isChild(tmpNode, dcInput, IDMap))
return false;
}
}
// if tmpNode requires an input from distributed cache,
// remove tmpNode only if that input can fit into mappers' memory. If not,
if ( tmpNode.usesDistributedCache() ) {
double memsize = computeFootprintInMapper(tmpNode);
if (node.usesDistributedCache() )
memsize += computeFootprintInMapper(node);
if ( markedNodes != null ) {
for(Lop n : markedNodes) {
if ( n.usesDistributedCache() )
memsize += computeFootprintInMapper(n);
}
}
if ( !checkMemoryLimits(node, memsize ) ) {
return false;
}
}
return ( (tmpNode.getCompatibleJobs() & node.getCompatibleJobs()) > 0);
}
/**
* Function that checks if <code>tmpNode</code> can be piggybacked with MapAndReduce
* lop <code>node</code>.
*
* Decision depends on the exec location of <code>tmpNode</code>. If the exec location is:
* MapAndReduce: CAN NOT be piggybacked since it defines its own MR job
* Reduce: CAN NOT be piggybacked since it must execute before <code>node</code>
* Map or MapOrReduce: CAN be piggybacked ONLY IF it is compatible with the job type of <code>node</code>
*
* @param tmpNode temporary low-level operator
* @param node low-level operator
* @param execNodes list of exec nodes
* @param queuedNodes list of queued nodes
* @return true if tmpNode can be piggbacked on node
*/
private boolean branchCanBePiggyBackedMapAndReduce(Lop tmpNode, Lop node,
ArrayList<Lop> execNodes, ArrayList<Lop> queuedNodes) {
if (node.getExecLocation() != ExecLocation.MapAndReduce)
return false;
JobType jt = JobType.findJobTypeFromLop(node);
for ( Lop n : execNodes ) {
if (n.equals(node))
continue;
// Evaluate only nodes on the branch between tmpNode->..->node
if (n.equals(tmpNode) || (isChild(n, node, IDMap) && isChild(tmpNode, n, IDMap))) {
if ( hasOtherMapAndReduceParentNode(tmpNode, queuedNodes,node) )
return false;
ExecLocation el = n.getExecLocation();
if (el != ExecLocation.Map && el != ExecLocation.MapOrReduce)
return false;
else if (!isCompatible(n, jt))
return false;
}
}
return true;
}
private boolean branchHasNoOtherUnExecutedParents(Lop tmpNode, Lop node,
ArrayList<Lop> execNodes, ArrayList<Lop> finishedNodes) {
//if tmpNode has more than one unfinished output, return false
if(tmpNode.getOutputs().size() > 1)
{
int cnt = 0;
for (Lop output : tmpNode.getOutputs() )
if (!finishedNodes.contains(output))
cnt++;
if(cnt != 1)
return false;
}
//check to see if any node between node and tmpNode has more than one unfinished output
for( Lop n : execNodes ) {
if(n.equals(node) || n.equals(tmpNode))
continue;
if(isChild(n, node, IDMap) && isChild(tmpNode, n, IDMap))
{
int cnt = 0;
for (Lop output : n.getOutputs() ) {
if (!finishedNodes.contains(output))
cnt++;
}
if(cnt != 1)
return false;
}
}
return true;
}
/**
* Method to return the job index for a lop.
*
* @param lops low-level operator
* @param jobvec list of lists of low-level operators
* @return job index for a low-level operator
* @throws LopsException if LopsException occurs
*/
private static int jobType(Lop lops, ArrayList<ArrayList<Lop>> jobvec) throws LopsException {
for ( JobType jt : JobType.values()) {
int i = jt.getId();
if (i > 0 && jobvec.get(i) != null && jobvec.get(i).contains(lops)) {
return i;
}
}
return -1;
}
/**
* Method to see if there is a node of type MapAndReduce between tmpNode and node
* in given node collection
*
* @param tmpNode temporary low-level operator
* @param nodeList list of low-level operators
* @param node low-level operator
* @return true if MapAndReduce node between tmpNode and node in nodeList
*/
private boolean hasOtherMapAndReduceParentNode(Lop tmpNode,
ArrayList<Lop> nodeList, Lop node) {
if ( tmpNode.getExecLocation() == ExecLocation.MapAndReduce)
return true;
for ( Lop n : tmpNode.getOutputs() ) {
if ( nodeList.contains(n) && isChild(n,node,IDMap)) {
if(!n.equals(node) && n.getExecLocation() == ExecLocation.MapAndReduce)
return true;
else
return hasOtherMapAndReduceParentNode(n, nodeList, node);
}
}
return false;
}
/**
* Method to check if there is a queued node that is a parent of both tmpNode and node
*
* @param tmpNode temporary low-level operator
* @param queuedNodes list of queued nodes
* @param node low-level operator
* @return true if there is a queued node that is a parent of tmpNode and node
*/
private boolean hasOtherQueuedParentNode(Lop tmpNode, ArrayList<Lop> queuedNodes, Lop node) {
if ( queuedNodes.isEmpty() )
return false;
boolean[] nodeMarked = node.get_reachable();
boolean[] tmpMarked = tmpNode.get_reachable();
long nodeid = IDMap.get(node.getID());
long tmpid = IDMap.get(tmpNode.getID());
for ( Lop qnode : queuedNodes ) {
int id = IDMap.get(qnode.getID());
if ((id != nodeid && nodeMarked[id]) && (id != tmpid && tmpMarked[id]) )
return true;
}
return false;
}
/**
* Method to print the lops grouped by job type
*
* @param jobNodes list of lists of low-level operators
* @throws DMLRuntimeException if DMLRuntimeException occurs
*/
private static void printJobNodes(ArrayList<ArrayList<Lop>> jobNodes)
throws DMLRuntimeException
{
if (LOG.isTraceEnabled()){
for ( JobType jt : JobType.values() ) {
int i = jt.getId();
if (i > 0 && jobNodes.get(i) != null && !jobNodes.get(i).isEmpty() ) {
LOG.trace(jt.getName() + " Job Nodes:");
for (int j = 0; j < jobNodes.get(i).size(); j++) {
LOG.trace(" "
+ jobNodes.get(i).get(j).getID() + ") "
+ jobNodes.get(i).get(j).toString());
}
}
}
}
}
/**
* Method to check if there exists any lops with ExecLocation=RecordReader
*
* @param nodes list of low-level operators
* @param loc exec location
* @return true if there is a node with RecordReader exec location
*/
private static boolean hasANode(ArrayList<Lop> nodes, ExecLocation loc) {
for ( Lop n : nodes ) {
if (n.getExecLocation() == ExecLocation.RecordReader)
return true;
}
return false;
}
/**
 * Splits a set of GMR lops into separate node vectors, one per record-reader
 * lop (each containing the reader and all its ancestors), plus one final
 * vector for lops that do not depend on any record reader.
 *
 * @param gmrnodes lops assigned to a GMR job
 * @return node vectors, one per record reader plus one for the remainder
 */
private ArrayList<ArrayList<Lop>> splitGMRNodesByRecordReader(ArrayList<Lop> gmrnodes)
{
	// collect all record-reader lops from the GMR node list
	ArrayList<Lop> readerNodes = new ArrayList<Lop>();
	for ( Lop node : gmrnodes )
		if ( node.getExecLocation() == ExecLocation.RecordReader )
			readerNodes.add(node);

	// one vector per record reader, plus one extra for independent lops
	ArrayList<ArrayList<Lop>> splitGMR = createNodeVectors(readerNodes.size() + 1);

	// tracks whether a gmr lop has already been assigned to some vector
	boolean[] assigned = new boolean[gmrnodes.size()];
	Arrays.fill(assigned, false);

	// assign each record reader and all of its ancestors to its own vector
	for ( int rrid = 0; rrid < readerNodes.size(); rrid++ ) {
		Lop reader = readerNodes.get(rrid);
		splitGMR.get(rrid).add(reader);
		for ( int j = 0; j < gmrnodes.size(); j++ ) {
			Lop candidate = gmrnodes.get(j);
			if ( reader.equals(candidate) )
				assigned[j] = true;
			else if ( isChild(reader, candidate, IDMap) ) {
				splitGMR.get(rrid).add(candidate);
				assigned[j] = true;
			}
		}
	}

	// all remaining lops form a separate job (the last node vector)
	int lastIndex = readerNodes.size();
	for ( int j = 0; j < gmrnodes.size(); j++ ) {
		if ( !assigned[j] ) {
			splitGMR.get(lastIndex).add(gmrnodes.get(j));
			assigned[j] = true;
		}
	}

	return splitGMR;
}
/**
 * Method to generate hadoop jobs. Exec nodes can contains a mixture of node
 * types requiring different mr jobs. This method breaks the job into
 * sub-types and then invokes the appropriate method to generate
 * instructions.
 *
 * @param execNodes list of exec nodes
 * @param inst list of instructions
 * @param writeinst list of write instructions
 * @param deleteinst list of delete instructions
 * @param jobNodes list of list of low-level operators, indexed by job type id
 * @throws LopsException if LopsException occurs
 * @throws DMLRuntimeException if DMLRuntimeException occurs
 */
private void generateMRJobs(ArrayList<Lop> execNodes,
		ArrayList<Instruction> inst,
		ArrayList<Instruction> writeinst,
		ArrayList<Instruction> deleteinst, ArrayList<ArrayList<Lop>> jobNodes)
		throws LopsException, DMLRuntimeException
{
	printJobNodes(jobNodes);

	// rmvar instructions produced by the individual jobs; appended
	// after all generated job instructions at the end of this method
	ArrayList<Instruction> rmvarinst = new ArrayList<Instruction>();
	for (JobType jt : JobType.values()) {

		// do nothing, if jt = INVALID or ANY
		if ( jt == JobType.INVALID || jt == JobType.ANY )
			continue;

		int index = jt.getId(); // job id is used as an index into jobNodes
		ArrayList<Lop> currNodes = jobNodes.get(index);

		// generate MR job
		if (currNodes != null && !currNodes.isEmpty() ) {

			if( LOG.isTraceEnabled() )
				LOG.trace("Generating " + jt.getName() + " job");

			if (jt.allowsRecordReaderInstructions() && hasANode(jobNodes.get(index), ExecLocation.RecordReader)) {
				// split the nodes by recordReader lops
				// (one MR job per record-reader group)
				ArrayList<ArrayList<Lop>> rrlist = splitGMRNodesByRecordReader(jobNodes.get(index));
				for (int i = 0; i < rrlist.size(); i++) {
					generateMapReduceInstructions(rrlist.get(i), inst, writeinst, deleteinst, rmvarinst, jt);
				}
			}
			else if ( jt.allowsSingleShuffleInstruction() ) {
				// These jobs allow a single shuffle instruction.
				// We should split the nodes so that a separate job is produced for each shuffle instruction.
				Lop.Type splittingLopType = jt.getShuffleLopType();

				ArrayList<Lop> nodesForASingleJob = new ArrayList<Lop>();
				for (int i = 0; i < jobNodes.get(index).size(); i++) {
					if (jobNodes.get(index).get(i).getType() == splittingLopType) {
						nodesForASingleJob.clear();

						// Add the lop that defines the split
						nodesForASingleJob.add(jobNodes.get(index).get(i));

						/*
						 * Add the splitting lop's children. This call is redundant when jt=SORT
						 * because a sort job ALWAYS has a SINGLE lop in the entire job
						 * i.e., there are no children to add when jt=SORT.
						 */
						addChildren(jobNodes.get(index).get(i), nodesForASingleJob, jobNodes.get(index));

						if ( jt.isCompatibleWithParentNodes() ) {
							/*
							 * If the splitting lop is compatible with parent nodes
							 * then they must be added to the job. For example, MMRJ lop
							 * may have a Data(Write) lop as its parent, which can be
							 * executed along with MMRJ.
							 */
							addParents(jobNodes.get(index).get(i), nodesForASingleJob, jobNodes.get(index));
						}

						generateMapReduceInstructions(nodesForASingleJob, inst, writeinst, deleteinst, rmvarinst, jt);
					}
				}
			}
			else {
				// the default case: all nodes of this job type go into one MR job
				generateMapReduceInstructions(jobNodes.get(index), inst, writeinst, deleteinst, rmvarinst, jt);
			}
		}
	}
	inst.addAll(rmvarinst);
}
/**
 * Method to add all parents of "node" in exec_n to node_v.
 *
 * @param node low-level operator whose parents are collected
 * @param node_v list of nodes to add parents to
 * @param exec_n list of candidate exec nodes
 */
private void addParents(Lop node, ArrayList<Lop> node_v, ArrayList<Lop> exec_n) {
	for (Lop enode : exec_n ) {
		// an exec node is a parent iff "node" is its (transitive) child
		if ( !isChild(node, enode, IDMap) )
			continue;
		if ( node_v.contains(enode) )
			continue;
		if( LOG.isTraceEnabled() )
			LOG.trace("Adding parent - " + enode.toString());
		node_v.add(enode);
	}
}
/**
 * Recursively adds "node" and all of its (transitive) inputs that are
 * contained in exec_n to node_v, skipping control-program lops.
 *
 * @param node low-level operator to start from
 * @param node_v list of nodes collected so far
 * @param exec_n list of candidate exec nodes
 */
private static void addChildren(Lop node, ArrayList<Lop> node_v, ArrayList<Lop> exec_n) {
	// only nodes within the exec set are considered
	if ( !exec_n.contains(node) )
		return;

	// add the node itself, unless it runs in the control program
	// or has already been collected
	if ( node.getExecLocation() != ExecLocation.ControlProgram
		&& !node_v.contains(node) ) {
		node_v.add(node);
		if(LOG.isTraceEnabled())
			LOG.trace("  Added child " + node.toString());
	}

	// recurse into all inputs
	for ( Lop input : node.getInputs() )
		addChildren(input, node_v, exec_n);
}
/**
 * Method that determines the output format for a given node.
 *
 * @param node low-level operator
 * @param cellModeOverride if true, a blocked output is forced to binary cell
 *        (e.g., because the job contains recordReader instructions)
 * @return output info; null for CP scalars and function calls, which have no file output
 * @throws LopsException if LopsException occurs
 */
private static OutputInfo getOutputInfo(Lop node, boolean cellModeOverride)
	throws LopsException
{
	// CP scalars and function calls produce no file output
	if ( (node.getDataType() == DataType.SCALAR && node.getExecType() == ExecType.CP)
			|| node instanceof FunctionCallCP )
		return null;

	OutputInfo oinfo = null;
	OutputParameters oparams = node.getOutputParameters();

	if (oparams.isBlocked()) {
		if ( !cellModeOverride )
			oinfo = OutputInfo.BinaryBlockOutputInfo;
		else {
			// output format is overridden, for example, due to recordReaderInstructions in the job
			oinfo = OutputInfo.BinaryCellOutputInfo;

			// record decision of overriding in lop's outputParameters so that
			// subsequent jobs that use this lop's output know the correct format.
			// TODO: ideally, this should be done by having a member variable in Lop
			// which stores the outputInfo.
			try {
				oparams.setDimensions(oparams.getNumRows(), oparams.getNumCols(), -1, -1, oparams.getNnz(), oparams.getUpdateType());
			} catch(HopsException e) {
				throw new LopsException(node.printErrorLocation() + "error in getOutputInfo in Dag ", e);
			}
		}
	} else {
		// unblocked output: format follows the declared output format
		if (oparams.getFormat() == Format.TEXT || oparams.getFormat() == Format.MM)
			oinfo = OutputInfo.TextCellOutputInfo;
		else if ( oparams.getFormat() == Format.CSV ) {
			oinfo = OutputInfo.CSVOutputInfo;
		}
		else {
			oinfo = OutputInfo.BinaryCellOutputInfo;
		}
	}

	/* Instead of following hardcoding, one must get this information from Lops */
	// NOTE: the lop-type-specific overrides below intentionally run last,
	// taking precedence over the blocked/unblocked decision above
	if (node.getType() == Type.SortKeys && node.getExecType() == ExecType.MR) {
		if( ((SortKeys)node).getOpType() == SortKeys.OperationTypes.Indexes)
			oinfo = OutputInfo.BinaryBlockOutputInfo;
		else
			oinfo = OutputInfo.OutputInfoForSortOutput;
	} else if (node.getType() == Type.CombineBinary) {
		// Output format of CombineBinary (CB) depends on how the output is consumed
		CombineBinary combine = (CombineBinary) node;
		if ( combine.getOperation() == org.apache.sysml.lops.CombineBinary.OperationTypes.PreSort ) {
			oinfo = OutputInfo.OutputInfoForSortInput;
		}
		else if ( combine.getOperation() == org.apache.sysml.lops.CombineBinary.OperationTypes.PreCentralMoment
				|| combine.getOperation() == org.apache.sysml.lops.CombineBinary.OperationTypes.PreCovUnweighted
				|| combine.getOperation() == org.apache.sysml.lops.CombineBinary.OperationTypes.PreGroupedAggUnweighted ) {
			oinfo = OutputInfo.WeightedPairOutputInfo;
		}
	} else if ( node.getType() == Type.CombineTernary) {
		oinfo = OutputInfo.WeightedPairOutputInfo;
	} else if (node.getType() == Type.CentralMoment
			|| node.getType() == Type.CoVariance )
	{
		// CMMR always operate in "cell mode",
		// and the output is always in cell format
		oinfo = OutputInfo.BinaryCellOutputInfo;
	}

	return oinfo;
}
/**
 * Builds a CP "assignvar" instruction string that assigns the scalar
 * value produced by {@code input} to the output operand of {@code node}.
 * Fields are: exec-type, opcode, scalar input operand, output operand.
 */
private String prepareAssignVarInstruction(Lop input, Lop node) {
	StringBuilder sb = new StringBuilder();
	sb.append(ExecType.CP)
	  .append(Lop.OPERAND_DELIMITOR)
	  .append("assignvar")
	  .append(Lop.OPERAND_DELIMITOR)
	  .append(input.prepScalarInputOperand(ExecType.CP))
	  .append(Lop.OPERAND_DELIMITOR)
	  .append(node.prepOutputOperand());
	return sb.toString();
}
/**
 * Method to setup output filenames and outputInfos, and to generate related instructions
 *
 * @param node low-level operator
 * @param et exec type
 * @param cellModeOverride override mode (force cell output format)
 * @param copyTWrite if true, a transient write whose input is also a root node is
 *        handled via a single copy (cpvar) instruction and the method returns early
 * @return node output
 * @throws DMLRuntimeException if DMLRuntimeException occurs
 * @throws LopsException if LopsException occurs
 */
private NodeOutput setupNodeOutputs(Lop node, ExecType et, boolean cellModeOverride, boolean copyTWrite)
	throws DMLRuntimeException, LopsException {

	OutputParameters oparams = node.getOutputParameters();
	NodeOutput out = new NodeOutput();

	node.setConsumerCount(node.getOutputs().size());

	// Compute the output format for this node
	out.setOutInfo(getOutputInfo(node, cellModeOverride));

	// If node is NOT of type Data then we must generate
	// a variable to hold the value produced by this node
	// note: functioncallcp requires no createvar, rmvar since
	// since outputs are explicitly specified
	if (node.getExecLocation() != ExecLocation.Data )
	{
		if (node.getDataType() == DataType.SCALAR) {
			// scalar result: only a fresh variable name plus an rmvar at the end
			oparams.setLabel(Lop.SCALAR_VAR_NAME_PREFIX + var_index.getNextID());
			out.setVarName(oparams.getLabel());
			Instruction currInstr = VariableCPInstruction.prepareRemoveInstruction(oparams.getLabel());
			currInstr.setLocation(node);
			out.addLastInstruction(currInstr);
		}
		else if(node instanceof ParameterizedBuiltin
				&& ((ParameterizedBuiltin)node).getOp() == org.apache.sysml.lops.ParameterizedBuiltin.OperationTypes.TRANSFORM) {
			ParameterizedBuiltin pbi = (ParameterizedBuiltin)node;
			Lop input = pbi.getNamedInput(ParameterizedBuiltinFunctionExpression.TF_FN_PARAM_DATA);
			if(input.getDataType()== DataType.FRAME) {
				// Output of transform is in CSV format, which gets subsequently reblocked
				// TODO: change it to output binaryblock
				Data dataInput = (Data) input;
				oparams.setFile_name(getNextUniqueFilename());
				oparams.setLabel(getNextUniqueVarname(DataType.MATRIX));

				// generate an instruction that creates a symbol table entry for the new variable in CSV format
				Data delimLop = (Data) dataInput.getNamedInputLop(
						DataExpression.DELIM_DELIMITER, DataExpression.DEFAULT_DELIM_DELIMITER);
				Instruction createvarInst = VariableCPInstruction.prepareCreateVariableInstruction(
						oparams.getLabel(), oparams.getFile_name(), true,
						DataType.MATRIX, OutputInfo.outputInfoToString(OutputInfo.CSVOutputInfo),
						new MatrixCharacteristics(oparams.getNumRows(), oparams.getNumCols(), -1, -1, oparams.getNnz()), oparams.getUpdateType(),
						false, delimLop.getStringValue(), true
					);
				createvarInst.setLocation(node);
				out.addPreInstruction(createvarInst);

				// temp file as well as the variable has to be deleted at the end
				Instruction currInstr = VariableCPInstruction.prepareRemoveInstruction(oparams.getLabel());
				currInstr.setLocation(node);
				out.addLastInstruction(currInstr);

				// finally, add the generated filename and variable name to the list of outputs
				out.setFileName(oparams.getFile_name());
				out.setVarName(oparams.getLabel());
			}
			else {
				throw new LopsException("Input to transform() has an invalid type: " + input.getDataType() + ", it must be FRAME.");
			}
		}
		else if(!(node instanceof FunctionCallCP)) //general case
		{
			// generate temporary filename and a variable name to hold the
			// output produced by "rootNode"
			oparams.setFile_name(getNextUniqueFilename());
			oparams.setLabel(getNextUniqueVarname(node.getDataType()));

			// generate an instruction that creates a symbol table entry for the new variable
			//String createInst = prepareVariableInstruction("createvar", node);
			//out.addPreInstruction(CPInstructionParser.parseSingleInstruction(createInst));
			int rpb = (int) oparams.getRowsInBlock();
			int cpb = (int) oparams.getColsInBlock();
			Instruction createvarInst = VariableCPInstruction.prepareCreateVariableInstruction(
									oparams.getLabel(),
									oparams.getFile_name(),
									true, node.getDataType(),
									OutputInfo.outputInfoToString(getOutputInfo(node, false)),
									new MatrixCharacteristics(oparams.getNumRows(), oparams.getNumCols(), rpb, cpb, oparams.getNnz()),
									oparams.getUpdateType()
								);
			createvarInst.setLocation(node);
			out.addPreInstruction(createvarInst);

			// temp file as well as the variable has to be deleted at the end
			Instruction currInstr = VariableCPInstruction.prepareRemoveInstruction(oparams.getLabel());
			currInstr.setLocation(node);
			out.addLastInstruction(currInstr);

			// finally, add the generated filename and variable name to the list of outputs
			out.setFileName(oparams.getFile_name());
			out.setVarName(oparams.getLabel());
		}
		else {
			// If the function call is set with output lops (e.g., multi return builtin),
			// generate a createvar instruction for each function output
			FunctionCallCP fcall = (FunctionCallCP) node;
			if ( fcall.getFunctionOutputs() != null ) {
				for( Lop fnOut: fcall.getFunctionOutputs()) {
					OutputParameters fnOutParams = fnOut.getOutputParameters();
					//OutputInfo oinfo = getOutputInfo((N)fnOut, false);
					Instruction createvarInst = VariableCPInstruction.prepareCreateVariableInstruction(
							fnOutParams.getLabel(),
							getFilePath() + fnOutParams.getLabel(),
							true, fnOut.getDataType(),
							OutputInfo.outputInfoToString(getOutputInfo(fnOut, false)),
							new MatrixCharacteristics(fnOutParams.getNumRows(), fnOutParams.getNumCols(), (int)fnOutParams.getRowsInBlock(), (int)fnOutParams.getColsInBlock(), fnOutParams.getNnz()),
							oparams.getUpdateType()
						);

					// prefer the function-call node's source location; fall back to the output lop
					if (node._beginLine != 0)
						createvarInst.setLocation(node);
					else
						createvarInst.setLocation(fnOut);

					out.addPreInstruction(createvarInst);
				}
			}
		}
	}
	// rootNode is of type Data
	else {

		if ( node.getDataType() == DataType.SCALAR ) {
			// generate assignment operations for final and transient writes
			if ( oparams.getFile_name() == null && !(node instanceof Data && ((Data)node).isPersistentWrite()) ) {
				String io_inst = prepareAssignVarInstruction(node.getInputs().get(0), node);
				CPInstruction currInstr = CPInstructionParser.parseSingleInstruction(io_inst);

				if (node._beginLine != 0)
					currInstr.setLocation(node);
				else if ( !node.getInputs().isEmpty() )
					currInstr.setLocation(node.getInputs().get(0));

				out.addLastInstruction(currInstr);
			}
			else {
				//CP PERSISTENT WRITE SCALARS
				Lop fname = ((Data)node).getNamedInputLop(DataExpression.IO_FILENAME);
				String io_inst = node.getInstructions(node.getInputs().get(0).getOutputParameters().getLabel(), fname.getOutputParameters().getLabel());
				CPInstruction currInstr = CPInstructionParser.parseSingleInstruction(io_inst);

				if (node._beginLine != 0)
					currInstr.setLocation(node);
				else if ( !node.getInputs().isEmpty() )
					currInstr.setLocation(node.getInputs().get(0));

				out.addLastInstruction(currInstr);
			}
		}
		else {
			if ( ((Data)node).isTransient() ) {

				if ( et == ExecType.CP ) {
					// If transient matrix write is in CP then its input MUST be executed in CP as well.

					// get variable and filename associated with the input
					String inputFileName = node.getInputs().get(0).getOutputParameters().getFile_name();
					String inputVarName = node.getInputs().get(0).getOutputParameters().getLabel();

					String constVarName = oparams.getLabel();
					String constFileName = inputFileName + constVarName;

					/*
					 * Symbol Table state must change as follows:
					 *
					 * FROM:
					 *     mvar1 -> temp21
					 *
					 * TO:
					 *     mVar1 -> temp21
					 *     tVarH -> temp21
					 */
					Instruction currInstr = VariableCPInstruction.prepareCopyInstruction(inputVarName, constVarName);
					currInstr.setLocation(node);
					out.addLastInstruction(currInstr);
					out.setFileName(constFileName);
				}
				else {
					if(copyTWrite) {
						// input is also a root node: a single cpvar suffices (see caller)
						Instruction currInstr = VariableCPInstruction.prepareCopyInstruction(node.getInputs().get(0).getOutputParameters().getLabel(), oparams.getLabel());
						currInstr.setLocation(node);
						out.addLastInstruction(currInstr);
						return out;
					}

					/*
					 * Since the "rootNode" is a transient data node, we first need to generate a
					 * temporary filename as well as a variable name to hold the <i>immediate</i>
					 * output produced by "rootNode". These generated HDFS filename and the
					 * variable name must be changed at the end of an iteration/program block
					 * so that the subsequent iteration/program block can correctly access the
					 * generated data. Therefore, we need to distinguish between the following:
					 *
					 * 1) Temporary file name & variable name: They hold the immediate output
					 * produced by "rootNode". Both names are generated below.
					 *
					 * 2) Constant file name & variable name: They are constant across iterations.
					 * Variable name is given by rootNode's label that is created in the upper layers.
					 * File name is generated by concatenating "temporary file name" and "constant variable name".
					 *
					 * Temporary files must be moved to constant files at the end of the iteration/program block.
					 */

					// generate temporary filename & var name
					String tempVarName = oparams.getLabel() + "temp";
					String tempFileName = getNextUniqueFilename();

					//String createInst = prepareVariableInstruction("createvar", tempVarName, node.getDataType(), node.getValueType(), tempFileName, oparams, out.getOutInfo());
					//out.addPreInstruction(CPInstructionParser.parseSingleInstruction(createInst));

					int rpb = (int) oparams.getRowsInBlock();
					int cpb = (int) oparams.getColsInBlock();
					Instruction createvarInst = VariableCPInstruction.prepareCreateVariableInstruction(
												tempVarName,
												tempFileName,
												true, node.getDataType(),
												OutputInfo.outputInfoToString(out.getOutInfo()),
												new MatrixCharacteristics(oparams.getNumRows(), oparams.getNumCols(), rpb, cpb, oparams.getNnz()),
												oparams.getUpdateType()
											);
					createvarInst.setLocation(node);
					out.addPreInstruction(createvarInst);

					String constVarName = oparams.getLabel();
					String constFileName = tempFileName + constVarName;

					oparams.setFile_name(getFilePath() + constFileName);

					/*
					 * Since this is a node that denotes a transient read/write, we need to make sure
					 * that the data computed for a given variable in a given iteration is passed on
					 * to the next iteration. This is done by generating miscellaneous instructions
					 * that gets executed at the end of the program block.
					 *
					 * The state of the symbol table must change
					 *
					 * FROM:
					 *     tVarA -> temp21tVarA (old copy of temp21)
					 *     tVarAtemp -> temp21  (new copy that should override the old copy)
					 *
					 * TO:
					 *     tVarA -> temp21tVarA
					 */

					// rename the temp variable to constant variable (e.g., cpvar tVarAtemp tVarA)
					/*Instruction currInstr = VariableCPInstruction.prepareCopyInstruction(tempVarName, constVarName);
					if(DMLScript.ENABLE_DEBUG_MODE) {
						currInstr.setLineNum(node._beginLine);
					}
					out.addLastInstruction(currInstr);
					Instruction tempInstr = VariableCPInstruction.prepareRemoveInstruction(tempVarName);
					if(DMLScript.ENABLE_DEBUG_MODE) {
						tempInstr.setLineNum(node._beginLine);
					}
					out.addLastInstruction(tempInstr);*/

					// Generate a single mvvar instruction (e.g., mvvar tempA A)
					// instead of two instructions "cpvar tempA A" and "rmvar tempA"
					Instruction currInstr = VariableCPInstruction.prepareMoveInstruction(tempVarName, constVarName);
					currInstr.setLocation(node);
					out.addLastInstruction(currInstr);

					// finally, add the temporary filename and variable name to the list of outputs
					out.setFileName(tempFileName);
					out.setVarName(tempVarName);
				}
			}
			// rootNode is not a transient write. It is a persistent write.
			else {
				if(et == ExecType.MR) { //MR PERSISTENT WRITE
					// create a variable to hold the result produced by this "rootNode"
					oparams.setLabel("pVar" + var_index.getNextID() );

					//String createInst = prepareVariableInstruction("createvar", node);
					//out.addPreInstruction(CPInstructionParser.parseSingleInstruction(createInst));

					int rpb = (int) oparams.getRowsInBlock();
					int cpb = (int) oparams.getColsInBlock();
					Lop fnameLop = ((Data)node).getNamedInputLop(DataExpression.IO_FILENAME);
					String fnameStr = (fnameLop instanceof Data && ((Data)fnameLop).isLiteral()) ?
							           fnameLop.getOutputParameters().getLabel()
							           : Lop.VARIABLE_NAME_PLACEHOLDER + fnameLop.getOutputParameters().getLabel() + Lop.VARIABLE_NAME_PLACEHOLDER;

					Instruction createvarInst;

					// for MatrixMarket format, the creatvar will output the result to a temporary file in textcell format
					// the CP write instruction (post instruction) after the MR instruction will merge the result into a single
					// part MM format file on hdfs.
					if (oparams.getFormat() == Format.CSV)  {

						String tempFileName = getNextUniqueFilename();

						String createInst = node.getInstructions(tempFileName);
						createvarInst= CPInstructionParser.parseSingleInstruction(createInst);

						//NOTE: no instruction patching because final write from cp instruction
						String writeInst = node.getInstructions(oparams.getLabel(), fnameLop.getOutputParameters().getLabel() );
						CPInstruction currInstr = CPInstructionParser.parseSingleInstruction(writeInst);
						currInstr.setLocation(node);

						out.addPostInstruction(currInstr);

						// remove the variable
						CPInstruction tempInstr = CPInstructionParser.parseSingleInstruction(
								"CP" + Lop.OPERAND_DELIMITOR + "rmfilevar" + Lop.OPERAND_DELIMITOR
										+ oparams.getLabel() + Lop.VALUETYPE_PREFIX + Expression.ValueType.UNKNOWN + Lop.OPERAND_DELIMITOR
										+ "true" + Lop.VALUETYPE_PREFIX + "BOOLEAN");
						tempInstr.setLocation(node);
						out.addLastInstruction(tempInstr);
					}
					else if (oparams.getFormat() == Format.MM )  {

						createvarInst= VariableCPInstruction.prepareCreateVariableInstruction(
												oparams.getLabel(),
												getNextUniqueFilename(),
												false, node.getDataType(),
												OutputInfo.outputInfoToString(getOutputInfo(node, false)),
												new MatrixCharacteristics(oparams.getNumRows(), oparams.getNumCols(), rpb, cpb, oparams.getNnz()),
												oparams.getUpdateType()
											);

						//NOTE: no instruction patching because final write from cp instruction
						String writeInst = node.getInstructions(oparams.getLabel(), fnameLop.getOutputParameters().getLabel());
						CPInstruction currInstr = CPInstructionParser.parseSingleInstruction(writeInst);
						currInstr.setLocation(node);

						out.addPostInstruction(currInstr);

						// remove the variable
						CPInstruction tempInstr = CPInstructionParser.parseSingleInstruction(
								"CP" + Lop.OPERAND_DELIMITOR + "rmfilevar" + Lop.OPERAND_DELIMITOR
										+ oparams.getLabel() + Lop.VALUETYPE_PREFIX + Expression.ValueType.UNKNOWN + Lop.OPERAND_DELIMITOR
										+ "true" + Lop.VALUETYPE_PREFIX + "BOOLEAN");
						tempInstr.setLocation(node);
						out.addLastInstruction(tempInstr);
					}
					else {
						// default: write directly to the target file (literal or variable name)
						createvarInst= VariableCPInstruction.prepareCreateVariableInstruction(
								                oparams.getLabel(),
								                fnameStr,
								                false, node.getDataType(),
								                OutputInfo.outputInfoToString(getOutputInfo(node, false)),
								                new MatrixCharacteristics(oparams.getNumRows(), oparams.getNumCols(), rpb, cpb, oparams.getNnz()),
								                oparams.getUpdateType()
							                 );

						// remove the variable
						CPInstruction currInstr = CPInstructionParser.parseSingleInstruction(
								"CP" + Lop.OPERAND_DELIMITOR + "rmfilevar" + Lop.OPERAND_DELIMITOR
										+ oparams.getLabel() + Lop.VALUETYPE_PREFIX + Expression.ValueType.UNKNOWN + Lop.OPERAND_DELIMITOR
										+ "false" + Lop.VALUETYPE_PREFIX + "BOOLEAN");
						currInstr.setLocation(node);

						out.addLastInstruction(currInstr);
					}

					createvarInst.setLocation(node);
					out.addPreInstruction(createvarInst);

					// finally, add the filename and variable name to the list of outputs
					out.setFileName(oparams.getFile_name());
					out.setVarName(oparams.getLabel());
				}
				else { //CP PERSISTENT WRITE
					// generate a write instruction that writes matrix to HDFS
					Lop fname = ((Data)node).getNamedInputLop(DataExpression.IO_FILENAME);

					Instruction currInstr = null;
					Lop inputLop = node.getInputs().get(0);

					// Case of a transient read feeding into only one output persistent binaryblock write
					// Move the temporary file on HDFS to required persistent location, insteadof copying.
					if (inputLop.getExecLocation() == ExecLocation.Data
							&& inputLop.getOutputs().size() == 1
							&& ((Data)inputLop).isTransient()
							&& ((Data)inputLop).getOutputParameters().isBlocked()
							&& node.getOutputParameters().isBlocked() ) {
						// transient read feeding into persistent write in blocked representation
						// simply, move the file

						//prepare filename (literal or variable in order to support dynamic write)
						String fnameStr = (fname instanceof Data && ((Data)fname).isLiteral()) ?
								           fname.getOutputParameters().getLabel()
								           : Lop.VARIABLE_NAME_PLACEHOLDER + fname.getOutputParameters().getLabel() + Lop.VARIABLE_NAME_PLACEHOLDER;

						currInstr = (CPInstruction) VariableCPInstruction.prepareMoveInstruction(
								      inputLop.getOutputParameters().getLabel(),
								      fnameStr, "binaryblock" );
					}
					else {

						String io_inst = node.getInstructions(
								node.getInputs().get(0).getOutputParameters().getLabel(),
								fname.getOutputParameters().getLabel());

						if(node.getExecType() == ExecType.SPARK)
							// This will throw an exception if the exectype of hop is set incorrectly
							// Note: the exec type and exec location of lops needs to be set to SPARK and ControlProgram respectively
							currInstr = SPInstructionParser.parseSingleInstruction(io_inst);
						else
							currInstr = CPInstructionParser.parseSingleInstruction(io_inst);
					}

					// prefer the input's source location when the write node has none
					if ( !node.getInputs().isEmpty() && node.getInputs().get(0)._beginLine != 0)
						currInstr.setLocation(node.getInputs().get(0));
					else
						currInstr.setLocation(node);

					out.addLastInstruction(currInstr);
				}
			}
		}
	}

	return out;
}
/**
 * Method to generate MapReduce job instructions from a given set of nodes.
 *
 * @param execNodes list of exec nodes
 * @param inst list of instructions
 * @param writeinst list of write instructions
 * @param deleteinst list of delete instructions
 * @param rmvarinst list of rmvar instructions
 * @param jt job type
 * @throws LopsException if LopsException occurs
 * @throws DMLRuntimeException if DMLRuntimeException occurs
 */
private void generateMapReduceInstructions(ArrayList<Lop> execNodes,
		ArrayList<Instruction> inst, ArrayList<Instruction> writeinst, ArrayList<Instruction> deleteinst, ArrayList<Instruction> rmvarinst,
		JobType jt) throws LopsException, DMLRuntimeException
{
	// accumulators for the various parts of the MR job description
	ArrayList<Byte> resultIndices = new ArrayList<Byte>();
	ArrayList<String> inputs = new ArrayList<String>();
	ArrayList<String> outputs = new ArrayList<String>();
	ArrayList<InputInfo> inputInfos = new ArrayList<InputInfo>();
	ArrayList<OutputInfo> outputInfos = new ArrayList<OutputInfo>();
	ArrayList<Long> numRows = new ArrayList<Long>();
	ArrayList<Long> numCols = new ArrayList<Long>();
	ArrayList<Long> numRowsPerBlock = new ArrayList<Long>();
	ArrayList<Long> numColsPerBlock = new ArrayList<Long>();
	ArrayList<String> mapperInstructions = new ArrayList<String>();
	ArrayList<String> randInstructions = new ArrayList<String>();
	ArrayList<String> recordReaderInstructions = new ArrayList<String>();
	int numReducers = 0;
	int replication = 1;
	ArrayList<String> inputLabels = new ArrayList<String>();
	ArrayList<String> outputLabels = new ArrayList<String>();
	ArrayList<Instruction> renameInstructions = new ArrayList<Instruction>();
	ArrayList<Instruction> variableInstructions = new ArrayList<Instruction>();
	ArrayList<Instruction> postInstructions = new ArrayList<Instruction>();
	ArrayList<Integer> MRJobLineNumbers = null;
	if(DMLScript.ENABLE_DEBUG_MODE) {
		MRJobLineNumbers = new ArrayList<Integer>();
	}

	ArrayList<Lop> inputLops = new ArrayList<Lop>();

	boolean cellModeOverride = false;

	/* Find the nodes that produce an output */
	ArrayList<Lop> rootNodes = new ArrayList<Lop>();
	getOutputNodes(execNodes, rootNodes, jt);
	if( LOG.isTraceEnabled() )
		LOG.trace("# of root nodes = " + rootNodes.size());

	/* Remove transient writes that are simple copy of transient reads */
	if (jt == JobType.GMR || jt == JobType.GMRCELL) {
		ArrayList<Lop> markedNodes = new ArrayList<Lop>();
		// only keep data nodes that are results of some computation.
		for ( Lop rnode : rootNodes ) {
			if (rnode.getExecLocation() == ExecLocation.Data
					&& ((Data) rnode).isTransient()
					&& ((Data) rnode).getOperationType() == OperationTypes.WRITE
					&& ((Data) rnode).getDataType() == DataType.MATRIX) {
				// no computation, just a copy
				if (rnode.getInputs().get(0).getExecLocation() == ExecLocation.Data
						&& ((Data) rnode.getInputs().get(0)).isTransient()
						&& rnode.getOutputParameters().getLabel().equals(
								rnode.getInputs().get(0).getOutputParameters().getLabel()))
				{
					markedNodes.add(rnode);
				}
			}
		}
		// delete marked nodes
		rootNodes.removeAll(markedNodes);
		markedNodes.clear();
		if ( rootNodes.isEmpty() )
			return;
	}

	// structure that maps node to their indices that will be used in the instructions
	HashMap<Lop, Integer> nodeIndexMapping = new HashMap<Lop, Integer>();

	/* Determine all input data files */
	for ( Lop rnode : rootNodes ) {
		getInputPathsAndParameters(rnode, execNodes, inputs, inputInfos, numRows, numCols,
			numRowsPerBlock, numColsPerBlock, nodeIndexMapping, inputLabels, inputLops, MRJobLineNumbers);
	}

	// In case of RAND job, instructions are defined in the input file
	if (jt == JobType.DATAGEN)
		randInstructions = inputs;

	// start_index[0] is the next free operand index after all inputs
	int[] start_index = new int[1];
	start_index[0] = inputs.size();

	/* Get RecordReader Instructions */

	// currently, recordreader instructions are allowed only in GMR jobs
	if (jt == JobType.GMR || jt == JobType.GMRCELL) {
		for ( Lop rnode : rootNodes ) {
			getRecordReaderInstructions(rnode, execNodes, inputs, recordReaderInstructions,
				nodeIndexMapping, start_index, inputLabels, inputLops, MRJobLineNumbers);
			if ( recordReaderInstructions.size() > 1 )
				throw new LopsException("MapReduce job can only have a single recordreader instruction: " + recordReaderInstructions.toString());
		}
	}

	/*
	 * Handle cases when job's output is FORCED to be cell format.
	 * - If there exist a cell input, then output can not be blocked.
	 *   Only exception is when jobType = REBLOCK/CSVREBLOCK (for obvisous reason)
	 *   or when jobType = RAND since RandJob takes a special input file,
	 *   whose format should not be used to dictate the output format.
	 * - If there exists a recordReader instruction
	 * - If jobtype = GroupedAgg. This job can only run in cell mode.
	 */

	//
	if ( jt != JobType.REBLOCK && jt != JobType.CSV_REBLOCK && jt != JobType.DATAGEN && jt != JobType.TRANSFORM) {
		for (int i=0; i < inputInfos.size(); i++)
			if ( inputInfos.get(i) == InputInfo.BinaryCellInputInfo || inputInfos.get(i) == InputInfo.TextCellInputInfo )
				cellModeOverride = true;
	}

	if ( !recordReaderInstructions.isEmpty() || jt == JobType.GROUPED_AGG )
		cellModeOverride = true;

	/* Get Mapper Instructions */
	for (int i = 0; i < rootNodes.size(); i++) {
		getMapperInstructions(rootNodes.get(i), execNodes, inputs,
				mapperInstructions, nodeIndexMapping, start_index,
				inputLabels, inputLops, MRJobLineNumbers);
	}

	if (LOG.isTraceEnabled()) {
		LOG.trace("  Input strings: " + inputs.toString());
		if (jt == JobType.DATAGEN)
			LOG.trace("  Rand instructions: " + getCSVString(randInstructions));
		if (jt == JobType.GMR)
			LOG.trace("  RecordReader instructions: " + getCSVString(recordReaderInstructions));
		LOG.trace("  Mapper instructions: " + getCSVString(mapperInstructions));
	}

	/* Get Shuffle and Reducer Instructions */
	ArrayList<String> shuffleInstructions = new ArrayList<String>();
	ArrayList<String> aggInstructionsReducer = new ArrayList<String>();
	ArrayList<String> otherInstructionsReducer = new ArrayList<String>();

	for( Lop rn : rootNodes ) {
		int resultIndex = getAggAndOtherInstructions(
				rn, execNodes, shuffleInstructions, aggInstructionsReducer,
				otherInstructionsReducer, nodeIndexMapping, start_index,
				inputLabels, inputLops, MRJobLineNumbers);
		if ( resultIndex == -1)
			throw new LopsException("Unexpected error in piggybacking!");

		if ( rn.getExecLocation() == ExecLocation.Data
				&& ((Data)rn).getOperationType() == Data.OperationTypes.WRITE && ((Data)rn).isTransient()
				&& rootNodes.contains(rn.getInputs().get(0))
				) {
			// Both rn (a transient write) and its input are root nodes.
			// Instead of creating two copies of the data, simply generate a cpvar instruction
			NodeOutput out = setupNodeOutputs(rn, ExecType.MR, cellModeOverride, true);
			writeinst.addAll(out.getLastInstructions());
		}
		else {
			resultIndices.add(Byte.valueOf((byte)resultIndex));

			// setup output filenames and outputInfos and generate related instructions
			NodeOutput out = setupNodeOutputs(rn, ExecType.MR, cellModeOverride, false);
			outputLabels.add(out.getVarName());
			outputs.add(out.getFileName());
			outputInfos.add(out.getOutInfo());
			if (LOG.isTraceEnabled()) {
				LOG.trace("    Output Info: " + out.getFileName() + ";" + OutputInfo.outputInfoToString(out.getOutInfo()) + ";" + out.getVarName());
			}
			renameInstructions.addAll(out.getLastInstructions());
			variableInstructions.addAll(out.getPreInstructions());
			postInstructions.addAll(out.getPostInstructions());
		}
	}

	/* Determine if the output dimensions are known */

	byte[] resultIndicesByte = new byte[resultIndices.size()];
	for (int i = 0; i < resultIndicesByte.length; i++) {
		resultIndicesByte[i] = resultIndices.get(i).byteValue();
	}

	if (LOG.isTraceEnabled()) {
		LOG.trace("  Shuffle Instructions: " + getCSVString(shuffleInstructions));
		LOG.trace("  Aggregate Instructions: " + getCSVString(aggInstructionsReducer));
		LOG.trace("  Other instructions =" + getCSVString(otherInstructionsReducer));
		LOG.trace("  Output strings: " + outputs.toString());
		LOG.trace("  ResultIndices = " + resultIndices.toString());
	}

	/* Prepare the MapReduce job instruction */

	MRJobInstruction mr = new MRJobInstruction(jt);

	// check if this is a map-only job. If not, set the number of reducers
	if ( !shuffleInstructions.isEmpty() || !aggInstructionsReducer.isEmpty() || !otherInstructionsReducer.isEmpty() )
		numReducers = total_reducers;

	// set inputs, outputs, and other other properties for the job
	mr.setInputOutputLabels(inputLabels.toArray(new String[0]), outputLabels.toArray(new String[0]));
	mr.setOutputs(resultIndicesByte);
	mr.setDimsUnknownFilePrefix(getFilePath());

	mr.setNumberOfReducers(numReducers);
	mr.setReplication(replication);

	// set instructions for recordReader and mapper
	mr.setRecordReaderInstructions(getCSVString(recordReaderInstructions));
	mr.setMapperInstructions(getCSVString(mapperInstructions));

	//compute and set mapper memory requirements (for consistency of runtime piggybacking)
	if( jt == JobType.GMR ) {
		double mem = 0;
		for( Lop n : execNodes )
			mem += computeFootprintInMapper(n);
		mr.setMemoryRequirements(mem);
	}

	if ( jt == JobType.DATAGEN )
		mr.setRandInstructions(getCSVString(randInstructions));

	// set shuffle instructions
	mr.setShuffleInstructions(getCSVString(shuffleInstructions));

	// set reducer instruction
	mr.setAggregateInstructionsInReducer(getCSVString(aggInstructionsReducer));
	mr.setOtherInstructionsInReducer(getCSVString(otherInstructionsReducer));
	if(DMLScript.ENABLE_DEBUG_MODE) {
		// set line number information for each MR instruction
		mr.setMRJobInstructionsLineNumbers(MRJobLineNumbers);
	}

	/* Add the prepared instructions to output set */
	inst.addAll(variableInstructions);
	inst.add(mr);
	inst.addAll(postInstructions);
	deleteinst.addAll(renameInstructions);

	// schedule cleanup of consumed inputs (in debug mode, keep the lop
	// reference for line-number propagation)
	for (Lop l : inputLops) {
		if(DMLScript.ENABLE_DEBUG_MODE) {
			processConsumers(l, rmvarinst, deleteinst, l);
		}
		else {
			processConsumers(l, rmvarinst, deleteinst, null);
		}
	}
}
/**
* converts an array list into a Lop.INSTRUCTION_DELIMITOR separated string
*
* @param inputStrings list of input strings
* @return Lop.INSTRUCTION_DELIMITOR separated string
*/
private static String getCSVString(ArrayList<String> inputStrings) {
StringBuilder sb = new StringBuilder();
for ( String str : inputStrings ) {
if( str != null ) {
if( sb.length()>0 )
sb.append(Lop.INSTRUCTION_DELIMITOR);
sb.append( str );
}
}
return sb.toString();
}
    /**
     * Method to populate aggregate and other instructions in reducer.
     * Performs a post-order traversal from {@code node}, assigning each
     * produced intermediate an output index from {@code start_index} and
     * memoizing it in {@code nodeIndexMapping}.
     *
     * @param node low-level operator
     * @param execNodes list of exec nodes
     * @param shuffleInstructions list of shuffle instructions
     * @param aggInstructionsReducer list of aggregate instructions executed in the reducer
     * @param otherInstructionsReducer list of non-aggregate instructions executed in the reducer
     * @param nodeIndexMapping mapping from a lop to the index of the output it produces
     * @param start_index single-element array used as a mutable output-index counter
     * @param inputLabels list of input labels
     * @param inputLops list of input lops
     * @param MRJobLineNumbers MR job line numbers
     * @return output index produced for {@code node}, or -1 if problem
     * @throws LopsException if LopsException occurs
     */
    private int getAggAndOtherInstructions(Lop node, ArrayList<Lop> execNodes,
            ArrayList<String> shuffleInstructions,
            ArrayList<String> aggInstructionsReducer,
            ArrayList<String> otherInstructionsReducer,
            HashMap<Lop, Integer> nodeIndexMapping, int[] start_index,
            ArrayList<String> inputLabels, ArrayList<Lop> inputLops,
            ArrayList<Integer> MRJobLineNumbers) throws LopsException
    {
        int ret_val = -1;
        // memoization: a node that was already processed keeps its output index
        if (nodeIndexMapping.containsKey(node))
            return nodeIndexMapping.get(node);
        // if not an input source and not in exec nodes, return.
        if (!execNodes.contains(node))
            return ret_val;
        ArrayList<Integer> inputIndices = new ArrayList<Integer>();
        // recurse
        // For WRITE, since the first element from input is the real input (the other elements
        // are parameters for the WRITE operation), so we only need to take care of the
        // first element.
        if (node.getType() == Lop.Type.Data && ((Data)node).getOperationType() == Data.OperationTypes.WRITE) {
            ret_val = getAggAndOtherInstructions(node.getInputs().get(0),
                    execNodes, shuffleInstructions, aggInstructionsReducer,
                    otherInstructionsReducer, nodeIndexMapping, start_index,
                    inputLabels, inputLops, MRJobLineNumbers);
            inputIndices.add(ret_val);
        }
        else {
            // post-order: resolve every input before emitting this node's instruction
            for ( Lop cnode : node.getInputs() ) {
                ret_val = getAggAndOtherInstructions(cnode,
                        execNodes, shuffleInstructions, aggInstructionsReducer,
                        otherInstructionsReducer, nodeIndexMapping, start_index,
                        inputLabels, inputLops, MRJobLineNumbers);
                inputIndices.add(ret_val);
            }
        }
        // Data lops: only a CSV write (not fed by a TRANSFORM) contributes an instruction
        if (node.getExecLocation() == ExecLocation.Data ) {
            if ( ((Data)node).getFileFormatType() == FileFormatTypes.CSV
                    && !(node.getInputs().get(0) instanceof ParameterizedBuiltin
                    && ((ParameterizedBuiltin)node.getInputs().get(0)).getOp() == org.apache.sysml.lops.ParameterizedBuiltin.OperationTypes.TRANSFORM)) {
                // Generate write instruction, which goes into CSV_WRITE Job
                int output_index = start_index[0];
                shuffleInstructions.add(node.getInstructions(inputIndices.get(0), output_index));
                if(DMLScript.ENABLE_DEBUG_MODE) {
                    MRJobLineNumbers.add(node._beginLine);
                }
                nodeIndexMapping.put(node, output_index);
                start_index[0]++;
                return output_index;
            }
            else {
                // other data lops pass through the index of their (single) input
                return ret_val;
            }
        }
        if (node.getExecLocation() == ExecLocation.MapAndReduce) {
            /* Generate Shuffle Instruction for "node", and return the index associated with produced output */
            boolean instGenerated = true;
            int output_index = start_index[0];
            switch(node.getType()) {
            /* Lop types that take a single input */
            case ReBlock:
            case CSVReBlock:
            case SortKeys:
            case CentralMoment:
            case CoVariance:
            case GroupedAgg:
            case DataPartition:
                shuffleInstructions.add(node.getInstructions(inputIndices.get(0), output_index));
                if(DMLScript.ENABLE_DEBUG_MODE) {
                    MRJobLineNumbers.add(node._beginLine);
                }
                break;
            case ParameterizedBuiltin:
                // only TRANSFORM produces a shuffle instruction; other ops fall through
                // to "instGenerated" staying true without an instruction being added
                if( ((ParameterizedBuiltin)node).getOp() == org.apache.sysml.lops.ParameterizedBuiltin.OperationTypes.TRANSFORM ) {
                    shuffleInstructions.add(node.getInstructions(output_index));
                    if(DMLScript.ENABLE_DEBUG_MODE) {
                        MRJobLineNumbers.add(node._beginLine);
                    }
                }
                break;
            /* Lop types that take two inputs */
            case MMCJ:
            case MMRJ:
            case CombineBinary:
                shuffleInstructions.add(node.getInstructions(inputIndices.get(0), inputIndices.get(1), output_index));
                if(DMLScript.ENABLE_DEBUG_MODE) {
                    MRJobLineNumbers.add(node._beginLine);
                }
                break;
            /* Lop types that take three inputs */
            case CombineTernary:
                shuffleInstructions.add(node.getInstructions(inputIndices
                        .get(0), inputIndices.get(1), inputIndices.get(2), output_index));
                if(DMLScript.ENABLE_DEBUG_MODE) {
                    MRJobLineNumbers.add(node._beginLine);
                }
                break;
            default:
                instGenerated = false;
                break;
            }
            if ( instGenerated ) {
                nodeIndexMapping.put(node, output_index);
                start_index[0]++;
                return output_index;
            }
            else {
                // no shuffle instruction generated: forward the first input's index
                return inputIndices.get(0);
            }
        }
        /* Get instructions for aligned reduce and other lops below the reduce. */
        if (node.getExecLocation() == ExecLocation.Reduce
                || node.getExecLocation() == ExecLocation.MapOrReduce
                || hasChildNode(node, execNodes, ExecLocation.MapAndReduce)) {
            if (inputIndices.size() == 1) {
                int output_index = start_index[0];
                start_index[0]++;
                if (node.getType() == Type.Aggregate) {
                    aggInstructionsReducer.add(node.getInstructions(
                            inputIndices.get(0), output_index));
                    if(DMLScript.ENABLE_DEBUG_MODE) {
                        MRJobLineNumbers.add(node._beginLine);
                    }
                }
                else {
                    otherInstructionsReducer.add(node.getInstructions(
                            inputIndices.get(0), output_index));
                }
                // NOTE(review): for Aggregate lops the line number was already added
                // above, so debug mode records it twice here -- confirm intentional.
                if(DMLScript.ENABLE_DEBUG_MODE) {
                    MRJobLineNumbers.add(node._beginLine);
                }
                nodeIndexMapping.put(node, output_index);
                return output_index;
            } else if (inputIndices.size() == 2) {
                int output_index = start_index[0];
                start_index[0]++;
                otherInstructionsReducer.add(node.getInstructions(inputIndices
                        .get(0), inputIndices.get(1), output_index));
                if(DMLScript.ENABLE_DEBUG_MODE) {
                    MRJobLineNumbers.add(node._beginLine);
                }
                nodeIndexMapping.put(node, output_index);
                // populate list of input labels.
                // only Unary lops can contribute to labels
                if (node instanceof Unary && node.getInputs().size() > 1) {
                    // locate the scalar input among this lop's inputs
                    int index = 0;
                    for (int i = 0; i < node.getInputs().size(); i++) {
                        if (node.getInputs().get(i).getDataType() == DataType.SCALAR) {
                            index = i;
                            break;
                        }
                    }
                    // non-literal data lop: its label must be bound at runtime
                    if (node.getInputs().get(index).getExecLocation() == ExecLocation.Data
                            && !((Data) (node.getInputs().get(index))).isLiteral()) {
                        inputLabels.add(node.getInputs().get(index).getOutputParameters().getLabel());
                        inputLops.add(node.getInputs().get(index));
                    }
                    // non-data lop: an intermediate variable, also bound at runtime
                    if (node.getInputs().get(index).getExecLocation() != ExecLocation.Data) {
                        inputLabels.add(node.getInputs().get(index).getOutputParameters().getLabel());
                        inputLops.add(node.getInputs().get(index));
                    }
                }
                return output_index;
            } else if (inputIndices.size() == 3 || node.getType() == Type.Ternary) {
                int output_index = start_index[0];
                start_index[0]++;
                if (node.getType() == Type.Ternary ) {
                    // in case of CTABLE_TRANSFORM_SCALAR_WEIGHT: inputIndices.get(2) would be -1
                    otherInstructionsReducer.add(node.getInstructions(
                            inputIndices.get(0), inputIndices.get(1),
                            inputIndices.get(2), output_index));
                    if(DMLScript.ENABLE_DEBUG_MODE) {
                        MRJobLineNumbers.add(node._beginLine);
                    }
                    nodeIndexMapping.put(node, output_index);
                }
                else if( node.getType() == Type.ParameterizedBuiltin ){
                    otherInstructionsReducer.add(node.getInstructions(
                            inputIndices.get(0), inputIndices.get(1),
                            inputIndices.get(2), output_index));
                    if(DMLScript.ENABLE_DEBUG_MODE) {
                        MRJobLineNumbers.add(node._beginLine);
                    }
                    nodeIndexMapping.put(node, output_index);
                }
                else
                {
                    // generic three-input lop; returns from inside the branch,
                    // the two branches above fall through to the return below
                    otherInstructionsReducer.add(node.getInstructions(
                            inputIndices.get(0), inputIndices.get(1),
                            inputIndices.get(2), output_index));
                    if(DMLScript.ENABLE_DEBUG_MODE) {
                        MRJobLineNumbers.add(node._beginLine);
                    }
                    nodeIndexMapping.put(node, output_index);
                    return output_index;
                }
                return output_index;
            }
            else if (inputIndices.size() == 4) {
                int output_index = start_index[0];
                start_index[0]++;
                otherInstructionsReducer.add(node.getInstructions(
                        inputIndices.get(0), inputIndices.get(1),
                        inputIndices.get(2), inputIndices.get(3), output_index));
                if(DMLScript.ENABLE_DEBUG_MODE) {
                    MRJobLineNumbers.add(node._beginLine);
                }
                nodeIndexMapping.put(node, output_index);
                return output_index;
            }
            else
                throw new LopsException("Invalid number of inputs to a lop: "
                        + inputIndices.size());
        }
        return -1;
    }
/**
* Method to get record reader instructions for a MR job.
*
* @param node low-level operator
* @param execNodes list of exec nodes
* @param inputStrings list of input strings
* @param recordReaderInstructions list of record reader instructions
* @param nodeIndexMapping node index mapping
* @param start_index start index
* @param inputLabels list of input labels
* @param inputLops list of input lops
* @param MRJobLineNumbers MR job line numbers
* @return -1 if problem
* @throws LopsException if LopsException occurs
*/
private static int getRecordReaderInstructions(Lop node, ArrayList<Lop> execNodes,
ArrayList<String> inputStrings,
ArrayList<String> recordReaderInstructions,
HashMap<Lop, Integer> nodeIndexMapping, int[] start_index,
ArrayList<String> inputLabels, ArrayList<Lop> inputLops,
ArrayList<Integer> MRJobLineNumbers) throws LopsException
{
// if input source, return index
if (nodeIndexMapping.containsKey(node))
return nodeIndexMapping.get(node);
// not input source and not in exec nodes, then return.
if (!execNodes.contains(node))
return -1;
ArrayList<Integer> inputIndices = new ArrayList<Integer>();
int max_input_index = -1;
//N child_for_max_input_index = null;
// get mapper instructions
for (int i = 0; i < node.getInputs().size(); i++) {
// recurse
Lop childNode = node.getInputs().get(i);
int ret_val = getRecordReaderInstructions(childNode, execNodes,
inputStrings, recordReaderInstructions, nodeIndexMapping,
start_index, inputLabels, inputLops, MRJobLineNumbers);
inputIndices.add(ret_val);
if (ret_val > max_input_index) {
max_input_index = ret_val;
//child_for_max_input_index = childNode;
}
}
// only lops with execLocation as RecordReader can contribute
// instructions
if ((node.getExecLocation() == ExecLocation.RecordReader)) {
int output_index = max_input_index;
// cannot reuse index if this is true
// need to add better indexing schemes
output_index = start_index[0];
start_index[0]++;
nodeIndexMapping.put(node, output_index);
// populate list of input labels.
// only Ranagepick lop can contribute to labels
if (node.getType() == Type.PickValues) {
PickByCount pbc = (PickByCount) node;
if (pbc.getOperationType() == PickByCount.OperationTypes.RANGEPICK) {
int scalarIndex = 1; // always the second input is a scalar
// if data lop not a literal -- add label
if (node.getInputs().get(scalarIndex).getExecLocation() == ExecLocation.Data
&& !((Data) (node.getInputs().get(scalarIndex))).isLiteral()) {
inputLabels.add(node.getInputs().get(scalarIndex).getOutputParameters().getLabel());
inputLops.add(node.getInputs().get(scalarIndex));
}
// if not data lop, then this is an intermediate variable.
if (node.getInputs().get(scalarIndex).getExecLocation() != ExecLocation.Data) {
inputLabels.add(node.getInputs().get(scalarIndex).getOutputParameters().getLabel());
inputLops.add(node.getInputs().get(scalarIndex));
}
}
}
// get recordreader instruction.
if (node.getInputs().size() == 2) {
recordReaderInstructions.add(node.getInstructions(inputIndices
.get(0), inputIndices.get(1), output_index));
if(DMLScript.ENABLE_DEBUG_MODE) {
MRJobLineNumbers.add(node._beginLine);
}
}
else
throw new LopsException(
"Unexpected number of inputs while generating a RecordReader Instruction");
return output_index;
}
return -1;
}
    /**
     * Method to get mapper instructions for a MR job.
     * Traverses the DAG post-order from {@code node}; only Map (or
     * MapOrReduce without a reducer-side child) lops emit instructions.
     *
     * @param node low-level operator
     * @param execNodes list of exec nodes
     * @param inputStrings list of input strings
     * @param instructionsInMapper list of instructions in mapper
     * @param nodeIndexMapping mapping from a lop to the index of the output it produces
     * @param start_index single-element array used as a mutable output-index counter
     * @param inputLabels input labels
     * @param inputLops list of input lops
     * @param MRJobLineNumbers MR job line numbers
     * @return output index produced for {@code node}, or -1 if problem
     * @throws LopsException if the number of inputs is unsupported
     */
    private int getMapperInstructions(Lop node, ArrayList<Lop> execNodes,
            ArrayList<String> inputStrings,
            ArrayList<String> instructionsInMapper,
            HashMap<Lop, Integer> nodeIndexMapping, int[] start_index,
            ArrayList<String> inputLabels, ArrayList<Lop> inputLops,
            ArrayList<Integer> MRJobLineNumbers) throws LopsException
    {
        // if input source, return index
        if (nodeIndexMapping.containsKey(node))
            return nodeIndexMapping.get(node);
        // not input source and not in exec nodes, then return.
        if (!execNodes.contains(node))
            return -1;
        ArrayList<Integer> inputIndices = new ArrayList<Integer>();
        int max_input_index = -1;
        // get mapper instructions
        for( Lop childNode : node.getInputs()) {
            int ret_val = getMapperInstructions(childNode, execNodes,
                    inputStrings, instructionsInMapper, nodeIndexMapping,
                    start_index, inputLabels, inputLops, MRJobLineNumbers);
            inputIndices.add(ret_val);
            if (ret_val > max_input_index) {
                max_input_index = ret_val;
            }
        }
        // only map and map-or-reduce without a reduce child node can contribute
        // to mapper instructions.
        if ((node.getExecLocation() == ExecLocation.Map || node
                .getExecLocation() == ExecLocation.MapOrReduce)
                && !hasChildNode(node, execNodes, ExecLocation.MapAndReduce)
                && !hasChildNode(node, execNodes, ExecLocation.Reduce)
                ) {
            int output_index = max_input_index;
            // cannot reuse index if this is true
            // need to add better indexing schemes
            // if (child_for_max_input_index.getOutputs().size() > 1) {
            output_index = start_index[0];
            start_index[0]++;
            // }
            nodeIndexMapping.put(node, output_index);
            // populate list of input labels.
            // only Unary lops can contribute to labels
            if (node instanceof Unary && node.getInputs().size() > 1) {
                // Following code must be executed only for those Unary
                // operators that have more than one input
                // It should not be executed for "true" unary operators like
                // cos(A).
                int index = 0;
                for (int i1 = 0; i1 < node.getInputs().size(); i1++) {
                    if (node.getInputs().get(i1).getDataType() == DataType.SCALAR) {
                        index = i1;
                        break;
                    }
                }
                // if data lop not a literal -- add label
                if (node.getInputs().get(index).getExecLocation() == ExecLocation.Data
                        && !((Data) (node.getInputs().get(index))).isLiteral()) {
                    inputLabels.add(node.getInputs().get(index).getOutputParameters().getLabel());
                    inputLops.add(node.getInputs().get(index));
                }
                // if not data lop, then this is an intermediate variable.
                if (node.getInputs().get(index).getExecLocation() != ExecLocation.Data) {
                    inputLabels.add(node.getInputs().get(index).getOutputParameters().getLabel());
                    inputLops.add(node.getInputs().get(index));
                }
            }
            // get mapper instruction: dispatch on arity (1..5 and 7 inputs supported)
            if (node.getInputs().size() == 1)
                instructionsInMapper.add(node.getInstructions(inputIndices
                        .get(0), output_index));
            else if (node.getInputs().size() == 2) {
                instructionsInMapper.add(node.getInstructions(inputIndices
                        .get(0), inputIndices.get(1), output_index));
            }
            else if (node.getInputs().size() == 3)
                instructionsInMapper.add(node.getInstructions(inputIndices.get(0),
                        inputIndices.get(1),
                        inputIndices.get(2),
                        output_index));
            else if ( node.getInputs().size() == 4) {
                // Example: Reshape
                instructionsInMapper.add(node.getInstructions(
                        inputIndices.get(0),
                        inputIndices.get(1),
                        inputIndices.get(2),
                        inputIndices.get(3),
                        output_index ));
            }
            else if ( node.getInputs().size() == 5) {
                // Example: RangeBasedReIndex A[row_l:row_u, col_l:col_u]
                instructionsInMapper.add(node.getInstructions(
                        inputIndices.get(0),
                        inputIndices.get(1),
                        inputIndices.get(2),
                        inputIndices.get(3),
                        inputIndices.get(4),
                        output_index ));
            }
            else if ( node.getInputs().size() == 7 ) {
                // Example: RangeBasedReIndex A[row_l:row_u, col_l:col_u] = B
                instructionsInMapper.add(node.getInstructions(
                        inputIndices.get(0),
                        inputIndices.get(1),
                        inputIndices.get(2),
                        inputIndices.get(3),
                        inputIndices.get(4),
                        inputIndices.get(5),
                        inputIndices.get(6),
                        output_index ));
            }
            else
                throw new LopsException("Node with " + node.getInputs().size() + " inputs is not supported in dag.java.");
            if(DMLScript.ENABLE_DEBUG_MODE) {
                MRJobLineNumbers.add(node._beginLine);
            }
            return output_index;
        }
        return -1;
    }
    /**
     * Method to populate inputs and also populates node index mapping.
     * Collects, for every true input of the job (DataGen lops and READ data
     * lops / out-of-job producers), its path, dimensions, block sizes and
     * {@code InputInfo}, registering the input's position in
     * {@code nodeIndexMapping}.
     */
    private static void getInputPathsAndParameters(Lop node, ArrayList<Lop> execNodes,
            ArrayList<String> inputStrings, ArrayList<InputInfo> inputInfos,
            ArrayList<Long> numRows, ArrayList<Long> numCols,
            ArrayList<Long> numRowsPerBlock, ArrayList<Long> numColsPerBlock,
            HashMap<Lop, Integer> nodeIndexMapping, ArrayList<String> inputLabels,
            ArrayList<Lop> inputLops, ArrayList<Integer> MRJobLineNumbers)
            throws LopsException {
        // treat rand as an input.
        if (node.getType() == Type.DataGen && execNodes.contains(node)
                && !nodeIndexMapping.containsKey(node)) {
            numRows.add(node.getOutputParameters().getNumRows());
            numCols.add(node.getOutputParameters().getNumCols());
            numRowsPerBlock.add(node.getOutputParameters().getRowsInBlock());
            numColsPerBlock.add(node.getOutputParameters().getColsInBlock());
            inputStrings.add(node.getInstructions(inputStrings.size(), inputStrings.size()));
            if(DMLScript.ENABLE_DEBUG_MODE) {
                MRJobLineNumbers.add(node._beginLine);
            }
            inputInfos.add(InputInfo.TextCellInputInfo);
            nodeIndexMapping.put(node, inputStrings.size() - 1);
            return;
        }
        // get input file names
        // NOTE: && binds tighter than ||, so this condition reads as
        //   (out-of-job lop that is neither a data lop nor a scalar CP lop)
        //   OR (out-of-job non-scalar READ data lop, not yet mapped)
        if (!execNodes.contains(node)
                && !nodeIndexMapping.containsKey(node)
                && !(node.getExecLocation() == ExecLocation.Data)
                && (!(node.getExecLocation() == ExecLocation.ControlProgram && node
                        .getDataType() == DataType.SCALAR))
                || (!execNodes.contains(node)
                        && node.getExecLocation() == ExecLocation.Data
                        && ((Data) node).getOperationType() == Data.OperationTypes.READ
                        && ((Data) node).getDataType() != DataType.SCALAR && !nodeIndexMapping
                        .containsKey(node))) {
            if (node.getOutputParameters().getFile_name() != null) {
                inputStrings.add(node.getOutputParameters().getFile_name());
            } else {
                // use label name
                inputStrings.add(Lop.VARIABLE_NAME_PLACEHOLDER + node.getOutputParameters().getLabel()
                        + Lop.VARIABLE_NAME_PLACEHOLDER);
            }
            inputLabels.add(node.getOutputParameters().getLabel());
            inputLops.add(node);
            numRows.add(node.getOutputParameters().getNumRows());
            numCols.add(node.getOutputParameters().getNumCols());
            numRowsPerBlock.add(node.getOutputParameters().getRowsInBlock());
            numColsPerBlock.add(node.getOutputParameters().getColsInBlock());
            InputInfo nodeInputInfo = null;
            // Check if file format type is binary or text and update infos
            if (node.getOutputParameters().isBlocked()) {
                if (node.getOutputParameters().getFormat() == Format.BINARY)
                    nodeInputInfo = InputInfo.BinaryBlockInputInfo;
                else
                    throw new LopsException("Invalid format (" + node.getOutputParameters().getFormat() + ") encountered for a node/lop (ID=" + node.getID() + ") with blocked output.");
            }
            else {
                if (node.getOutputParameters().getFormat() == Format.TEXT)
                    nodeInputInfo = InputInfo.TextCellInputInfo;
                else
                    nodeInputInfo = InputInfo.BinaryCellInputInfo;
            }
            /*
             * Hardcode output Key and Value Classes for SortKeys
             */
            // TODO: statiko -- remove this hardcoding -- i.e., lops must encode
            // the information on key/value classes
            if (node.getType() == Type.SortKeys) {
                // SortKeys is the input to some other lop (say, L)
                // InputInfo of L is the ouputInfo of SortKeys, which is
                // (compactformat, doubleWriteable, IntWritable)
                nodeInputInfo = new InputInfo(PickFromCompactInputFormat.class,
                        DoubleWritable.class, IntWritable.class);
            } else if (node.getType() == Type.CombineBinary) {
                // CombineBinary is the input to some other lop (say, L)
                // InputInfo of L is the ouputInfo of CombineBinary
                // And, the outputInfo of CombineBinary depends on the operation!
                CombineBinary combine = (CombineBinary) node;
                if ( combine.getOperation() == org.apache.sysml.lops.CombineBinary.OperationTypes.PreSort ) {
                    nodeInputInfo = new InputInfo(SequenceFileInputFormat.class,
                            DoubleWritable.class, IntWritable.class);
                }
                else if ( combine.getOperation() == org.apache.sysml.lops.CombineBinary.OperationTypes.PreCentralMoment
                        || combine.getOperation() == org.apache.sysml.lops.CombineBinary.OperationTypes.PreCovUnweighted
                        || combine.getOperation() == org.apache.sysml.lops.CombineBinary.OperationTypes.PreGroupedAggUnweighted ) {
                    nodeInputInfo = InputInfo.WeightedPairInputInfo;
                }
            } else if ( node.getType() == Type.CombineTernary ) {
                nodeInputInfo = InputInfo.WeightedPairInputInfo;
            }
            inputInfos.add(nodeInputInfo);
            nodeIndexMapping.put(node, inputStrings.size() - 1);
            return;
        }
        // if exec nodes does not contain node at this point, return.
        if (!execNodes.contains(node))
            return;
        // process children recursively
        for ( Lop lop : node.getInputs() ) {
            getInputPathsAndParameters(lop, execNodes, inputStrings,
                    inputInfos, numRows, numCols, numRowsPerBlock,
                    numColsPerBlock, nodeIndexMapping, inputLabels, inputLops, MRJobLineNumbers);
        }
    }
/**
* Method to find all terminal nodes.
*
* @param execNodes list of exec nodes
* @param rootNodes list of root nodes
* @param jt job type
*/
private static void getOutputNodes(ArrayList<Lop> execNodes, ArrayList<Lop> rootNodes, JobType jt) {
for ( Lop node : execNodes ) {
// terminal node
if (node.getOutputs().isEmpty() && !rootNodes.contains(node)) {
rootNodes.add(node);
}
else {
// check for nodes with at least one child outside execnodes
int cnt = 0;
for (Lop lop : node.getOutputs() ) {
cnt += (!execNodes.contains(lop)) ? 1 : 0;
}
if (cnt > 0 && !rootNodes.contains(node) // not already a rootnode
&& !(node.getExecLocation() == ExecLocation.Data
&& ((Data) node).getOperationType() == OperationTypes.READ
&& ((Data) node).getDataType() == DataType.MATRIX) ) // Not a matrix Data READ
{
if ( jt.allowsSingleShuffleInstruction() && node.getExecLocation() != ExecLocation.MapAndReduce)
continue;
if (cnt < node.getOutputs().size()) {
if(!node.getProducesIntermediateOutput())
rootNodes.add(node);
}
else
rootNodes.add(node);
}
}
}
}
/**
* check to see if a is the child of b (i.e., there is a directed path from a to b)
*
* @param a child lop
* @param b parent lop
* @param IDMap id map
* @return true if a child of b
*/
private static boolean isChild(Lop a, Lop b, HashMap<Long, Integer> IDMap) {
int bID = IDMap.get(b.getID());
return a.get_reachable()[bID];
}
/**
* Method to topologically sort lops
*
* @param v list of lops
*/
@SuppressWarnings({ "unchecked", "rawtypes" })
private void doTopologicalSort_strict_order(ArrayList<Lop> v) {
//int numNodes = v.size();
/*
* Step 1: compute the level for each node in the DAG. Level for each node is
* computed as lops are created. So, this step is need not be performed here.
* Step 2: sort the nodes by level, and within a level by node ID.
*/
// Step1: Performed at the time of creating Lops
// Step2: sort nodes by level, and then by node ID
Lop[] nodearray = v.toArray(new Lop[0]);
Arrays.sort(nodearray, new LopComparator());
// Copy sorted nodes into "v" and construct a mapping between Lop IDs and sequence of numbers
v.clear();
IDMap.clear();
for (int i = 0; i < nodearray.length; i++) {
v.add(nodearray[i]);
IDMap.put(v.get(i).getID(), i);
}
/*
* Compute of All-pair reachability graph (Transitive Closure) of the DAG.
* - Perform a depth-first search (DFS) from every node $u$ in the DAG
* - and construct the list of reachable nodes from the node $u$
* - store the constructed reachability information in $u$.reachable[] boolean array
*/
//
//
for (int i = 0; i < nodearray.length; i++) {
boolean[] arr = v.get(i).create_reachable(nodearray.length);
Arrays.fill(arr, false);
dagDFS(v.get(i), arr);
}
// print the nodes in sorted order
if (LOG.isTraceEnabled()) {
for ( Lop vnode : v ) {
StringBuilder sb = new StringBuilder();
sb.append(vnode.getID());
sb.append("(");
sb.append(vnode.getLevel());
sb.append(") ");
sb.append(vnode.getType());
sb.append("(");
for(Lop vin : vnode.getInputs()) {
sb.append(vin.getID());
sb.append(",");
}
sb.append("), ");
LOG.trace(sb.toString());
}
LOG.trace("topological sort -- done");
}
}
/**
* Method to perform depth-first traversal from a given node in the DAG.
* Store the reachability information in marked[] boolean array.
*
* @param root low-level operator
* @param marked reachability results
*/
private void dagDFS(Lop root, boolean[] marked) {
//contains check currently required for globalopt, will be removed when cleaned up
if( !IDMap.containsKey(root.getID()) )
return;
int mapID = IDMap.get(root.getID());
if ( marked[mapID] )
return;
marked[mapID] = true;
for( Lop lop : root.getOutputs() ) {
dagDFS(lop, marked);
}
}
private static boolean hasDirectChildNode(Lop node, ArrayList<Lop> childNodes) {
if ( childNodes.isEmpty() )
return false;
for( Lop cnode : childNodes ) {
if ( cnode.getOutputs().contains(node))
return true;
}
return false;
}
    // Convenience overload: checks for any transitive child of "node" in
    // "nodes", regardless of exec location (INVALID acts as a wildcard).
    private boolean hasChildNode(Lop node, ArrayList<Lop> nodes) {
        return hasChildNode(node, nodes, ExecLocation.INVALID);
    }
private boolean hasChildNode(Lop node, ArrayList<Lop> childNodes, ExecLocation type) {
if ( childNodes.isEmpty() )
return false;
int index = IDMap.get(node.getID());
for( Lop cnode : childNodes ) {
if ( (type == ExecLocation.INVALID || cnode.getExecLocation() == type) && cnode.get_reachable()[index])
return true;
}
return false;
}
private Lop getChildNode(Lop node, ArrayList<Lop> childNodes, ExecLocation type) {
if ( childNodes.isEmpty() )
return null;
int index = IDMap.get(node.getID());
for( Lop cnode : childNodes ) {
if ( cnode.getExecLocation() == type && cnode.get_reachable()[index])
return cnode;
}
return null;
}
/*
* Returns a node "n" such that
* 1) n \in parentNodes
* 2) n is an ancestor of "node"
* 3) n.ExecLocation = type
*
* Returns null if no such "n" exists
*
*/
private Lop getParentNode(Lop node, ArrayList<Lop> parentNodes, ExecLocation type) {
if ( parentNodes.isEmpty() )
return null;
for( Lop pn : parentNodes ) {
int index = IDMap.get( pn.getID() );
if ( pn.getExecLocation() == type && node.get_reachable()[index])
return pn;
}
return null;
}
// Checks if "node" has any descendants in nodesVec with definedMRJob flag
// set to true
private boolean hasMRJobChildNode(Lop node, ArrayList<Lop> nodesVec) {
if ( nodesVec.isEmpty() )
return false;
int index = IDMap.get(node.getID());
for( Lop n : nodesVec ) {
if ( n.definesMRJob() && n.get_reachable()[index])
return true;
}
return false;
}
private boolean checkDataGenAsChildNode(Lop node, ArrayList<Lop> nodesVec) {
if( nodesVec.isEmpty() )
return true;
int index = IDMap.get(node.getID());
boolean onlyDatagen = true;
for( Lop n : nodesVec ) {
if ( n.definesMRJob() && n.get_reachable()[index] && JobType.findJobTypeFromLop(n) != JobType.DATAGEN )
onlyDatagen = false;
}
// return true also when there is no lop in "nodesVec" that defines a MR job.
return onlyDatagen;
}
private static int getChildAlignment(Lop node, ArrayList<Lop> execNodes, ExecLocation type)
{
for (Lop n : node.getInputs() ) {
if (!execNodes.contains(n))
continue;
if (execNodes.contains(n) && n.getExecLocation() == type) {
if (n.getBreaksAlignment())
return MR_CHILD_FOUND_BREAKS_ALIGNMENT;
else
return MR_CHILD_FOUND_DOES_NOT_BREAK_ALIGNMENT;
}
else {
int ret = getChildAlignment(n, execNodes, type);
if (ret == MR_CHILD_FOUND_DOES_NOT_BREAK_ALIGNMENT
|| ret == CHILD_DOES_NOT_BREAK_ALIGNMENT) {
if (n.getBreaksAlignment())
return CHILD_BREAKS_ALIGNMENT;
else
return CHILD_DOES_NOT_BREAK_ALIGNMENT;
}
else if (ret == MRCHILD_NOT_FOUND
|| ret == CHILD_BREAKS_ALIGNMENT
|| ret == MR_CHILD_FOUND_BREAKS_ALIGNMENT)
return ret;
else
throw new RuntimeException("Something wrong in getChildAlignment().");
}
}
return MRCHILD_NOT_FOUND;
}
private boolean hasParentNode(Lop node, ArrayList<Lop> parentNodes) {
if ( parentNodes.isEmpty() )
return false;
for( Lop pnode : parentNodes ) {
int index = IDMap.get( pnode.getID() );
if ( node.get_reachable()[index])
return true;
}
return false;
}
}
| iyounus/incubator-systemml | src/main/java/org/apache/sysml/lops/compile/Dag.java | Java | apache-2.0 | 143,562 |
from artnet import *
import SocketServer
import time, os, random, datetime, sys
import argparse
import socket
import struct
from subprocess import Popen, PIPE, STDOUT
import glob
# Toggle for verbose/debug behavior elsewhere in the script.
DEBUG = False
# Art-Net interface address and UDP port (6454 is the well-known Art-Net port).
# NOTE(review): whether UDP_IP is a bind address or a send target is not
# visible in this part of the file -- confirm against its usage below.
UDP_IP = "2.0.0.61"
UDP_PORT = 6454
| ScienceWorldCA/domelights | backend/artnet-bridge/artnet-server.py | Python | apache-2.0 | 234 |
/**
* Created by dmitry on 21.11.16.
*/
import React, { Component } from 'react';
import { Container, Content, Spinner } from 'native-base';
// TODO: There are spinner components available nearby; they could probably be wired in here.
export default class Loading extends Component {
render() {
return (
<Container>
<Content contentContainerStyle={{
flex: 1,
flexDirection: 'row',
justifyContent: 'center'
}}>
<Spinner color="blue"/>
</Content>
</Container>
);
}
} | dima11221122/63pokupki-react-native | js/components/loading/index.js | JavaScript | apache-2.0 | 551 |
/*
* Copyright 2017 GcsSloop
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Last modified 2017-03-08 01:01:18
*
* GitHub: https://github.com/GcsSloop
* Website: http://www.gcssloop.com
* Weibo: http://weibo.com/GcsSloop
*/
package com.github.florent37.expectanim.core.position;
import android.view.View;
/**
* Created by florentchampigny on 17/02/2017.
*/
/**
 * Position expectation that places the animated view immediately to the
 * right of another view, plus any configured margin. Only the horizontal
 * position is constrained; the vertical position is left unchanged.
 */
public class PositionAnimExpectationRightOf extends PositionAnimationViewDependant {

    public PositionAnimExpectationRightOf(View otherView) {
        super(otherView);
        // this expectation constrains the X axis only
        setForPositionX(true);
    }

    @Override
    public Float getCalculatedValueX(View viewToMove) {
        // margin plus the other view's final right edge
        return getMargin(viewToMove) + viewCalculator.finalPositionRightOfView(otherView);
    }

    @Override
    public Float getCalculatedValueY(View viewToMove) {
        // no vertical constraint
        return null;
    }
}
| GcsSloop/diycode | expectanim/src/main/java/com/github/florent37/expectanim/core/position/PositionAnimExpectationRightOf.java | Java | apache-2.0 | 1,365 |
/*
* Copyright 2007 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jsefa.common.converter;
import static java.lang.annotation.ElementType.FIELD;
import static java.lang.annotation.RetentionPolicy.RUNTIME;
import java.lang.annotation.Retention;
import java.lang.annotation.Target;
/**
* Enum constant annotation.
*
* @author Norman Lahme-Huetig
*
*/
@Retention(RUNTIME)
@Target({FIELD})
public @interface EnumConstant {
/**
* The display name of the enum constant.
*/
String value();
}
| Manmay/JSefa | src/main/java/org/jsefa/common/converter/EnumConstant.java | Java | apache-2.0 | 1,076 |
package com.douwe.notes.resource.impl;
import com.douwe.notes.entities.Cycle;
import com.douwe.notes.resource.ICycleResource;
import com.douwe.notes.service.ICycleService;
import com.douwe.notes.service.IInsfrastructureService;
import com.douwe.notes.service.ServiceException;
import java.util.List;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.ejb.EJB;
import javax.ws.rs.Path;
/**
*
* @author Vincent Douwe <douwevincent@yahoo.fr>
*/
@Path("/cycles")
public class CycleResource implements ICycleResource{
@EJB
private IInsfrastructureService infranstructureService;
@EJB
private ICycleService cycleService;
public Cycle createCycle(Cycle cycle) {
try {
return cycleService.saveOrUpdateCycle(cycle);
} catch (ServiceException ex) {
Logger.getLogger(CycleResource.class.getName()).log(Level.SEVERE, null, ex);
return null;
}
}
public List<Cycle> getAllCycle() {
try {
return cycleService.getAllCycles();
} catch (ServiceException ex) {
Logger.getLogger(CycleResource.class.getName()).log(Level.SEVERE, null, ex);
return null;
}
}
public Cycle getCycle(long id) {
try {
return cycleService.findCycleById(id);
} catch (ServiceException ex) {
Logger.getLogger(CycleResource.class.getName()).log(Level.SEVERE, null, ex);
return null;
}
}
public Cycle updateCycle(long id, Cycle cycle) {
try {
Cycle c = cycleService.findCycleById(id);
if(c != null){
c.setNom(cycle.getNom());
return cycleService.saveOrUpdateCycle(c);
}
return null;
} catch (ServiceException ex) {
Logger.getLogger(CycleResource.class.getName()).log(Level.SEVERE, null, ex);
return null;
}
}
public void deleteCycle(long id) {
try {
cycleService.deleteCycle(id);
} catch (ServiceException ex) {
Logger.getLogger(CycleResource.class.getName()).log(Level.SEVERE, null, ex);
}
}
public IInsfrastructureService getInfranstructureService() {
return infranstructureService;
}
public void setInfranstructureService(IInsfrastructureService infranstructureService) {
this.infranstructureService = infranstructureService;
}
public ICycleService getCycleService() {
return cycleService;
}
public void setCycleService(ICycleService cycleService) {
this.cycleService = cycleService;
}
}
| royken/notes | src/main/java/com/douwe/notes/resource/impl/CycleResource.java | Java | apache-2.0 | 2,688 |
//main javascript
(function init() {
// If we need to load requirejs before loading butter, make it so
if (typeof define === "undefined") {
var rscript = document.createElement("script");
rscript.onload = function () {
init();
};
rscript.src = "require.js";
document.head.appendChild(rscript);
return;
}
require.config({
baseUrl: 'js/',
paths: {
// the left side is the module ID,
// the right side is the path to
// the jQuery file, relative to baseUrl.
// Also, the path should NOT include
// the '.js' file extension. This example
// is using jQuery 1.8.2 located at
// js/jquery-1.8.2.js, relative to
// the HTML page.
jquery: 'lib/jquery-2.1.3.min',
namedwebsockets: 'lib/namedwebsockets',
qrcode: 'lib/qrcode.min',
webcodecam:'lib/WebCodeCam.min',
qrcodelib:'lib/qrcodelib',
socketio: '/socket.io/socket.io',
shake: 'lib/shake'
}
});
// Start the main app logic.
define("mediascape", ["mediascape/Agentcontext/agentcontext",
"mediascape/Association/association",
"mediascape/Discovery/discovery",
"mediascape/DiscoveryAgentContext/discoveryagentcontext",
"mediascape/Sharedstate/sharedstate",
"mediascape/Mappingservice/mappingservice",
"mediascape/Applicationcontext/applicationcontext"], function ($, Modules) {
//jQuery, modules and the discovery/modules module are all.
//loaded and can be used here now.
//creation of mediascape and discovery objects.
var mediascape = {};
var moduleList = Array.prototype.slice.apply(arguments);
mediascape.init = function (options) {
mediascapeOptions = {};
_this = Object.create(mediascape);
for (var i = 0; i < moduleList.length; i++) {
var name = moduleList[i].__moduleName;
var dontCall = ['sharedState', 'mappingService', 'applicationContext'];
if (dontCall.indexOf(name) === -1) {
mediascape[name] = new moduleList[i](mediascape, "gq" + i, mediascape);
} else {
mediascape[name] = moduleList[i];
}
}
return _this;
};
mediascape.version = "0.0.1";
// See if we have any waiting init calls that happened before we loaded require.
if (window.mediascape) {
var args = window.mediascape.__waiting;
delete window.mediascape;
if (args) {
mediascape.init.apply(this, args);
}
}
window.mediascape = mediascape;
//return of mediascape object with discovery and features objects and its functions
return mediascape;
});
require(["mediascape"], function (mediascape) {
mediascape.init();
/**
*
* Polyfill for custonevents
*/
(function () {
function CustomEvent(event, params) {
params = params || {
bubbles: false,
cancelable: false,
detail: undefined
};
var evt = document.createEvent('CustomEvent');
evt.initCustomEvent(event, params.bubbles, params.cancelable, params.detail);
return evt;
};
CustomEvent.prototype = window.Event.prototype;
window.CustomEvent = CustomEvent;
})();
var event = new CustomEvent("mediascape-ready", {
"detail": {
"loaded": true
}
});
document.dispatchEvent(event);
});
}());
| martinangel/association | helloworld/Triggers/js/mediascape/mediascape.js | JavaScript | apache-2.0 | 3,983 |
/*
* Copyright 2010 The Closure Compiler Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.javascript.jscomp;
import com.google.common.base.Preconditions;
import com.google.common.collect.HashMultiset;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Multiset;
import com.google.common.collect.Sets;
import com.google.javascript.jscomp.CompilerOptions.AliasTransformation;
import com.google.javascript.jscomp.CompilerOptions.AliasTransformationHandler;
import com.google.javascript.jscomp.Scope.Var;
import com.google.javascript.rhino.IR;
import com.google.javascript.rhino.JSDocInfo;
import com.google.javascript.rhino.Node;
import com.google.javascript.rhino.SourcePosition;
import com.google.javascript.rhino.Token;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Set;
import javax.annotation.Nullable;
/**
* Process aliases in goog.scope blocks.
*
* goog.scope(function() {
* var dom = goog.dom;
* var DIV = dom.TagName.DIV;
*
* dom.createElement(DIV);
* });
*
* should become
*
* goog.dom.createElement(goog.dom.TagName.DIV);
*
* The advantage of using goog.scope is that the compiler will *guarantee*
* the anonymous function will be inlined, even if it can't prove
* that it's semantically correct to do so. For example, consider this case:
*
* goog.scope(function() {
* goog.getBar = function () { return alias; };
* ...
* var alias = foo.bar;
* })
*
* In theory, the compiler can't inline 'alias' unless it can prove that
* goog.getBar is called only after 'alias' is defined.
*
* In practice, the compiler will inline 'alias' anyway, at the risk of
* 'fixing' bad code.
*
* @author robbyw@google.com (Robby Walker)
*/
class ScopedAliases implements HotSwapCompilerPass {
/** Name used to denote an scoped function block used for aliasing. */
static final String SCOPING_METHOD_NAME = "goog.scope";
private final AbstractCompiler compiler;
private final PreprocessorSymbolTable preprocessorSymbolTable;
private final AliasTransformationHandler transformationHandler;
// Errors
static final DiagnosticType GOOG_SCOPE_USED_IMPROPERLY = DiagnosticType.error(
"JSC_GOOG_SCOPE_USED_IMPROPERLY",
"The call to goog.scope must be alone in a single statement.");
static final DiagnosticType GOOG_SCOPE_HAS_BAD_PARAMETERS =
DiagnosticType.error(
"JSC_GOOG_SCOPE_HAS_BAD_PARAMETERS",
"The call to goog.scope must take only a single parameter. It must" +
" be an anonymous function that itself takes no parameters.");
static final DiagnosticType GOOG_SCOPE_REFERENCES_THIS = DiagnosticType.error(
"JSC_GOOG_SCOPE_REFERENCES_THIS",
"The body of a goog.scope function cannot reference 'this'.");
static final DiagnosticType GOOG_SCOPE_USES_RETURN = DiagnosticType.error(
"JSC_GOOG_SCOPE_USES_RETURN",
"The body of a goog.scope function cannot use 'return'.");
static final DiagnosticType GOOG_SCOPE_USES_THROW = DiagnosticType.error(
"JSC_GOOG_SCOPE_USES_THROW",
"The body of a goog.scope function cannot use 'throw'.");
static final DiagnosticType GOOG_SCOPE_ALIAS_REDEFINED = DiagnosticType.error(
"JSC_GOOG_SCOPE_ALIAS_REDEFINED",
"The alias {0} is assigned a value more than once.");
static final DiagnosticType GOOG_SCOPE_ALIAS_CYCLE = DiagnosticType.error(
"JSC_GOOG_SCOPE_ALIAS_CYCLE",
"The aliases {0} has a cycle.");
static final DiagnosticType GOOG_SCOPE_NON_ALIAS_LOCAL = DiagnosticType.error(
"JSC_GOOG_SCOPE_NON_ALIAS_LOCAL",
"The local variable {0} is in a goog.scope and is not an alias.");
private Multiset<String> scopedAliasNames = HashMultiset.create();
ScopedAliases(AbstractCompiler compiler,
@Nullable PreprocessorSymbolTable preprocessorSymbolTable,
AliasTransformationHandler transformationHandler) {
this.compiler = compiler;
this.preprocessorSymbolTable = preprocessorSymbolTable;
this.transformationHandler = transformationHandler;
}
@Override
public void process(Node externs, Node root) {
hotSwapScript(root, null);
}
@Override
public void hotSwapScript(Node root, Node originalRoot) {
Traversal traversal = new Traversal();
NodeTraversal.traverse(compiler, root, traversal);
if (!traversal.hasErrors()) {
// Apply the aliases.
List<AliasUsage> aliasWorkQueue =
Lists.newArrayList(traversal.getAliasUsages());
while (!aliasWorkQueue.isEmpty()) {
List<AliasUsage> newQueue = Lists.newArrayList();
for (AliasUsage aliasUsage : aliasWorkQueue) {
if (aliasUsage.referencesOtherAlias()) {
newQueue.add(aliasUsage);
} else {
aliasUsage.applyAlias();
}
}
// Prevent an infinite loop.
if (newQueue.size() == aliasWorkQueue.size()) {
Var cycleVar = newQueue.get(0).aliasVar;
compiler.report(JSError.make(
cycleVar.getNode(), GOOG_SCOPE_ALIAS_CYCLE, cycleVar.getName()));
break;
} else {
aliasWorkQueue = newQueue;
}
}
// Remove the alias definitions.
for (Node aliasDefinition : traversal.getAliasDefinitionsInOrder()) {
if (aliasDefinition.getParent().isVar() &&
aliasDefinition.getParent().hasOneChild()) {
aliasDefinition.getParent().detachFromParent();
} else {
aliasDefinition.detachFromParent();
}
}
// Collapse the scopes.
for (Node scopeCall : traversal.getScopeCalls()) {
Node expressionWithScopeCall = scopeCall.getParent();
Node scopeClosureBlock = scopeCall.getLastChild().getLastChild();
scopeClosureBlock.detachFromParent();
expressionWithScopeCall.getParent().replaceChild(
expressionWithScopeCall,
scopeClosureBlock);
NodeUtil.tryMergeBlock(scopeClosureBlock);
}
if (traversal.getAliasUsages().size() > 0 ||
traversal.getAliasDefinitionsInOrder().size() > 0 ||
traversal.getScopeCalls().size() > 0) {
compiler.reportCodeChange();
}
}
}
private abstract class AliasUsage {
final Var aliasVar;
final Node aliasReference;
AliasUsage(Var aliasVar, Node aliasReference) {
this.aliasVar = aliasVar;
this.aliasReference = aliasReference;
}
/** Checks to see if this references another alias. */
public boolean referencesOtherAlias() {
Node aliasDefinition = aliasVar.getInitialValue();
Node root = NodeUtil.getRootOfQualifiedName(aliasDefinition);
Var otherAliasVar = aliasVar.getScope().getOwnSlot(root.getString());
return otherAliasVar != null;
}
public abstract void applyAlias();
}
private class AliasedNode extends AliasUsage {
AliasedNode(Var aliasVar, Node aliasReference) {
super(aliasVar, aliasReference);
}
@Override
public void applyAlias() {
Node aliasDefinition = aliasVar.getInitialValue();
aliasReference.getParent().replaceChild(
aliasReference, aliasDefinition.cloneTree());
}
}
private class AliasedTypeNode extends AliasUsage {
AliasedTypeNode(Var aliasVar, Node aliasReference) {
super(aliasVar, aliasReference);
}
@Override
public void applyAlias() {
Node aliasDefinition = aliasVar.getInitialValue();
String aliasName = aliasVar.getName();
String typeName = aliasReference.getString();
String aliasExpanded =
Preconditions.checkNotNull(aliasDefinition.getQualifiedName());
Preconditions.checkState(typeName.startsWith(aliasName));
aliasReference.setString(typeName.replaceFirst(aliasName, aliasExpanded));
}
}
private class Traversal implements NodeTraversal.ScopedCallback {
// The job of this class is to collect these three data sets.
// The order of this list determines the order that aliases are applied.
private final List<Node> aliasDefinitionsInOrder = Lists.newArrayList();
private final List<Node> scopeCalls = Lists.newArrayList();
private final List<AliasUsage> aliasUsages = Lists.newArrayList();
// This map is temporary and cleared for each scope.
private final Map<String, Var> aliases = Maps.newHashMap();
// Suppose you create an alias.
// var x = goog.x;
// As a side-effect, this means you can shadow the namespace 'goog'
// in inner scopes. When we inline the namespaces, we have to rename
// these shadows.
//
// Fortunately, we already have a name uniquifier that runs during tree
// normalization (before optimizations). We run it here on a limited
// set of variables, but only as a last resort (because this will screw
// up warning messages downstream).
private final Set<String> forbiddenLocals = Sets.newHashSet("$jscomp");
private boolean hasNamespaceShadows = false;
private boolean hasErrors = false;
private AliasTransformation transformation = null;
Collection<Node> getAliasDefinitionsInOrder() {
return aliasDefinitionsInOrder;
}
private List<AliasUsage> getAliasUsages() {
return aliasUsages;
}
List<Node> getScopeCalls() {
return scopeCalls;
}
boolean hasErrors() {
return hasErrors;
}
private boolean isCallToScopeMethod(Node n) {
return n.isCall() &&
SCOPING_METHOD_NAME.equals(n.getFirstChild().getQualifiedName());
}
@Override
public void enterScope(NodeTraversal t) {
Node n = t.getCurrentNode().getParent();
if (n != null && isCallToScopeMethod(n)) {
transformation = transformationHandler.logAliasTransformation(
n.getSourceFileName(), getSourceRegion(n));
findAliases(t);
}
}
@Override
public void exitScope(NodeTraversal t) {
if (t.getScopeDepth() > 2) {
findNamespaceShadows(t);
}
if (t.getScopeDepth() == 2) {
renameNamespaceShadows(t);
aliases.clear();
forbiddenLocals.clear();
transformation = null;
hasNamespaceShadows = false;
}
}
@Override
public final boolean shouldTraverse(NodeTraversal t, Node n, Node parent) {
if (n.isFunction() && t.inGlobalScope()) {
// Do not traverse in to functions except for goog.scope functions.
if (parent == null || !isCallToScopeMethod(parent)) {
return false;
}
}
return true;
}
private SourcePosition<AliasTransformation> getSourceRegion(Node n) {
Node testNode = n;
Node next = null;
for (; next != null || testNode.isScript();) {
next = testNode.getNext();
testNode = testNode.getParent();
}
int endLine = next == null ? Integer.MAX_VALUE : next.getLineno();
int endChar = next == null ? Integer.MAX_VALUE : next.getCharno();
SourcePosition<AliasTransformation> pos =
new SourcePosition<AliasTransformation>() {};
pos.setPositionInformation(
n.getLineno(), n.getCharno(), endLine, endChar);
return pos;
}
private void report(NodeTraversal t, Node n, DiagnosticType error,
String... arguments) {
compiler.report(t.makeError(n, error, arguments));
hasErrors = true;
}
private void findAliases(NodeTraversal t) {
Scope scope = t.getScope();
for (Var v : scope.getVarIterable()) {
Node n = v.getNode();
Node parent = n.getParent();
boolean isVar = parent.isVar();
boolean isFunctionDecl = NodeUtil.isFunctionDeclaration(parent);
if (isVar && n.getFirstChild() != null && n.getFirstChild().isQualifiedName()) {
recordAlias(v);
} else if (v.isBleedingFunction()) {
// Bleeding functions already get a BAD_PARAMETERS error, so just
// do nothing.
} else if (parent.getType() == Token.LP) {
// Parameters of the scope function also get a BAD_PARAMETERS
// error.
} else if (isVar || isFunctionDecl) {
boolean isHoisted = NodeUtil.isHoistedFunctionDeclaration(parent);
Node grandparent = parent.getParent();
Node value = v.getInitialValue() != null ?
v.getInitialValue() :
null;
Node varNode = null;
String name = n.getString();
int nameCount = scopedAliasNames.count(name);
scopedAliasNames.add(name);
String globalName =
"$jscomp.scope." + name + (nameCount == 0 ? "" : ("$" + nameCount));
compiler.ensureLibraryInjected("base");
// First, we need to free up the function expression (EXPR)
// to be used in another expression.
if (isFunctionDecl) {
// Replace "function NAME() { ... }" with "var NAME;".
Node existingName = v.getNameNode();
// We can't keep the local name on the function expression,
// because IE is buggy and will leak the name into the global
// scope. This is covered in more detail here:
// http://wiki.ecmascript.org/lib/exe/fetch.php?id=resources:resources&cache=cache&media=resources:jscriptdeviationsfromes3.pdf
//
// This will only cause problems if this is a hoisted, recursive
// function, and the programmer is using the hoisting.
Node newName = IR.name("").useSourceInfoFrom(existingName);
value.replaceChild(existingName, newName);
varNode = IR.var(existingName).useSourceInfoFrom(existingName);
grandparent.replaceChild(parent, varNode);
} else {
if (value != null) {
// If this is a VAR, we can just detach the expression and
// the tree will still be valid.
value.detachFromParent();
}
varNode = parent;
}
// Add $jscomp.scope.name = EXPR;
// Make sure we copy over all the jsdoc and debug info.
if (value != null || v.getJSDocInfo() != null) {
Node newDecl = NodeUtil.newQualifiedNameNodeDeclaration(
compiler.getCodingConvention(),
globalName,
value,
v.getJSDocInfo())
.useSourceInfoIfMissingFromForTree(n);
NodeUtil.setDebugInformation(
newDecl.getFirstChild().getFirstChild(), n, name);
if (isHoisted) {
grandparent.addChildToFront(newDecl);
} else {
grandparent.addChildBefore(newDecl, varNode);
}
}
// Rewrite "var name = EXPR;" to "var name = $jscomp.scope.name;"
v.getNameNode().addChildToFront(
NodeUtil.newQualifiedNameNode(
compiler.getCodingConvention(), globalName, n, name));
recordAlias(v);
} else {
// Do not other kinds of local symbols, like catch params.
report(t, n, GOOG_SCOPE_NON_ALIAS_LOCAL, n.getString());
}
}
}
private void recordAlias(Var aliasVar) {
String name = aliasVar.getName();
aliases.put(name, aliasVar);
String qualifiedName =
aliasVar.getInitialValue().getQualifiedName();
transformation.addAlias(name, qualifiedName);
int rootIndex = qualifiedName.indexOf(".");
if (rootIndex != -1) {
String qNameRoot = qualifiedName.substring(0, rootIndex);
if (!aliases.containsKey(qNameRoot)) {
forbiddenLocals.add(qNameRoot);
}
}
}
/** Find out if there are any local shadows of namespaces. */
private void findNamespaceShadows(NodeTraversal t) {
if (hasNamespaceShadows) {
return;
}
Scope scope = t.getScope();
for (Var v : scope.getVarIterable()) {
if (forbiddenLocals.contains(v.getName())) {
hasNamespaceShadows = true;
return;
}
}
}
/**
* Rename any local shadows of namespaces.
* This should be a very rare occurrence, so only do this traversal
* if we know that we need it.
*/
private void renameNamespaceShadows(NodeTraversal t) {
if (hasNamespaceShadows) {
MakeDeclaredNamesUnique.Renamer renamer =
new MakeDeclaredNamesUnique.WhitelistedRenamer(
new MakeDeclaredNamesUnique.ContextualRenamer(),
forbiddenLocals);
for (String s : forbiddenLocals) {
renamer.addDeclaredName(s);
}
MakeDeclaredNamesUnique uniquifier =
new MakeDeclaredNamesUnique(renamer);
NodeTraversal.traverse(compiler, t.getScopeRoot(), uniquifier);
}
}
private void validateScopeCall(NodeTraversal t, Node n, Node parent) {
if (preprocessorSymbolTable != null) {
preprocessorSymbolTable.addReference(n.getFirstChild());
}
if (!parent.isExprResult()) {
report(t, n, GOOG_SCOPE_USED_IMPROPERLY);
}
if (n.getChildCount() != 2) {
// The goog.scope call should have exactly 1 parameter. The first
// child is the "goog.scope" and the second should be the parameter.
report(t, n, GOOG_SCOPE_HAS_BAD_PARAMETERS);
} else {
Node anonymousFnNode = n.getChildAtIndex(1);
if (!anonymousFnNode.isFunction() ||
NodeUtil.getFunctionName(anonymousFnNode) != null ||
NodeUtil.getFunctionParameters(anonymousFnNode).hasChildren()) {
report(t, anonymousFnNode, GOOG_SCOPE_HAS_BAD_PARAMETERS);
} else {
scopeCalls.add(n);
}
}
}
@Override
public void visit(NodeTraversal t, Node n, Node parent) {
if (isCallToScopeMethod(n)) {
validateScopeCall(t, n, n.getParent());
}
if (t.getScopeDepth() < 2) {
return;
}
int type = n.getType();
Var aliasVar = null;
if (type == Token.NAME) {
String name = n.getString();
Var lexicalVar = t.getScope().getVar(n.getString());
if (lexicalVar != null && lexicalVar == aliases.get(name)) {
aliasVar = lexicalVar;
}
}
// Validate the top-level of the goog.scope block.
if (t.getScopeDepth() == 2) {
if (aliasVar != null && NodeUtil.isLValue(n)) {
if (aliasVar.getNode() == n) {
aliasDefinitionsInOrder.add(n);
// Return early, to ensure that we don't record a definition
// twice.
return;
} else {
report(t, n, GOOG_SCOPE_ALIAS_REDEFINED, n.getString());
}
}
if (type == Token.RETURN) {
report(t, n, GOOG_SCOPE_USES_RETURN);
} else if (type == Token.THIS) {
report(t, n, GOOG_SCOPE_REFERENCES_THIS);
} else if (type == Token.THROW) {
report(t, n, GOOG_SCOPE_USES_THROW);
}
}
// Validate all descendent scopes of the goog.scope block.
if (t.getScopeDepth() >= 2) {
// Check if this name points to an alias.
if (aliasVar != null) {
// Note, to support the transitive case, it's important we don't
// clone aliasedNode here. For example,
// var g = goog; var d = g.dom; d.createElement('DIV');
// The node in aliasedNode (which is "g") will be replaced in the
// changes pass above with "goog". If we cloned here, we'd end up
// with <code>g.dom.createElement('DIV')</code>.
aliasUsages.add(new AliasedNode(aliasVar, n));
}
JSDocInfo info = n.getJSDocInfo();
if (info != null) {
for (Node node : info.getTypeNodes()) {
fixTypeNode(node);
}
}
// TODO(robbyw): Error for goog.scope not at root.
}
}
private void fixTypeNode(Node typeNode) {
if (typeNode.isString()) {
String name = typeNode.getString();
int endIndex = name.indexOf('.');
if (endIndex == -1) {
endIndex = name.length();
}
String baseName = name.substring(0, endIndex);
Var aliasVar = aliases.get(baseName);
if (aliasVar != null) {
aliasUsages.add(new AliasedTypeNode(aliasVar, typeNode));
}
}
for (Node child = typeNode.getFirstChild(); child != null;
child = child.getNext()) {
fixTypeNode(child);
}
}
}
}
| jhiswin/idiil-closure-compiler | src/com/google/javascript/jscomp/ScopedAliases.java | Java | apache-2.0 | 21,121 |
// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package restore
import (
"context"
"database/sql"
"fmt"
"io"
"math"
"os"
"strings"
"sync"
"time"
"github.com/coreos/go-semver/semver"
"github.com/docker/go-units"
"github.com/google/uuid"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
sstpb "github.com/pingcap/kvproto/pkg/import_sstpb"
berrors "github.com/pingcap/tidb/br/pkg/errors"
"github.com/pingcap/tidb/br/pkg/lightning/backend"
"github.com/pingcap/tidb/br/pkg/lightning/backend/importer"
"github.com/pingcap/tidb/br/pkg/lightning/backend/kv"
"github.com/pingcap/tidb/br/pkg/lightning/backend/local"
"github.com/pingcap/tidb/br/pkg/lightning/backend/tidb"
"github.com/pingcap/tidb/br/pkg/lightning/checkpoints"
"github.com/pingcap/tidb/br/pkg/lightning/common"
"github.com/pingcap/tidb/br/pkg/lightning/config"
"github.com/pingcap/tidb/br/pkg/lightning/errormanager"
"github.com/pingcap/tidb/br/pkg/lightning/glue"
"github.com/pingcap/tidb/br/pkg/lightning/log"
"github.com/pingcap/tidb/br/pkg/lightning/metric"
"github.com/pingcap/tidb/br/pkg/lightning/mydump"
"github.com/pingcap/tidb/br/pkg/lightning/tikv"
verify "github.com/pingcap/tidb/br/pkg/lightning/verification"
"github.com/pingcap/tidb/br/pkg/lightning/web"
"github.com/pingcap/tidb/br/pkg/lightning/worker"
"github.com/pingcap/tidb/br/pkg/pdutil"
"github.com/pingcap/tidb/br/pkg/storage"
"github.com/pingcap/tidb/br/pkg/utils"
"github.com/pingcap/tidb/br/pkg/version"
"github.com/pingcap/tidb/br/pkg/version/build"
"github.com/pingcap/tidb/meta/autoid"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/util/collate"
pd "github.com/tikv/pd/client"
"go.uber.org/atomic"
"go.uber.org/multierr"
"go.uber.org/zap"
"modernc.org/mathutil"
)
// Compaction levels passed to the backend when compacting imported data.
// NOTE(review): -1 appears to mean a full compaction and 1 a level-1
// compaction — confirm against the backend's Compact API.
const (
	FullLevelCompact = -1
	Level1Compact = 1
)

// defaultGCLifeTime is presumably the TiKV GC life time enforced during the
// import so data is not garbage-collected mid-import — confirm at the call
// site that applies it.
const (
	defaultGCLifeTime = 100 * time.Hour
)

// indexEngineID is the engine ID reserved for the index engine; data engines
// presumably use non-negative IDs — TODO confirm.
const (
	indexEngineID = -1
)

// States stored in Controller.compactState (atomic): whether a compaction is
// currently running.
const (
	compactStateIdle int32 = iota
	compactStateDoing
)
const (
	// TaskMetaTableName is the (unqualified) name of the table holding
	// per-task metadata (see CreateTaskMetaTable for its schema).
	TaskMetaTableName = "task_meta"
	// TableMetaTableName is the (unqualified) name of the table holding
	// per-table metadata (see CreateTableMetadataTable for its schema).
	TableMetaTableName = "table_meta"
	// CreateTableMetadataTable stores the per-table sub jobs information used by TiDB Lightning
	// (%s is filled in with the fully-qualified table name).
	CreateTableMetadataTable = `CREATE TABLE IF NOT EXISTS %s (
		task_id BIGINT(20) UNSIGNED,
		table_id BIGINT(64) NOT NULL,
		table_name VARCHAR(64) NOT NULL,
		row_id_base BIGINT(20) NOT NULL DEFAULT 0,
		row_id_max BIGINT(20) NOT NULL DEFAULT 0,
		total_kvs_base BIGINT(20) UNSIGNED NOT NULL DEFAULT 0,
		total_bytes_base BIGINT(20) UNSIGNED NOT NULL DEFAULT 0,
		checksum_base BIGINT(20) UNSIGNED NOT NULL DEFAULT 0,
		total_kvs BIGINT(20) UNSIGNED NOT NULL DEFAULT 0,
		total_bytes BIGINT(20) UNSIGNED NOT NULL DEFAULT 0,
		checksum BIGINT(20) UNSIGNED NOT NULL DEFAULT 0,
		status VARCHAR(32) NOT NULL,
		has_duplicates BOOL NOT NULL DEFAULT 0,
		PRIMARY KEY (table_id, task_id)
	);`
	// CreateTaskMetaTable stores the pre-lightning metadata used by TiDB Lightning
	// (%s is filled in with the fully-qualified table name).
	CreateTaskMetaTable = `CREATE TABLE IF NOT EXISTS %s (
		task_id BIGINT(20) UNSIGNED NOT NULL,
		pd_cfgs VARCHAR(2048) NOT NULL DEFAULT '',
		status VARCHAR(32) NOT NULL,
		state TINYINT(1) NOT NULL DEFAULT 0 COMMENT '0: normal, 1: exited before finish',
		source_bytes BIGINT(20) UNSIGNED NOT NULL DEFAULT 0,
		cluster_avail BIGINT(20) UNSIGNED NOT NULL DEFAULT 0,
		PRIMARY KEY (task_id)
	);`

	// NOTE(review): presumably the size bounds used when deciding whether to
	// trigger a compaction — confirm at the use sites (not visible here).
	compactionLowerThreshold = 512 * units.MiB
	compactionUpperThreshold = 32 * units.GiB
)
var (
	// TiKV version range in which duplicate resolution is supported; checked
	// in NewRestoreControllerWithPauser before enabling
	// TikvImporter.DuplicateResolution for the local backend.
	minTiKVVersionForDuplicateResolution = *semver.New("5.2.0")
	maxTiKVVersionForDuplicateResolution = version.NextMajorVersion()
)

// DeliverPauser is a shared pauser to pause progress to (*chunkRestore).encodeLoop
var DeliverPauser = common.NewPauser()
// nolint:gochecknoinits // TODO: refactor
func init() {
	// Test hook: the "SetMinDeliverBytes" failpoint overrides minDeliverBytes
	// (declared elsewhere in this package) so tests can force small delivery
	// batches. failpoint.Inject calls are rewritten by the failpoint tool, so
	// the shape of this call must stay as-is.
	failpoint.Inject("SetMinDeliverBytes", func(v failpoint.Value) {
		minDeliverBytes = uint64(v.(int))
	})
}
// saveCp is a request to persist a table checkpoint, consumed from
// Controller.saveCpCh by the checkpoint worker.
type saveCp struct {
	tableName string
	merger checkpoints.TableCheckpointMerger
	// waitCh, when non-nil, presumably receives the outcome of the save so
	// the requester can block on completion — confirm against the checkpoint
	// loop (not visible in this chunk).
	waitCh chan<- error
}

// errorSummary records the terminal checkpoint status and error of one
// failed table.
type errorSummary struct {
	status checkpoints.CheckpointStatus
	err error
}

// errorSummaries accumulates per-table import failures (guarded by the
// embedded mutex) so they can be reported together via emitLog.
type errorSummaries struct {
	sync.Mutex
	logger log.Logger
	// summary maps table name -> its final failure.
	summary map[string]errorSummary
}
// makeErrorSummaries builds an empty errorSummaries that reports through the
// supplied logger.
func makeErrorSummaries(logger log.Logger) errorSummaries {
	es := errorSummaries{logger: logger}
	es.summary = make(map[string]errorSummary)
	return es
}
// emitLog writes every recorded table failure to the logger: first an
// aggregate count, then one line per failed table. It does nothing when no
// failures were recorded.
func (es *errorSummaries) emitLog() {
	es.Lock()
	defer es.Unlock()

	count := len(es.summary)
	if count == 0 {
		return
	}
	es.logger.Error("tables failed to be imported", zap.Int("count", count))
	for name, summary := range es.summary {
		es.logger.Error("-",
			zap.String("table", name),
			zap.String("status", summary.status.MetricName()),
			log.ShortError(summary.err),
		)
	}
}
// record stores (or overwrites) the failure status and error for the given
// table, under the lock.
func (es *errorSummaries) record(tableName string, err error, status checkpoints.CheckpointStatus) {
	es.Lock()
	defer es.Unlock()
	entry := errorSummary{status: status, err: err}
	es.summary[tableName] = entry
}
const (
	// States stored in Controller.diskQuotaState (atomic).
	diskQuotaStateIdle int32 = iota
	diskQuotaStateChecking
	diskQuotaStateImporting
	// diskQuotaMaxReaders mirrors sync.RWMutex's rwmutexMaxReaders trick: a
	// pending writer subtracts it from readerCount, driving the count
	// negative so new readers are refused (see Lock/TryRLock below).
	diskQuotaMaxReaders = 1 << 30
)

// diskQuotaLock is essentially a read/write lock. The implementation here is
// inspired by sync.RWMutex.
// diskQuotaLock removes the unnecessary blocking `RLock` method and adds a
// non-blocking `TryRLock` method.
type diskQuotaLock struct {
	w sync.Mutex // held if there are pending writers
	writerSem chan struct{} // semaphore for writers to wait for completing readers
	readerCount atomic.Int32 // number of pending readers (negative while a writer is pending)
	readerWait atomic.Int32 // number of departing readers
}
// newDiskQuotaLock returns a ready-to-use diskQuotaLock; only the writer
// semaphore channel needs explicit initialization (the atomics and mutex are
// usable at their zero values).
func newDiskQuotaLock() *diskQuotaLock {
	lock := diskQuotaLock{
		writerSem: make(chan struct{}),
	}
	return &lock
}
// Lock acquires the write (exclusive) side of the lock. It blocks until every
// reader that was active at the time of the call has released via RUnlock.
func (d *diskQuotaLock) Lock() {
	d.w.Lock()
	// Announce to readers there is a pending writer.
	// Subtracting diskQuotaMaxReaders makes readerCount negative, which
	// TryRLock treats as "writer pending" and refuses new readers; adding it
	// back recovers r, the number of readers active at this instant.
	r := d.readerCount.Sub(diskQuotaMaxReaders) + diskQuotaMaxReaders
	if r != 0 && d.readerWait.Add(r) != 0 {
		// Wait for active readers.
		// (The last departing reader signals writerSem in RUnlock.)
		<-d.writerSem
	}
}
// Unlock releases the write side: it restores readerCount to non-negative so
// TryRLock can admit readers again, then releases the writer mutex.
func (d *diskQuotaLock) Unlock() {
	d.readerCount.Add(diskQuotaMaxReaders)
	d.w.Unlock()
}
// TryRLock attempts to take a read (shared) lock without blocking. It
// succeeds only while no writer is pending or active (readerCount >= 0);
// otherwise it returns false immediately.
func (d *diskQuotaLock) TryRLock() (locked bool) {
	for {
		r := d.readerCount.Load()
		if r < 0 {
			// A writer announced itself in Lock(); refuse new readers.
			return false
		}
		if d.readerCount.CAS(r, r+1) {
			return true
		}
		// Lost a CAS race with another reader/writer; retry with fresh state.
	}
}
// RUnlock releases a read lock previously obtained via TryRLock. When a
// writer is pending (readerCount is negative, see Lock), the last departing
// reader unblocks the writer through writerSem.
func (d *diskQuotaLock) RUnlock() {
	if d.readerCount.Dec() < 0 {
		if d.readerWait.Dec() == 0 {
			// The last reader unblocks the writer.
			d.writerSem <- struct{}{}
		}
	}
}
// Controller owns the state of one import task: the parsed source metadata,
// the worker pools bounding pipeline concurrency, the chosen backend,
// checkpointing, and the managers wired up in NewRestoreControllerWithPauser.
type Controller struct {
	cfg *config.Config
	dbMetas []*mydump.MDDatabaseMeta
	dbInfos map[string]*checkpoints.TidbDBInfo
	// Worker pools limiting concurrency of the individual pipeline stages.
	tableWorkers *worker.Pool
	indexWorkers *worker.Pool
	regionWorkers *worker.Pool
	ioWorkers *worker.Pool
	checksumWorks *worker.Pool
	pauser *common.Pauser
	backend backend.Backend
	tidbGlue glue.Glue
	alterTableLock sync.Mutex
	sysVars map[string]string
	tls *common.TLS
	checkTemplate Template
	// errorSummaries collects per-table failures for the end-of-run report.
	errorSummaries errorSummaries
	checkpointsDB checkpoints.DB
	// saveCpCh feeds checkpoint-save requests (saveCp) to a background
	// worker; checkpointsWg waits for that worker to drain.
	saveCpCh chan saveCp
	checkpointsWg sync.WaitGroup
	closedEngineLimit *worker.Pool
	store storage.ExternalStorage
	metaMgrBuilder metaMgrBuilder
	errorMgr *errormanager.ErrorManager
	taskMgr taskMetaMgr
	// diskQuotaState and compactState hold the diskQuotaState*/compactState*
	// constants declared above; diskQuotaLock coordinates importing with the
	// disk-quota check.
	diskQuotaLock *diskQuotaLock
	diskQuotaState atomic.Int32
	compactState atomic.Int32
	status *LightningStatus
}

// LightningStatus exposes progress counters (finished vs. total source file
// bytes) as atomics so they can be read concurrently by status reporters.
type LightningStatus struct {
	FinishedFileSize atomic.Int64
	TotalFileSize atomic.Int64
}
// NewRestoreController creates a restore Controller wired to the package's
// shared DeliverPauser. It is a thin wrapper around
// NewRestoreControllerWithPauser; see that function for parameter semantics
// and error conditions.
func NewRestoreController(
	ctx context.Context,
	dbMetas []*mydump.MDDatabaseMeta,
	cfg *config.Config,
	status *LightningStatus,
	s storage.ExternalStorage,
	g glue.Glue,
) (*Controller, error) {
	return NewRestoreControllerWithPauser(ctx, dbMetas, cfg, status, s, DeliverPauser, g)
}
// NewRestoreControllerWithPauser builds a Controller with an explicit
// pauser. It opens the checkpoint DB, validates any previous task
// checkpoint against the current config, initializes the error manager,
// constructs the backend selected by cfg.TikvImporter.Backend, and picks
// a task-meta manager appropriate for that backend.
func NewRestoreControllerWithPauser(
	ctx context.Context,
	dbMetas []*mydump.MDDatabaseMeta,
	cfg *config.Config,
	status *LightningStatus,
	s storage.ExternalStorage,
	pauser *common.Pauser,
	g glue.Glue,
) (*Controller, error) {
	tls, err := cfg.ToTLS()
	if err != nil {
		return nil, err
	}
	cpdb, err := g.OpenCheckpointsDB(ctx, cfg)
	if err != nil {
		return nil, errors.Annotate(err, "open checkpoint db failed")
	}
	taskCp, err := cpdb.TaskCheckpoint(ctx)
	if err != nil {
		return nil, errors.Annotate(err, "get task checkpoint failed")
	}
	// Refuse to resume a checkpoint produced under an incompatible config.
	if err := verifyCheckpoint(cfg, taskCp); err != nil {
		return nil, errors.Trace(err)
	}
	// reuse task id to reuse task meta correctly.
	if taskCp != nil {
		cfg.TaskID = taskCp.TaskID
	}
	// TODO: support Lightning via SQL
	db, err := g.GetDB()
	if err != nil {
		return nil, errors.Trace(err)
	}
	errorMgr := errormanager.New(db, cfg)
	if err := errorMgr.Init(ctx); err != nil {
		return nil, errors.Annotate(err, "failed to init error manager")
	}
	// Select and construct the backend implementation.
	var backend backend.Backend
	switch cfg.TikvImporter.Backend {
	case config.BackendImporter:
		var err error
		backend, err = importer.NewImporter(ctx, tls, cfg.TikvImporter.Addr, cfg.TiDB.PdAddr)
		if err != nil {
			return nil, errors.Annotate(err, "open importer backend failed")
		}
	case config.BackendTiDB:
		db, err := g.GetDB()
		if err != nil {
			return nil, errors.Annotate(err, "open tidb backend failed")
		}
		backend = tidb.NewTiDBBackend(db, cfg.TikvImporter.OnDuplicate, errorMgr)
	case config.BackendLocal:
		var rLimit local.Rlim_t
		rLimit, err = local.GetSystemRLimit()
		if err != nil {
			return nil, err
		}
		// Split the process file-descriptor budget across concurrent tables.
		maxOpenFiles := int(rLimit / local.Rlim_t(cfg.App.TableConcurrency))
		// check overflow
		if maxOpenFiles < 0 {
			maxOpenFiles = math.MaxInt32
		}
		// Duplicate resolution needs a sufficiently recent TiKV; otherwise
		// fall back to 'none' rather than failing the whole task.
		if cfg.TikvImporter.DuplicateResolution != config.DupeResAlgNone {
			if err := tikv.CheckTiKVVersion(ctx, tls, cfg.TiDB.PdAddr, minTiKVVersionForDuplicateResolution, maxTiKVVersionForDuplicateResolution); err != nil {
				if berrors.Is(err, berrors.ErrVersionMismatch) {
					log.L().Warn("TiKV version doesn't support duplicate resolution. The resolution algorithm will fall back to 'none'", zap.Error(err))
					cfg.TikvImporter.DuplicateResolution = config.DupeResAlgNone
				} else {
					return nil, errors.Annotate(err, "check TiKV version for duplicate resolution failed")
				}
			}
		}
		backend, err = local.NewLocalBackend(ctx, tls, cfg, g, maxOpenFiles, errorMgr)
		if err != nil {
			return nil, errors.Annotate(err, "build local backend failed")
		}
		// Make sure SST files from a previous run are still on disk before
		// resuming from checkpoints.
		err = verifyLocalFile(ctx, cpdb, cfg.TikvImporter.SortedKVDir)
		if err != nil {
			return nil, err
		}
	default:
		return nil, errors.New("unknown backend: " + cfg.TikvImporter.Backend)
	}
	// Only local/importer backends persist task meta in a downstream schema;
	// the TiDB backend uses a no-op manager.
	var metaBuilder metaMgrBuilder
	switch cfg.TikvImporter.Backend {
	case config.BackendLocal, config.BackendImporter:
		metaBuilder = &dbMetaMgrBuilder{
			db:           db,
			taskID:       cfg.TaskID,
			schema:       cfg.App.MetaSchemaName,
			needChecksum: cfg.PostRestore.Checksum != config.OpLevelOff,
		}
	default:
		metaBuilder = noopMetaMgrBuilder{}
	}
	rc := &Controller{
		cfg:           cfg,
		dbMetas:       dbMetas,
		tableWorkers:  nil,
		indexWorkers:  nil,
		regionWorkers: worker.NewPool(ctx, cfg.App.RegionConcurrency, "region"),
		ioWorkers:     worker.NewPool(ctx, cfg.App.IOConcurrency, "io"),
		checksumWorks: worker.NewPool(ctx, cfg.TiDB.ChecksumTableConcurrency, "checksum"),
		pauser:        pauser,
		backend:       backend,
		tidbGlue:      g,
		sysVars:       defaultImportantVariables,
		tls:           tls,
		checkTemplate: NewSimpleTemplate(),

		errorSummaries:    makeErrorSummaries(log.L()),
		checkpointsDB:     cpdb,
		saveCpCh:          make(chan saveCp),
		closedEngineLimit: worker.NewPool(ctx, cfg.App.TableConcurrency*2, "closed-engine"),

		store:          s,
		metaMgrBuilder: metaBuilder,
		errorMgr:       errorMgr,
		diskQuotaLock:  newDiskQuotaLock(),
		status:         status,
		taskMgr:        nil,
	}
	return rc, nil
}
// Close releases the backend before closing the glue's SQL executor.
func (rc *Controller) Close() {
	rc.backend.Close()
	rc.tidbGlue.GetSQLExecutor().Close()
}
func (rc *Controller) Run(ctx context.Context) error {
opts := []func(context.Context) error{
rc.setGlobalVariables,
rc.restoreSchema,
rc.preCheckRequirements,
rc.restoreTables,
rc.fullCompact,
rc.cleanCheckpoints,
}
task := log.L().Begin(zap.InfoLevel, "the whole procedure")
var err error
finished := false
outside:
for i, process := range opts {
err = process(ctx)
if i == len(opts)-1 {
finished = true
}
logger := task.With(zap.Int("step", i), log.ShortError(err))
switch {
case err == nil:
case log.IsContextCanceledError(err):
logger.Info("task canceled")
err = nil
break outside
default:
logger.Error("run failed")
fmt.Fprintf(os.Stderr, "Error: %s\n", err)
break outside // ps : not continue
}
}
// if process is cancelled, should make sure checkpoints are written to db.
if !finished {
rc.waitCheckpointFinish()
}
task.End(zap.ErrorLevel, err)
rc.errorSummaries.emitLog()
return errors.Trace(err)
}
// schemaStmtType enumerates the kinds of DDL jobs produced while restoring
// schemas (database, table, view).
type schemaStmtType int

// String describes the statement type for log and error messages; unknown
// values yield a generic placeholder.
func (stmtType schemaStmtType) String() string {
	if stmtType >= schemaCreateDatabase && stmtType <= schemaCreateView {
		return [...]string{
			"restore database schema",
			"restore table schema",
			"restore view schema",
		}[stmtType]
	}
	return "unknown statement of schema"
}

const (
	schemaCreateDatabase schemaStmtType = iota
	schemaCreateTable
	schemaCreateView
)
// schemaJob is one unit of schema-restore work: all DDL statements for a
// single database, table or view, executed by restoreSchemaWorker.doJob.
type schemaJob struct {
	dbName   string
	tblName  string // empty for create db jobs
	stmtType schemaStmtType
	stmts    []*schemaStmt
}
// schemaStmt wraps a single DDL statement text belonging to a schemaJob.
type schemaStmt struct {
	sql string
}
// restoreSchemaWorker fans schema-restore jobs out to a pool of goroutines.
// makeJobs produces into jobCh; doJob consumes; errCh carries the first
// failure and quit cancels the shared context.
type restoreSchemaWorker struct {
	ctx   context.Context
	quit  context.CancelFunc
	jobCh chan *schemaJob
	errCh chan error
	wg    sync.WaitGroup
	glue  glue.Glue
	store storage.ExternalStorage
}
// makeJobs produces schema-restore jobs in three phases — databases, then
// tables, then views — waiting for each phase to drain before starting the
// next, because later DDL may depend on earlier objects (views can even
// cross databases). Jobs are sent to worker.jobCh for concurrent execution
// by doJob; views are restored strictly one at a time.
//
// Fix: errors from GetSchema are now checked immediately. Previously the
// error was only examined after the (possibly partial) SQL had already been
// parsed and enqueued, because an inner `stmts, err :=` shadowed it.
func (worker *restoreSchemaWorker) makeJobs(
	dbMetas []*mydump.MDDatabaseMeta,
	getTables func(context.Context, string) ([]*model.TableInfo, error),
) error {
	defer func() {
		close(worker.jobCh)
		worker.quit()
	}()
	var err error
	// 1. restore databases, execute statements concurrency
	for _, dbMeta := range dbMetas {
		restoreSchemaJob := &schemaJob{
			dbName:   dbMeta.Name,
			stmtType: schemaCreateDatabase,
			stmts:    make([]*schemaStmt, 0, 1),
		}
		restoreSchemaJob.stmts = append(restoreSchemaJob.stmts, &schemaStmt{
			sql: createDatabaseIfNotExistStmt(dbMeta.Name),
		})
		err = worker.appendJob(restoreSchemaJob)
		if err != nil {
			return err
		}
	}
	err = worker.wait()
	if err != nil {
		return err
	}
	// 2. restore tables, execute statements concurrency
	for _, dbMeta := range dbMetas {
		// we can ignore error here, and let check failed later if schema not match
		tables, _ := getTables(worker.ctx, dbMeta.Name)
		tableMap := make(map[string]struct{})
		for _, t := range tables {
			tableMap[t.Name.L] = struct{}{}
		}
		for _, tblMeta := range dbMeta.Tables {
			if _, ok := tableMap[strings.ToLower(tblMeta.Name)]; ok {
				// we already has this table in TiDB.
				// we should skip ddl job and let SchemaValid check.
				continue
			} else if tblMeta.SchemaFile.FileMeta.Path == "" {
				return errors.Errorf("table `%s`.`%s` schema not found", dbMeta.Name, tblMeta.Name)
			}
			sql, err := tblMeta.GetSchema(worker.ctx, worker.store)
			// Check the read error before using the SQL text at all.
			if err != nil {
				return err
			}
			if sql != "" {
				stmts, err := createTableIfNotExistsStmt(worker.glue.GetParser(), sql, dbMeta.Name, tblMeta.Name)
				if err != nil {
					return err
				}
				restoreSchemaJob := &schemaJob{
					dbName:   dbMeta.Name,
					tblName:  tblMeta.Name,
					stmtType: schemaCreateTable,
					stmts:    make([]*schemaStmt, 0, len(stmts)),
				}
				for _, sql := range stmts {
					restoreSchemaJob.stmts = append(restoreSchemaJob.stmts, &schemaStmt{
						sql: sql,
					})
				}
				err = worker.appendJob(restoreSchemaJob)
				if err != nil {
					return err
				}
			}
		}
	}
	err = worker.wait()
	if err != nil {
		return err
	}
	// 3. restore views. Since views can cross database we must restore views after all table schemas are restored.
	for _, dbMeta := range dbMetas {
		for _, viewMeta := range dbMeta.Views {
			sql, err := viewMeta.GetSchema(worker.ctx, worker.store)
			// Same as above: surface the read error before parsing.
			if err != nil {
				return err
			}
			if sql == "" {
				continue
			}
			stmts, err := createTableIfNotExistsStmt(worker.glue.GetParser(), sql, dbMeta.Name, viewMeta.Name)
			if err != nil {
				return err
			}
			restoreSchemaJob := &schemaJob{
				dbName:   dbMeta.Name,
				tblName:  viewMeta.Name,
				stmtType: schemaCreateView,
				stmts:    make([]*schemaStmt, 0, len(stmts)),
			}
			for _, sql := range stmts {
				restoreSchemaJob.stmts = append(restoreSchemaJob.stmts, &schemaStmt{
					sql: sql,
				})
			}
			err = worker.appendJob(restoreSchemaJob)
			if err != nil {
				return err
			}
			// we don't support restore views concurrency, cauz it maybe will raise a error
			err = worker.wait()
			if err != nil {
				return err
			}
		}
	}
	return nil
}
// doJob is the body of one schema-restore goroutine. It lazily opens a
// dedicated SQL connection, executes every statement of each job pulled
// from jobCh, and carefully keeps the WaitGroup balanced: each received
// job accounts for exactly one wg.Done(), including jobs drained after
// cancellation or an error.
func (worker *restoreSchemaWorker) doJob() {
	var session *sql.Conn
	defer func() {
		if session != nil {
			_ = session.Close()
		}
	}()
loop:
	for {
		select {
		case <-worker.ctx.Done():
			// don't `return` or throw `worker.ctx.Err()`here,
			// if we `return`, we can't mark cancelled jobs as done,
			// if we `throw(worker.ctx.Err())`, it will be blocked to death
			break loop
		case job := <-worker.jobCh:
			if job == nil {
				// successful exit
				return
			}
			var err error
			// Open the connection on first use so idle workers don't hold one.
			if session == nil {
				session, err = func() (*sql.Conn, error) {
					// TODO: support lightning in SQL
					db, err := worker.glue.GetDB()
					if err != nil {
						return nil, errors.Trace(err)
					}
					return db.Conn(worker.ctx)
				}()
				if err != nil {
					worker.wg.Done()
					worker.throw(err)
					// don't return
					break loop
				}
			}
			logger := log.With(zap.String("db", job.dbName), zap.String("table", job.tblName))
			sqlWithRetry := common.SQLWithRetry{
				Logger: log.L(),
				DB:     session,
			}
			for _, stmt := range job.stmts {
				task := logger.Begin(zap.DebugLevel, fmt.Sprintf("execute SQL: %s", stmt.sql))
				err = sqlWithRetry.Exec(worker.ctx, "run create schema job", stmt.sql)
				task.End(zap.ErrorLevel, err)
				if err != nil {
					err = errors.Annotatef(err, "%s %s failed", job.stmtType.String(), common.UniqueTable(job.dbName, job.tblName))
					worker.wg.Done()
					worker.throw(err)
					// don't return
					break loop
				}
			}
			worker.wg.Done()
		}
	}
	// mark the cancelled job as `Done`, a little tricky,
	// cauz we need make sure `worker.wg.Wait()` wouldn't blocked forever
	for range worker.jobCh {
		worker.wg.Done()
	}
}
// wait blocks until all appended jobs are done, an error is thrown by a
// worker goroutine, or the context is cancelled — whichever comes first.
func (worker *restoreSchemaWorker) wait() error {
	// avoid to `worker.wg.Wait()` blocked forever when all `doJob`'s goroutine exited.
	// don't worry about goroutine below, it never become a zombie,
	// cauz we have mechanism to clean cancelled jobs in `worker.jobCh`.
	// means whole jobs has been send to `worker.jobCh` would be done.
	waitCh := make(chan struct{})
	go func() {
		worker.wg.Wait()
		close(waitCh)
	}()
	select {
	case err := <-worker.errCh:
		return err
	case <-worker.ctx.Done():
		return worker.ctx.Err()
	case <-waitCh:
		return nil
	}
}
// throw delivers err to the coordinator via errCh and cancels the shared
// context. If the context is already done, the error is dropped since no
// one is left to receive it.
func (worker *restoreSchemaWorker) throw(err error) {
	select {
	case <-worker.ctx.Done():
		// don't throw `worker.ctx.Err()` again, it will be blocked to death.
		return
	case worker.errCh <- err:
		worker.quit()
	}
}
// appendJob enqueues a job for the worker pool. The WaitGroup is bumped
// before the send and rolled back on error/cancellation so it always
// matches the number of jobs actually delivered to jobCh.
func (worker *restoreSchemaWorker) appendJob(job *schemaJob) error {
	worker.wg.Add(1)
	select {
	case err := <-worker.errCh:
		// cancel the job
		worker.wg.Done()
		return err
	case <-worker.ctx.Done():
		// cancel the job
		worker.wg.Done()
		return worker.ctx.Err()
	case worker.jobCh <- job:
		return nil
	}
}
// restoreSchema creates all databases/tables/views from the dump's schema
// files using a small worker pool, loads the resulting schema info from
// TiDB, initializes checkpoints, starts the checkpoint-update listener,
// fetches session variables and seeds the progress metrics.
func (rc *Controller) restoreSchema(ctx context.Context) error {
	// create table with schema file
	// we can handle the duplicated created with createIfNotExist statement
	// and we will check the schema in TiDB is valid with the datafile in DataCheck later.
	logTask := log.L().Begin(zap.InfoLevel, "restore all schema")
	// Cap schema-restore concurrency at 8 regardless of region concurrency.
	concurrency := utils.MinInt(rc.cfg.App.RegionConcurrency, 8)
	childCtx, cancel := context.WithCancel(ctx)
	worker := restoreSchemaWorker{
		ctx:   childCtx,
		quit:  cancel,
		jobCh: make(chan *schemaJob, concurrency),
		errCh: make(chan error),
		glue:  rc.tidbGlue,
		store: rc.store,
	}
	for i := 0; i < concurrency; i++ {
		go worker.doJob()
	}
	getTableFunc := rc.backend.FetchRemoteTableModels
	if !rc.tidbGlue.OwnsSQLExecutor() {
		getTableFunc = rc.tidbGlue.GetTables
	}
	err := worker.makeJobs(rc.dbMetas, getTableFunc)
	logTask.End(zap.ErrorLevel, err)
	if err != nil {
		return err
	}
	dbInfos, err := LoadSchemaInfo(ctx, rc.dbMetas, getTableFunc)
	if err != nil {
		return errors.Trace(err)
	}
	rc.dbInfos = dbInfos
	if rc.tidbGlue.OwnsSQLExecutor() {
		if err = rc.DataCheck(ctx); err != nil {
			return errors.Trace(err)
		}
	}
	// Load new checkpoints
	err = rc.checkpointsDB.Initialize(ctx, rc.cfg, dbInfos)
	if err != nil {
		return errors.Trace(err)
	}
	failpoint.Inject("InitializeCheckpointExit", func() {
		log.L().Warn("exit triggered", zap.String("failpoint", "InitializeCheckpointExit"))
		os.Exit(0)
	})
	// Background goroutine coalescing checkpoint updates; runs until
	// saveCpCh is closed.
	go rc.listenCheckpointUpdates()
	sysVars := ObtainImportantVariables(ctx, rc.tidbGlue.GetSQLExecutor(), !rc.isTiDBBackend())
	// override by manually set vars
	for k, v := range rc.cfg.TiDB.Vars {
		sysVars[k] = v
	}
	rc.sysVars = sysVars
	// Estimate the number of chunks for progress reporting
	err = rc.estimateChunkCountIntoMetrics(ctx)
	if err != nil {
		return errors.Trace(err)
	}
	return nil
}
// verifyCheckpoint checks whether a previous task checkpoint is compatible
// with the current task config. The backend must always match; the other
// fields are only enforced when check-requirements is enabled. On mismatch
// the returned error explains how to retry (destroy checkpoints / delete
// the checkpoint file and remove restored tables).
//
// Fix: the tikv-importer.addr mismatch error previously printed the
// backend values instead of the addresses being compared.
func verifyCheckpoint(cfg *config.Config, taskCp *checkpoints.TaskCheckpoint) error {
	if taskCp == nil {
		return nil
	}
	// always check the backend value even with 'check-requirements = false'
	retryUsage := "destroy all checkpoints"
	if cfg.Checkpoint.Driver == config.CheckpointDriverFile {
		retryUsage = fmt.Sprintf("delete the file '%s'", cfg.Checkpoint.DSN)
	}
	retryUsage += " and remove all restored tables and try again"
	if cfg.TikvImporter.Backend != taskCp.Backend {
		return errors.Errorf("config 'tikv-importer.backend' value '%s' different from checkpoint value '%s', please %s", cfg.TikvImporter.Backend, taskCp.Backend, retryUsage)
	}
	if cfg.App.CheckRequirements {
		if build.ReleaseVersion != taskCp.LightningVer {
			var displayVer string
			if len(taskCp.LightningVer) != 0 {
				displayVer = fmt.Sprintf("at '%s'", taskCp.LightningVer)
			} else {
				displayVer = "before v4.0.6/v3.0.19"
			}
			return errors.Errorf("lightning version is '%s', but checkpoint was created %s, please %s", build.ReleaseVersion, displayVer, retryUsage)
		}
		errorFmt := "config '%s' value '%s' different from checkpoint value '%s'. You may set 'check-requirements = false' to skip this check or " + retryUsage
		if cfg.Mydumper.SourceDir != taskCp.SourceDir {
			return errors.Errorf(errorFmt, "mydumper.data-source-dir", cfg.Mydumper.SourceDir, taskCp.SourceDir)
		}
		if cfg.TikvImporter.Backend == config.BackendLocal && cfg.TikvImporter.SortedKVDir != taskCp.SortedKVDir {
			return errors.Errorf(errorFmt, "mydumper.sorted-kv-dir", cfg.TikvImporter.SortedKVDir, taskCp.SortedKVDir)
		}
		// Report the mismatching addresses (not the backends, which are
		// already known to be equal at this point).
		if cfg.TikvImporter.Backend == config.BackendImporter && cfg.TikvImporter.Addr != taskCp.ImporterAddr {
			return errors.Errorf(errorFmt, "tikv-importer.addr", cfg.TikvImporter.Addr, taskCp.ImporterAddr)
		}
		if cfg.TiDB.Host != taskCp.TiDBHost {
			return errors.Errorf(errorFmt, "tidb.host", cfg.TiDB.Host, taskCp.TiDBHost)
		}
		if cfg.TiDB.Port != taskCp.TiDBPort {
			return errors.Errorf(errorFmt, "tidb.port", cfg.TiDB.Port, taskCp.TiDBPort)
		}
		if cfg.TiDB.PdAddr != taskCp.PdAddr {
			return errors.Errorf(errorFmt, "tidb.pd-addr", cfg.TiDB.PdAddr, taskCp.PdAddr)
		}
	}
	return nil
}
// verifyLocalFile checks, for the local backend, that every engine the
// checkpoint DB records as still storing data has its file present under
// dir; otherwise resuming from the checkpoint would silently lose data.
func verifyLocalFile(ctx context.Context, cpdb checkpoints.DB, dir string) error {
	storingTables, err := cpdb.GetLocalStoringTables(ctx)
	if err != nil {
		return errors.Trace(err)
	}
	for tableName, engineIDs := range storingTables {
		for _, engineID := range engineIDs {
			_, engineUUID := backend.MakeUUID(tableName, engineID)
			localFile := local.Engine{UUID: engineUUID}
			if existErr := localFile.Exist(dir); existErr != nil {
				log.L().Error("can't find local file",
					zap.String("table name", tableName),
					zap.Int32("engine ID", engineID))
				return errors.Trace(existErr)
			}
		}
	}
	return nil
}
// estimateChunkCountIntoMetrics seeds the "estimated" chunk and engine
// counters used for progress reporting. For tables with existing
// checkpoints it counts only the unfinished fraction of each file; for
// fresh tables it estimates from file sizes and the configured batch size.
func (rc *Controller) estimateChunkCountIntoMetrics(ctx context.Context) error {
	estimatedChunkCount := 0.0
	estimatedEngineCnt := int64(0)
	batchSize := rc.cfg.Mydumper.BatchSize
	if batchSize <= 0 {
		// if rows in source files are not sorted by primary key(if primary is number or cluster index enabled),
		// the key range in each data engine may have overlap, thus a bigger engine size can somewhat alleviate it.
		batchSize = config.DefaultBatchSize
	}
	for _, dbMeta := range rc.dbMetas {
		for _, tableMeta := range dbMeta.Tables {
			tableName := common.UniqueTable(dbMeta.Name, tableMeta.Name)
			dbCp, err := rc.checkpointsDB.Get(ctx, tableName)
			if err != nil {
				return errors.Trace(err)
			}
			// Per-file remaining work (fraction of each file still to read),
			// derived from the chunk checkpoints.
			fileChunks := make(map[string]float64)
			for engineID, eCp := range dbCp.Engines {
				if eCp.Status < checkpoints.CheckpointStatusImported {
					estimatedEngineCnt++
				}
				// The index engine carries no data chunks.
				if engineID == indexEngineID {
					continue
				}
				for _, c := range eCp.Chunks {
					if _, ok := fileChunks[c.Key.Path]; !ok {
						fileChunks[c.Key.Path] = 0.0
					}
					remainChunkCnt := float64(c.Chunk.EndOffset-c.Chunk.Offset) / float64(c.Chunk.EndOffset-c.Key.Offset)
					fileChunks[c.Key.Path] += remainChunkCnt
				}
			}
			// estimate engines count if engine cp is empty
			if len(dbCp.Engines) == 0 {
				estimatedEngineCnt += ((tableMeta.TotalSize + int64(batchSize) - 1) / int64(batchSize)) + 1
			}
			for _, fileMeta := range tableMeta.DataFiles {
				if cnt, ok := fileChunks[fileMeta.FileMeta.Path]; ok {
					estimatedChunkCount += cnt
					continue
				}
				if fileMeta.FileMeta.Type == mydump.SourceTypeCSV {
					cfg := rc.cfg.Mydumper
					// Large header-less CSVs in strict format get split into
					// multiple region-sized chunks.
					if fileMeta.FileMeta.FileSize > int64(cfg.MaxRegionSize) && cfg.StrictFormat && !cfg.CSV.Header {
						estimatedChunkCount += math.Round(float64(fileMeta.FileMeta.FileSize) / float64(cfg.MaxRegionSize))
					} else {
						estimatedChunkCount++
					}
				} else {
					estimatedChunkCount++
				}
			}
		}
	}
	metric.ChunkCounter.WithLabelValues(metric.ChunkStateEstimated).Add(estimatedChunkCount)
	metric.ProcessedEngineCounter.WithLabelValues(metric.ChunkStateEstimated, metric.TableResultSuccess).
		Add(float64(estimatedEngineCnt))
	rc.tidbGlue.Record(glue.RecordEstimatedChunk, uint64(estimatedChunkCount))
	return nil
}
func firstErr(errors ...error) error {
for _, err := range errors {
if err != nil {
return err
}
}
return nil
}
// saveStatusCheckpoint queues a status-checkpoint update for tableName /
// engineID and waits for it to be persisted by listenCheckpointUpdates.
// A non-nil err that is not a context cancellation marks the checkpoint
// invalid and records the failure; a cancellation skips the save entirely.
func (rc *Controller) saveStatusCheckpoint(ctx context.Context, tableName string, engineID int32, err error, statusIfSucceed checkpoints.CheckpointStatus) error {
	merger := &checkpoints.StatusCheckpointMerger{Status: statusIfSucceed, EngineID: engineID}

	logger := log.L().With(zap.String("table", tableName), zap.Int32("engine_id", engineID),
		zap.String("new_status", statusIfSucceed.MetricName()), zap.Error(err))
	logger.Debug("update checkpoint")

	switch {
	case err == nil:
		break
	case !common.IsContextCanceledError(err):
		merger.SetInvalid()
		rc.errorSummaries.record(tableName, err, statusIfSucceed)
	default:
		// Context cancellation: don't persist anything.
		return nil
	}

	if engineID == checkpoints.WholeTableEngineID {
		metric.RecordTableCount(statusIfSucceed.MetricName(), err)
	} else {
		metric.RecordEngineCount(statusIfSucceed.MetricName(), err)
	}

	// Buffered so the listener can reply even if we time out on ctx below.
	waitCh := make(chan error, 1)
	rc.saveCpCh <- saveCp{tableName: tableName, merger: merger, waitCh: waitCh}

	select {
	case saveCpErr := <-waitCh:
		if saveCpErr != nil {
			logger.Error("failed to save status checkpoint", log.ShortError(saveCpErr))
		}
		return saveCpErr
	case <-ctx.Done():
		return ctx.Err()
	}
}
// listenCheckpointUpdates will combine several checkpoints together to reduce database load.
// It drains rc.saveCpCh, merging diffs per table under a mutex, while a
// helper goroutine flushes the coalesced diffs to the checkpoint DB and
// answers any waiters. It exits when saveCpCh is closed. The failpoints
// simulate crashes/kills at precise checkpoint states for tests.
func (rc *Controller) listenCheckpointUpdates() {
	rc.checkpointsWg.Add(1)

	var lock sync.Mutex
	coalesed := make(map[string]*checkpoints.TableCheckpointDiff)
	var waiters []chan<- error

	// hasCheckpoint (capacity 1) signals the flusher that there is pending
	// work; at most one flush is queued at a time.
	hasCheckpoint := make(chan struct{}, 1)
	defer close(hasCheckpoint)

	go func() {
		for range hasCheckpoint {
			lock.Lock()
			cpd := coalesed
			coalesed = make(map[string]*checkpoints.TableCheckpointDiff)
			ws := waiters
			waiters = nil
			lock.Unlock()

			//nolint:scopelint // This would be either INLINED or ERASED, at compile time.
			failpoint.Inject("SlowDownCheckpointUpdate", func() {})

			if len(cpd) > 0 {
				err := rc.checkpointsDB.Update(cpd)
				for _, w := range ws {
					w <- err
				}
				web.BroadcastCheckpointDiff(cpd)
			}
			rc.checkpointsWg.Done()
		}
	}()

	for scp := range rc.saveCpCh {
		lock.Lock()
		cpd, ok := coalesed[scp.tableName]
		if !ok {
			cpd = checkpoints.NewTableCheckpointDiff()
			coalesed[scp.tableName] = cpd
		}
		scp.merger.MergeInto(cpd)

		// Only schedule a flush if one isn't already pending.
		if len(hasCheckpoint) == 0 {
			rc.checkpointsWg.Add(1)
			hasCheckpoint <- struct{}{}
		}

		if scp.waitCh != nil {
			waiters = append(waiters, scp.waitCh)
		}
		lock.Unlock()

		//nolint:scopelint // This would be either INLINED or ERASED, at compile time.
		failpoint.Inject("FailIfImportedChunk", func(val failpoint.Value) {
			if merger, ok := scp.merger.(*checkpoints.ChunkCheckpointMerger); ok && merger.Checksum.SumKVS() >= uint64(val.(int)) {
				rc.checkpointsWg.Done()
				rc.checkpointsWg.Wait()
				panic("forcing failure due to FailIfImportedChunk")
			}
		})

		//nolint:scopelint // This would be either INLINED or ERASED, at compile time.
		failpoint.Inject("FailIfStatusBecomes", func(val failpoint.Value) {
			if merger, ok := scp.merger.(*checkpoints.StatusCheckpointMerger); ok && merger.EngineID >= 0 && int(merger.Status) == val.(int) {
				rc.checkpointsWg.Done()
				rc.checkpointsWg.Wait()
				panic("forcing failure due to FailIfStatusBecomes")
			}
		})

		//nolint:scopelint // This would be either INLINED or ERASED, at compile time.
		failpoint.Inject("FailIfIndexEngineImported", func(val failpoint.Value) {
			if merger, ok := scp.merger.(*checkpoints.StatusCheckpointMerger); ok &&
				merger.EngineID == checkpoints.WholeTableEngineID &&
				merger.Status == checkpoints.CheckpointStatusIndexImported && val.(int) > 0 {
				rc.checkpointsWg.Done()
				rc.checkpointsWg.Wait()
				panic("forcing failure due to FailIfIndexEngineImported")
			}
		})

		//nolint:scopelint // This would be either INLINED or ERASED, at compile time.
		failpoint.Inject("KillIfImportedChunk", func(val failpoint.Value) {
			if merger, ok := scp.merger.(*checkpoints.ChunkCheckpointMerger); ok && merger.Checksum.SumKVS() >= uint64(val.(int)) {
				if err := common.KillMySelf(); err != nil {
					log.L().Warn("KillMySelf() failed to kill itself", log.ShortError(err))
				}
			}
		})
	}
	rc.checkpointsWg.Done()
}
// buildRunPeriodicActionAndCancelFunc build the runPeriodicAction func and a cancel func.
// The returned action loop drives the periodic chores: progress logging,
// TiKV import-mode keep-alive, disk-quota enforcement (local backend) and
// glue progress reporting. The cancel func stops the tickers and, when
// called with true, switches TiKV back to normal mode.
func (rc *Controller) buildRunPeriodicActionAndCancelFunc(ctx context.Context, stop <-chan struct{}) (func(), func(bool)) {
	cancelFuncs := make([]func(bool), 0)
	closeFuncs := make([]func(), 0)
	// a nil channel blocks forever.
	// if the cron duration is zero we use the nil channel to skip the action.
	var logProgressChan <-chan time.Time
	if rc.cfg.Cron.LogProgress.Duration > 0 {
		logProgressTicker := time.NewTicker(rc.cfg.Cron.LogProgress.Duration)
		closeFuncs = append(closeFuncs, func() {
			logProgressTicker.Stop()
		})
		logProgressChan = logProgressTicker.C
	}

	glueProgressTicker := time.NewTicker(3 * time.Second)
	closeFuncs = append(closeFuncs, func() {
		glueProgressTicker.Stop()
	})

	var switchModeChan <-chan time.Time
	// tidb backend don't need to switch tikv to import mode
	if rc.cfg.TikvImporter.Backend != config.BackendTiDB && rc.cfg.Cron.SwitchMode.Duration > 0 {
		switchModeTicker := time.NewTicker(rc.cfg.Cron.SwitchMode.Duration)
		cancelFuncs = append(cancelFuncs, func(bool) { switchModeTicker.Stop() })
		cancelFuncs = append(cancelFuncs, func(do bool) {
			if do {
				rc.switchToNormalMode(ctx)
			}
		})
		switchModeChan = switchModeTicker.C
	}

	var checkQuotaChan <-chan time.Time
	// only local storage has disk quota concern.
	if rc.cfg.TikvImporter.Backend == config.BackendLocal && rc.cfg.Cron.CheckDiskQuota.Duration > 0 {
		checkQuotaTicker := time.NewTicker(rc.cfg.Cron.CheckDiskQuota.Duration)
		cancelFuncs = append(cancelFuncs, func(bool) { checkQuotaTicker.Stop() })
		checkQuotaChan = checkQuotaTicker.C
	}

	return func() {
			defer func() {
				for _, f := range closeFuncs {
					f()
				}
			}()
			if rc.cfg.Cron.SwitchMode.Duration > 0 {
				rc.switchToImportMode(ctx)
			}
			start := time.Now()
			for {
				select {
				case <-ctx.Done():
					log.L().Warn("stopping periodic actions", log.ShortError(ctx.Err()))
					return
				case <-stop:
					log.L().Info("everything imported, stopping periodic actions")
					return

				case <-switchModeChan:
					// periodically switch to import mode, as requested by TiKV 3.0
					rc.switchToImportMode(ctx)

				case <-logProgressChan:
					// log the current progress periodically, so OPS will know that we're still working
					nanoseconds := float64(time.Since(start).Nanoseconds())
					// the estimated chunk is not accurate(likely under estimated), but the actual count is not accurate
					// before the last table start, so use the bigger of the two should be a workaround
					estimated := metric.ReadCounter(metric.ChunkCounter.WithLabelValues(metric.ChunkStateEstimated))
					pending := metric.ReadCounter(metric.ChunkCounter.WithLabelValues(metric.ChunkStatePending))
					if estimated < pending {
						estimated = pending
					}
					finished := metric.ReadCounter(metric.ChunkCounter.WithLabelValues(metric.ChunkStateFinished))
					totalTables := metric.ReadCounter(metric.TableCounter.WithLabelValues(metric.TableStatePending, metric.TableResultSuccess))
					completedTables := metric.ReadCounter(metric.TableCounter.WithLabelValues(metric.TableStateCompleted, metric.TableResultSuccess))
					bytesRead := metric.ReadHistogramSum(metric.RowReadBytesHistogram)
					engineEstimated := metric.ReadCounter(metric.ProcessedEngineCounter.WithLabelValues(metric.ChunkStateEstimated, metric.TableResultSuccess))
					enginePending := metric.ReadCounter(metric.ProcessedEngineCounter.WithLabelValues(metric.ChunkStatePending, metric.TableResultSuccess))
					if engineEstimated < enginePending {
						engineEstimated = enginePending
					}
					engineFinished := metric.ReadCounter(metric.ProcessedEngineCounter.WithLabelValues(metric.TableStateImported, metric.TableResultSuccess))
					bytesWritten := metric.ReadCounter(metric.BytesCounter.WithLabelValues(metric.TableStateWritten))
					bytesImported := metric.ReadCounter(metric.BytesCounter.WithLabelValues(metric.TableStateImported))

					var state string
					var remaining zap.Field
					switch {
					case finished >= estimated:
						if engineFinished < engineEstimated {
							state = "importing"
						} else {
							state = "post-processing"
						}
					case finished > 0:
						state = "writing"
					default:
						state = "preparing"
					}

					// since we can't accurately estimate the extra time cost by import after all writing are finished,
					// so here we use estimatedWritingProgress * 0.8 + estimatedImportingProgress * 0.2 as the total
					// progress.
					remaining = zap.Skip()
					totalPercent := 0.0
					if finished > 0 {
						writePercent := math.Min(finished/estimated, 1.0)
						importPercent := 1.0
						if bytesWritten > 0 {
							totalBytes := bytesWritten / writePercent
							importPercent = math.Min(bytesImported/totalBytes, 1.0)
						}
						totalPercent = writePercent*0.8 + importPercent*0.2
						if totalPercent < 1.0 {
							remainNanoseconds := (1.0 - totalPercent) / totalPercent * nanoseconds
							remaining = zap.Duration("remaining", time.Duration(remainNanoseconds).Round(time.Second))
						}
					}

					formatPercent := func(finish, estimate float64) string {
						speed := ""
						if estimated > 0 {
							speed = fmt.Sprintf(" (%.1f%%)", finish/estimate*100)
						}
						return speed
					}

					// avoid output bytes speed if there are no unfinished chunks
					chunkSpeed := zap.Skip()
					if bytesRead > 0 {
						chunkSpeed = zap.Float64("speed(MiB/s)", bytesRead/(1048576e-9*nanoseconds))
					}

					// Note: a speed of 28 MiB/s roughly corresponds to 100 GiB/hour.
					log.L().Info("progress",
						zap.String("total", fmt.Sprintf("%.1f%%", totalPercent*100)),
						// zap.String("files", fmt.Sprintf("%.0f/%.0f (%.1f%%)", finished, estimated, finished/estimated*100)),
						zap.String("tables", fmt.Sprintf("%.0f/%.0f%s", completedTables, totalTables, formatPercent(completedTables, totalTables))),
						zap.String("chunks", fmt.Sprintf("%.0f/%.0f%s", finished, estimated, formatPercent(finished, estimated))),
						zap.String("engines", fmt.Sprintf("%.f/%.f%s", engineFinished, engineEstimated, formatPercent(engineFinished, engineEstimated))),
						chunkSpeed,
						zap.String("state", state),
						remaining,
					)

				case <-checkQuotaChan:
					// verify the total space occupied by sorted-kv-dir is below the quota,
					// otherwise we perform an emergency import.
					rc.enforceDiskQuota(ctx)

				case <-glueProgressTicker.C:
					finished := metric.ReadCounter(metric.ChunkCounter.WithLabelValues(metric.ChunkStateFinished))
					rc.tidbGlue.Record(glue.RecordFinishedChunk, uint64(finished))
				}
			}
		}, func(do bool) {
			log.L().Info("cancel periodic actions", zap.Bool("do", do))
			for _, f := range cancelFuncs {
				f(do)
			}
		}
}
// checksumManagerKey is the context key under which the checksum manager is
// stored (its address is used, so an empty struct suffices).
var checksumManagerKey struct{}

const (
	// How long GC stays paused for duplicate resolution, and how often the
	// pause is refreshed.
	pauseGCTTLForDupeRes      = time.Hour
	pauseGCIntervalForDupeRes = time.Minute
)
// keepPauseGCForDupeRes pauses TiKV GC for the duration of duplicate
// resolution by registering a service GC safe point at PD and refreshing
// it periodically in a background goroutine. The returned channel is
// closed once the goroutine has reset the safe point TTL (after ctx is
// cancelled).
func (rc *Controller) keepPauseGCForDupeRes(ctx context.Context) (<-chan struct{}, error) {
	tlsOpt := rc.tls.ToPDSecurityOption()
	pdCli, err := pd.NewClientWithContext(ctx, []string{rc.cfg.TiDB.PdAddr}, tlsOpt)
	if err != nil {
		return nil, errors.Trace(err)
	}

	serviceID := "lightning-duplicate-resolution-" + uuid.New().String()
	ttl := int64(pauseGCTTLForDupeRes / time.Second)

	var (
		safePoint uint64
		paused    bool
	)
	// Try to get the minimum safe point across all services as our GC safe point.
	for i := 0; i < 10; i++ {
		if i > 0 {
			time.Sleep(time.Second * 3)
		}
		// First call with safe point 1 just discovers the current minimum;
		// the second call tries to register at exactly that minimum.
		minSafePoint, err := pdCli.UpdateServiceGCSafePoint(ctx, serviceID, ttl, 1)
		if err != nil {
			pdCli.Close()
			return nil, errors.Trace(err)
		}
		newMinSafePoint, err := pdCli.UpdateServiceGCSafePoint(ctx, serviceID, ttl, minSafePoint)
		if err != nil {
			pdCli.Close()
			return nil, errors.Trace(err)
		}
		if newMinSafePoint <= minSafePoint {
			safePoint = minSafePoint
			paused = true
			break
		}
		log.L().Warn(
			"Failed to register GC safe point because the current minimum safe point is newer"+
				" than what we assume, will retry newMinSafePoint next time",
			zap.Uint64("minSafePoint", minSafePoint),
			zap.Uint64("newMinSafePoint", newMinSafePoint),
		)
	}
	if !paused {
		pdCli.Close()
		return nil, errors.New("failed to pause GC for duplicate resolution after all retries")
	}

	exitCh := make(chan struct{})
	go func(safePoint uint64) {
		defer pdCli.Close()
		defer close(exitCh)
		ticker := time.NewTicker(pauseGCIntervalForDupeRes)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				// Refresh our safe point registration before the TTL lapses.
				minSafePoint, err := pdCli.UpdateServiceGCSafePoint(ctx, serviceID, ttl, safePoint)
				if err != nil {
					log.L().Warn("Failed to register GC safe point", zap.Error(err))
					continue
				}
				if minSafePoint > safePoint {
					log.L().Warn("The current minimum safe point is newer than what we hold, duplicate records are at"+
						"risk of being GC and not detectable",
						zap.Uint64("safePoint", safePoint),
						zap.Uint64("minSafePoint", minSafePoint),
					)
					safePoint = minSafePoint
				}
			case <-ctx.Done():
				// Unregister by setting TTL to zero, with a short fresh context
				// since ctx itself is already cancelled.
				stopCtx, cancelFunc := context.WithTimeout(context.Background(), time.Second*5)
				if _, err := pdCli.UpdateServiceGCSafePoint(stopCtx, serviceID, 0, safePoint); err != nil {
					log.L().Warn("Failed to reset safe point ttl to zero", zap.Error(err))
				}
				// just make compiler happy
				cancelFunc()
				return
			}
		}
	}(safePoint)
	return exitCh, nil
}
func (rc *Controller) restoreTables(ctx context.Context) error {
if rc.cfg.TikvImporter.DuplicateResolution != config.DupeResAlgNone {
subCtx, cancel := context.WithCancel(ctx)
exitCh, err := rc.keepPauseGCForDupeRes(subCtx)
if err != nil {
cancel()
return errors.Trace(err)
}
defer func() {
cancel()
<-exitCh
}()
}
logTask := log.L().Begin(zap.InfoLevel, "restore all tables data")
if rc.tableWorkers == nil {
rc.tableWorkers = worker.NewPool(ctx, rc.cfg.App.TableConcurrency, "table")
}
if rc.indexWorkers == nil {
rc.indexWorkers = worker.NewPool(ctx, rc.cfg.App.IndexConcurrency, "index")
}
// for local backend, we should disable some pd scheduler and change some settings, to
// make split region and ingest sst more stable
// because importer backend is mostly use for v3.x cluster which doesn't support these api,
// so we also don't do this for import backend
finishSchedulers := func() {}
// if one lightning failed abnormally, and can't determine whether it needs to switch back,
// we do not do switch back automatically
cleanupFunc := func() {}
switchBack := false
taskFinished := false
if rc.cfg.TikvImporter.Backend == config.BackendLocal {
logTask.Info("removing PD leader®ion schedulers")
restoreFn, err := rc.taskMgr.CheckAndPausePdSchedulers(ctx)
finishSchedulers = func() {
if restoreFn != nil {
// use context.Background to make sure this restore function can still be executed even if ctx is canceled
restoreCtx := context.Background()
needSwitchBack, needCleanup, err := rc.taskMgr.CheckAndFinishRestore(restoreCtx, taskFinished)
if err != nil {
logTask.Warn("check restore pd schedulers failed", zap.Error(err))
return
}
switchBack = needSwitchBack
if needSwitchBack {
if restoreE := restoreFn(restoreCtx); restoreE != nil {
logTask.Warn("failed to restore removed schedulers, you may need to restore them manually", zap.Error(restoreE))
}
logTask.Info("add back PD leader®ion schedulers")
// clean up task metas
if needCleanup {
logTask.Info("cleanup task metas")
if cleanupErr := rc.taskMgr.Cleanup(restoreCtx); cleanupErr != nil {
logTask.Warn("failed to clean task metas, you may need to restore them manually", zap.Error(cleanupErr))
}
// cleanup table meta and schema db if needed.
cleanupFunc = func() {
if e := rc.taskMgr.CleanupAllMetas(restoreCtx); err != nil {
logTask.Warn("failed to clean table task metas, you may need to restore them manually", zap.Error(e))
}
}
}
}
}
rc.taskMgr.Close()
}
if err != nil {
return errors.Trace(err)
}
}
defer func() {
if switchBack {
cleanupFunc()
}
}()
type task struct {
tr *TableRestore
cp *checkpoints.TableCheckpoint
}
totalTables := 0
for _, dbMeta := range rc.dbMetas {
totalTables += len(dbMeta.Tables)
}
postProcessTaskChan := make(chan task, totalTables)
var wg sync.WaitGroup
var restoreErr common.OnceError
stopPeriodicActions := make(chan struct{})
periodicActions, cancelFunc := rc.buildRunPeriodicActionAndCancelFunc(ctx, stopPeriodicActions)
go periodicActions()
finishFuncCalled := false
defer func() {
if !finishFuncCalled {
finishSchedulers()
cancelFunc(switchBack)
finishFuncCalled = true
}
}()
defer close(stopPeriodicActions)
taskCh := make(chan task, rc.cfg.App.IndexConcurrency)
defer close(taskCh)
manager, err := newChecksumManager(ctx, rc)
if err != nil {
return errors.Trace(err)
}
ctx2 := context.WithValue(ctx, &checksumManagerKey, manager)
for i := 0; i < rc.cfg.App.IndexConcurrency; i++ {
go func() {
for task := range taskCh {
tableLogTask := task.tr.logger.Begin(zap.InfoLevel, "restore table")
web.BroadcastTableCheckpoint(task.tr.tableName, task.cp)
needPostProcess, err := task.tr.restoreTable(ctx2, rc, task.cp)
err = errors.Annotatef(err, "restore table %s failed", task.tr.tableName)
tableLogTask.End(zap.ErrorLevel, err)
web.BroadcastError(task.tr.tableName, err)
metric.RecordTableCount("completed", err)
restoreErr.Set(err)
if needPostProcess {
postProcessTaskChan <- task
}
wg.Done()
}
}()
}
for _, dbMeta := range rc.dbMetas {
dbInfo := rc.dbInfos[dbMeta.Name]
for _, tableMeta := range dbMeta.Tables {
tableInfo := dbInfo.Tables[tableMeta.Name]
tableName := common.UniqueTable(dbInfo.Name, tableInfo.Name)
cp, err := rc.checkpointsDB.Get(ctx, tableName)
if err != nil {
return errors.Trace(err)
}
igCols, err := rc.cfg.Mydumper.IgnoreColumns.GetIgnoreColumns(dbInfo.Name, tableInfo.Name, rc.cfg.Mydumper.CaseSensitive)
if err != nil {
return errors.Trace(err)
}
tr, err := NewTableRestore(tableName, tableMeta, dbInfo, tableInfo, cp, igCols.Columns)
if err != nil {
return errors.Trace(err)
}
wg.Add(1)
select {
case taskCh <- task{tr: tr, cp: cp}:
case <-ctx.Done():
return ctx.Err()
}
}
}
wg.Wait()
// if context is done, should return directly
select {
case <-ctx.Done():
err = restoreErr.Get()
if err == nil {
err = ctx.Err()
}
logTask.End(zap.ErrorLevel, err)
return err
default:
}
// stop periodic tasks for restore table such as pd schedulers and switch-mode tasks.
// this can help make cluster switching back to normal state more quickly.
// finishSchedulers()
// cancelFunc(switchBack)
// finishFuncCalled = true
taskFinished = true
close(postProcessTaskChan)
// otherwise, we should run all tasks in the post-process task chan
for i := 0; i < rc.cfg.App.TableConcurrency; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for task := range postProcessTaskChan {
metaMgr := rc.metaMgrBuilder.TableMetaMgr(task.tr)
// force all the remain post-process tasks to be executed
_, err = task.tr.postProcess(ctx2, rc, task.cp, true, metaMgr)
restoreErr.Set(err)
}
}()
}
wg.Wait()
err = restoreErr.Get()
logTask.End(zap.ErrorLevel, err)
return err
}
// restoreTable imports a single table. It populates chunk checkpoints when
// they are not already present, allocates a global row-ID range via the table
// meta manager (for local/importer backends on TiDB >= v4.0), rebases the
// table's auto-increment / auto-random allocators, restores all engines, and
// finally runs post-processing. The returned bool reports whether a delayed
// post-process step still needs to be executed by the caller.
func (tr *TableRestore) restoreTable(
	ctx context.Context,
	rc *Controller,
	cp *checkpoints.TableCheckpoint,
) (bool, error) {
	// 1. Load the table info.
	// Bail out early if the context has already been canceled.
	select {
	case <-ctx.Done():
		return false, ctx.Err()
	default:
	}
	metaMgr := rc.metaMgrBuilder.TableMetaMgr(tr)
	// no need to do anything if the chunks are already populated
	if len(cp.Engines) > 0 {
		tr.logger.Info("reusing engines and files info from checkpoint",
			zap.Int("enginesCnt", len(cp.Engines)),
			zap.Int("filesCnt", cp.CountChunks()),
		)
	} else if cp.Status < checkpoints.CheckpointStatusAllWritten {
		if err := tr.populateChunks(ctx, rc, cp); err != nil {
			return false, errors.Trace(err)
		}

		// fetch the max chunk row_id max value as the global max row_id
		rowIDMax := int64(0)
		for _, engine := range cp.Engines {
			// Chunks are ordered, so the last chunk of each engine carries the
			// engine's maximum row ID.
			if len(engine.Chunks) > 0 && engine.Chunks[len(engine.Chunks)-1].Chunk.RowIDMax > rowIDMax {
				rowIDMax = engine.Chunks[len(engine.Chunks)-1].Chunk.RowIDMax
			}
		}

		// NOTE(review): the error from GetDB is discarded; presumably
		// FetchVersion surfaces any problem with a nil/unusable handle — confirm.
		db, _ := rc.tidbGlue.GetDB()
		versionStr, err := version.FetchVersion(ctx, db)
		if err != nil {
			return false, errors.Trace(err)
		}

		versionInfo := version.ParseServerInfo(versionStr)

		// "show table next_row_id" is only available after tidb v4.0.0
		if versionInfo.ServerVersion.Major >= 4 &&
			(rc.cfg.TikvImporter.Backend == config.BackendLocal || rc.cfg.TikvImporter.Backend == config.BackendImporter) {
			// first, insert a new-line into meta table
			if err = metaMgr.InitTableMeta(ctx); err != nil {
				return false, err
			}

			checksum, rowIDBase, err := metaMgr.AllocTableRowIDs(ctx, rowIDMax)
			if err != nil {
				return false, err
			}
			// Shift this task's chunk row IDs so they don't collide with rows
			// imported by other concurrent lightning instances.
			tr.RebaseChunkRowIDs(cp, rowIDBase)

			if checksum != nil {
				if cp.Checksum != *checksum {
					cp.Checksum = *checksum
					rc.saveCpCh <- saveCp{
						tableName: tr.tableName,
						merger: &checkpoints.TableChecksumMerger{
							Checksum: cp.Checksum,
						},
					}
				}
				tr.logger.Info("checksum before restore table", zap.Object("checksum", &cp.Checksum))
			}
		}
		if err := rc.checkpointsDB.InsertEngineCheckpoints(ctx, tr.tableName, cp.Engines); err != nil {
			return false, errors.Trace(err)
		}
		web.BroadcastTableCheckpoint(tr.tableName, cp)

		// rebase the allocator so it exceeds the number of rows.
		if tr.tableInfo.Core.PKIsHandle && tr.tableInfo.Core.ContainsAutoRandomBits() {
			cp.AllocBase = mathutil.MaxInt64(cp.AllocBase, tr.tableInfo.Core.AutoRandID)
			if err := tr.alloc.Get(autoid.AutoRandomType).Rebase(context.Background(), cp.AllocBase, false); err != nil {
				return false, err
			}
		} else {
			cp.AllocBase = mathutil.MaxInt64(cp.AllocBase, tr.tableInfo.Core.AutoIncID)
			if err := tr.alloc.Get(autoid.RowIDAllocType).Rebase(context.Background(), cp.AllocBase, false); err != nil {
				return false, err
			}
		}
		rc.saveCpCh <- saveCp{
			tableName: tr.tableName,
			merger: &checkpoints.RebaseCheckpointMerger{
				AllocBase: cp.AllocBase,
			},
		}
	}

	// 2. Restore engines (if still needed)
	err := tr.restoreEngines(ctx, rc, cp)
	if err != nil {
		return false, errors.Trace(err)
	}

	err = metaMgr.UpdateTableStatus(ctx, metaStatusRestoreFinished)
	if err != nil {
		return false, errors.Trace(err)
	}

	// 3. Post-process. With the last parameter set to false, we can allow delay analyze execute latter
	return tr.postProcess(ctx, rc, cp, false /* force-analyze */, metaMgr)
}
// fullCompact triggers a full compaction over the whole imported data set,
// unless compaction is disabled in the post-restore configuration. It blocks
// until any in-flight level-1 compaction has completed before starting.
func (rc *Controller) fullCompact(ctx context.Context) error {
	if !rc.cfg.PostRestore.Compact {
		log.L().Info("skip full compaction")
		return nil
	}

	// Spin until we win the idle -> doing transition, i.e. until any existing
	// level-1 compaction has finished and released the state.
	waitTask := log.L().Begin(zap.InfoLevel, "wait for completion of existing level 1 compaction")
	for {
		if rc.compactState.CAS(compactStateIdle, compactStateDoing) {
			break
		}
		time.Sleep(100 * time.Millisecond)
	}
	waitTask.End(zap.ErrorLevel, nil)

	return errors.Trace(rc.doCompact(ctx, FullLevelCompact))
}
// doCompact asks every TiKV store that is at least in the Disconnected state
// to run a compaction at the given level.
func (rc *Controller) doCompact(ctx context.Context, level int32) error {
	pdTLS := rc.tls.WithHost(rc.cfg.TiDB.PdAddr)
	compactStore := func(c context.Context, store *tikv.Store) error {
		return tikv.Compact(c, pdTLS, store.Address, level)
	}
	return tikv.ForAllStores(ctx, pdTLS, tikv.StoreStateDisconnected, compactStore)
}
// switchToImportMode puts all TiKV stores into import mode (optimized for
// bulk ingestion). No-op for the TiDB backend; see switchTiKVMode.
func (rc *Controller) switchToImportMode(ctx context.Context) {
	log.L().Info("switch to import mode")
	rc.switchTiKVMode(ctx, sstpb.SwitchMode_Import)
}
// switchToNormalMode switches all TiKV stores back to normal serving mode.
// No-op for the TiDB backend; see switchTiKVMode.
func (rc *Controller) switchToNormalMode(ctx context.Context) {
	log.L().Info("switch to normal mode")
	rc.switchTiKVMode(ctx, sstpb.SwitchMode_Normal)
}
// switchTiKVMode broadcasts a mode switch (import/normal) to TiKV stores.
// Failures are intentionally ignored: the switch is retried periodically, and
// logging is already done inside tikv.SwitchMode.
func (rc *Controller) switchTiKVMode(ctx context.Context, mode sstpb.SwitchMode) {
	// The TiDB backend doesn't need to switch TiKV to import mode.
	if rc.isTiDBBackend() {
		return
	}

	// It is fine if we miss some stores which did not switch to Import mode,
	// since we're running it periodically, so we exclude disconnected stores.
	// But it is essential all stores be switched back to Normal mode to allow
	// normal operation.
	var minState tikv.StoreState
	if mode == sstpb.SwitchMode_Import {
		minState = tikv.StoreStateOffline
	} else {
		minState = tikv.StoreStateDisconnected
	}
	tls := rc.tls.WithHost(rc.cfg.TiDB.PdAddr)
	// we ignore switch mode failure since it is not fatal.
	// no need log the error, it is done in kv.SwitchMode already.
	_ = tikv.ForAllStores(
		ctx,
		tls,
		minState,
		func(c context.Context, store *tikv.Store) error {
			return tikv.SwitchMode(c, tls, store.Address, mode)
		},
	)
}
// enforceDiskQuota checks whether local engines exceed the configured disk
// quota and, if so, blocks all writers and imports the largest engines to
// reclaim space. The work runs in a background goroutine; the
// diskQuotaState CAS guarantees at most one check/import cycle is in flight.
func (rc *Controller) enforceDiskQuota(ctx context.Context) {
	if !rc.diskQuotaState.CAS(diskQuotaStateIdle, diskQuotaStateChecking) {
		// do not run multiple the disk quota check / import simultaneously.
		// (we execute the lock check in background to avoid blocking the cron thread)
		return
	}

	go func() {
		// locker is assigned when we detect the disk quota is exceeded.
		// before the disk quota is confirmed exceeded, we keep the diskQuotaLock
		// unlocked to avoid periodically interrupting the writer threads.
		var locker sync.Locker
		defer func() {
			// Always reset the state and release the writer lock (if taken)
			// when this cycle ends, regardless of the exit path.
			rc.diskQuotaState.Store(diskQuotaStateIdle)
			if locker != nil {
				locker.Unlock()
			}
		}()

		isRetrying := false

		for {
			// sleep for a cycle if we are retrying because there is nothing new to import.
			if isRetrying {
				select {
				case <-ctx.Done():
					return
				case <-time.After(rc.cfg.Cron.CheckDiskQuota.Duration):
				}
			} else {
				isRetrying = true
			}

			quota := int64(rc.cfg.TikvImporter.DiskQuota)
			largeEngines, inProgressLargeEngines, totalDiskSize, totalMemSize := rc.backend.CheckDiskQuota(quota)
			metric.LocalStorageUsageBytesGauge.WithLabelValues("disk").Set(float64(totalDiskSize))
			metric.LocalStorageUsageBytesGauge.WithLabelValues("mem").Set(float64(totalMemSize))

			logger := log.With(
				zap.Int64("diskSize", totalDiskSize),
				zap.Int64("memSize", totalMemSize),
				zap.Int64("quota", quota),
				zap.Int("largeEnginesCount", len(largeEngines)),
				zap.Int("inProgressLargeEnginesCount", inProgressLargeEngines))

			if len(largeEngines) == 0 && inProgressLargeEngines == 0 {
				logger.Debug("disk quota respected")
				return
			}

			if locker == nil {
				// blocks all writers when we detected disk quota being exceeded.
				rc.diskQuotaLock.Lock()
				locker = rc.diskQuotaLock
			}

			logger.Warn("disk quota exceeded")
			if len(largeEngines) == 0 {
				// Nothing importable yet; keep writers blocked and re-check
				// after the next sleep cycle.
				logger.Warn("all large engines are already importing, keep blocking all writes")
				continue
			}

			// flush all engines so that checkpoints can be updated.
			if err := rc.backend.FlushAll(ctx); err != nil {
				logger.Error("flush engine for disk quota failed, check again later", log.ShortError(err))
				return
			}

			// at this point, all engines are synchronized on disk.
			// we then import every large engines one by one and complete.
			// if any engine failed to import, we just try again next time, since the data are still intact.
			rc.diskQuotaState.Store(diskQuotaStateImporting)
			task := logger.Begin(zap.WarnLevel, "importing large engines for disk quota")
			var importErr error
			for _, engine := range largeEngines {
				// Use a larger split region size to avoid split the same region by many times.
				if err := rc.backend.UnsafeImportAndReset(ctx, engine, int64(config.SplitRegionSize)*int64(config.MaxSplitRegionSizeRatio)); err != nil {
					importErr = multierr.Append(importErr, err)
				}
			}
			task.End(zap.ErrorLevel, importErr)
			return
		}
	}()
}
// setGlobalVariables aligns lightning's collation behavior with the target
// TiDB cluster. Skipped for the TiDB backend to stay MySQL-compatible.
func (rc *Controller) setGlobalVariables(ctx context.Context) error {
	// skip for tidb backend to be compatible with MySQL
	if rc.isTiDBBackend() {
		return nil
	}
	// set new collation flag base on tidb config
	enabled := ObtainNewCollationEnabled(ctx, rc.tidbGlue.GetSQLExecutor())
	// we should enable/disable new collation here since in server mode, tidb config
	// may be different in different tasks
	collate.SetNewCollationEnabledForTest(enabled)

	return nil
}
// waitCheckpointFinish closes the checkpoint channel and blocks until the
// checkpoint writer goroutine drains it, so subsequent cleanup is safe.
// Must be called at most once (closing saveCpCh twice would panic).
func (rc *Controller) waitCheckpointFinish() {
	// wait checkpoint process finish so that we can do cleanup safely
	close(rc.saveCpCh)
	rc.checkpointsWg.Wait()
}
// cleanCheckpoints removes or renames this task's checkpoints after a
// successful import, according to the keep-after-success policy. It first
// waits for the checkpoint writer to finish so the cleanup cannot race with
// in-flight checkpoint saves.
func (rc *Controller) cleanCheckpoints(ctx context.Context) error {
	rc.waitCheckpointFinish()

	if !rc.cfg.Checkpoint.Enable {
		return nil
	}

	logger := log.With(
		zap.Stringer("keepAfterSuccess", rc.cfg.Checkpoint.KeepAfterSuccess),
		zap.Int64("taskID", rc.cfg.TaskID),
	)

	task := logger.Begin(zap.InfoLevel, "clean checkpoints")
	var err error
	switch rc.cfg.Checkpoint.KeepAfterSuccess {
	case config.CheckpointRename:
		err = rc.checkpointsDB.MoveCheckpoints(ctx, rc.cfg.TaskID)
	case config.CheckpointRemove:
		err = rc.checkpointsDB.RemoveCheckpoint(ctx, "all")
	}
	// Other policies (e.g. "origin") intentionally leave checkpoints untouched.
	task.End(zap.ErrorLevel, err)
	return errors.Annotate(err, "clean checkpoints")
}
// isLocalBackend reports whether the task uses the local (SST-ingest) backend.
func (rc *Controller) isLocalBackend() bool {
	return rc.cfg.TikvImporter.Backend == config.BackendLocal
}
// isTiDBBackend reports whether the task uses the TiDB (SQL-insert) backend.
func (rc *Controller) isTiDBBackend() bool {
	return rc.cfg.TikvImporter.Backend == config.BackendTiDB
}
// preCheckRequirements checks
// 1. Cluster resource
// 2. Local node resource
// 3. Cluster region
// 4. Lightning configuration
// before restore tables start. For the local backend it also initializes the
// task meta manager (used for multi-lightning coordination) and registers
// this task unless it already exists. Any failed check aborts the run with a
// message listing what to fix.
func (rc *Controller) preCheckRequirements(ctx context.Context) error {
	if rc.cfg.App.CheckRequirements {
		if err := rc.ClusterIsAvailable(ctx); err != nil {
			return errors.Trace(err)
		}

		if err := rc.StoragePermission(ctx); err != nil {
			return errors.Trace(err)
		}
	}

	if err := rc.metaMgrBuilder.Init(ctx); err != nil {
		return err
	}
	taskExist := false

	// We still need to sample source data even if this task has existed, because we need to judge whether the
	// source is in order as row key to decide how to sort local data.
	source, err := rc.estimateSourceData(ctx)
	if err != nil {
		return errors.Trace(err)
	}
	if rc.isLocalBackend() {
		pdController, err := pdutil.NewPdController(ctx, rc.cfg.TiDB.PdAddr,
			rc.tls.TLSConfig(), rc.tls.ToPDSecurityOption())
		if err != nil {
			return errors.Trace(err)
		}

		// PdController will be closed when `taskMetaMgr` closes.
		rc.taskMgr = rc.metaMgrBuilder.TaskMetaMgr(pdController)
		taskExist, err = rc.taskMgr.CheckTaskExist(ctx)
		if err != nil {
			return errors.Trace(err)
		}
		if !taskExist {
			if err = rc.taskMgr.InitTask(ctx, source); err != nil {
				return errors.Trace(err)
			}
			if rc.cfg.App.CheckRequirements {
				err = rc.localResource(source)
				if err != nil {
					return errors.Trace(err)
				}
				if err := rc.clusterResource(ctx, source); err != nil {
					// Best-effort: unregister the task we just created so a
					// retry can re-init cleanly; the cleanup error is ignored.
					rc.taskMgr.CleanupTask(ctx)
					return errors.Trace(err)
				}
				if err := rc.checkClusterRegion(ctx); err != nil {
					return errors.Trace(err)
				}
			}
		}
	}

	if rc.tidbGlue.OwnsSQLExecutor() && rc.cfg.App.CheckRequirements {
		fmt.Print(rc.checkTemplate.Output())
	}
	if !rc.checkTemplate.Success() {
		if !taskExist && rc.taskMgr != nil {
			rc.taskMgr.CleanupTask(ctx)
		}
		return errors.Errorf("tidb-lightning check failed."+
			" Please fix the failed check(s):\n %s", rc.checkTemplate.FailedMsg())
	}
	return nil
}
// DataCheck checks the data schema which needs #rc.restoreSchema finished.
// It validates large-CSV limits, checkpoint consistency, per-table schema
// validity (skipped for tables resuming from a checkpoint and for the TiDB
// backend), and CSV headers, collecting critical messages into the check
// template for later reporting.
func (rc *Controller) DataCheck(ctx context.Context) error {
	var err error
	if rc.cfg.App.CheckRequirements {
		err = rc.HasLargeCSV(rc.dbMetas)
		if err != nil {
			return errors.Trace(err)
		}
	}
	checkPointCriticalMsgs := make([]string, 0, len(rc.dbMetas))
	schemaCriticalMsgs := make([]string, 0, len(rc.dbMetas))
	var msgs []string
	for _, dbInfo := range rc.dbMetas {
		for _, tableInfo := range dbInfo.Tables {
			// if hasCheckpoint is true, the table will start import from the checkpoint
			// so we can skip TableHasDataInCluster and SchemaIsValid check.
			noCheckpoint := true
			if rc.cfg.Checkpoint.Enable {
				if msgs, noCheckpoint, err = rc.CheckpointIsValid(ctx, tableInfo); err != nil {
					return errors.Trace(err)
				}
				if len(msgs) != 0 {
					checkPointCriticalMsgs = append(checkPointCriticalMsgs, msgs...)
				}
			}

			if rc.cfg.App.CheckRequirements && noCheckpoint && rc.cfg.TikvImporter.Backend != config.BackendTiDB {
				if msgs, err = rc.SchemaIsValid(ctx, tableInfo); err != nil {
					return errors.Trace(err)
				}
				if len(msgs) != 0 {
					schemaCriticalMsgs = append(schemaCriticalMsgs, msgs...)
				}
			}
		}
	}
	err = rc.checkCSVHeader(ctx, rc.dbMetas)
	if err != nil {
		return err
	}
	if len(checkPointCriticalMsgs) != 0 {
		rc.checkTemplate.Collect(Critical, false, strings.Join(checkPointCriticalMsgs, "\n"))
	} else {
		rc.checkTemplate.Collect(Critical, true, "checkpoints are valid")
	}
	if len(schemaCriticalMsgs) != 0 {
		rc.checkTemplate.Collect(Critical, false, strings.Join(schemaCriticalMsgs, "\n"))
	} else {
		rc.checkTemplate.Collect(Critical, true, "table schemas are valid")
	}
	return nil
}
// chunkRestore bundles the parser over one data-file chunk with its
// checkpoint state and its position (index) within the engine.
type chunkRestore struct {
	parser mydump.Parser
	index  int
	chunk  *checkpoints.ChunkCheckpoint
}
// newChunkRestore opens the chunk's source file and builds the parser
// matching its format (CSV / SQL dump / Parquet), seeks the parser to the
// chunk's checkpointed offset and row ID, and restores any previously saved
// column permutation. The caller owns the returned chunkRestore and must
// call close() to release the parser and its reader.
func newChunkRestore(
	ctx context.Context,
	index int,
	cfg *config.Config,
	chunk *checkpoints.ChunkCheckpoint,
	ioWorkers *worker.Pool,
	store storage.ExternalStorage,
	tableInfo *checkpoints.TidbTableInfo,
) (*chunkRestore, error) {
	blockBufSize := int64(cfg.Mydumper.ReadBlockSize)

	var reader storage.ReadSeekCloser
	var err error
	if chunk.FileMeta.Type == mydump.SourceTypeParquet {
		reader, err = mydump.OpenParquetReader(ctx, store, chunk.FileMeta.Path, chunk.FileMeta.FileSize)
	} else {
		reader, err = store.Open(ctx, chunk.FileMeta.Path)
	}
	if err != nil {
		return nil, errors.Trace(err)
	}

	var parser mydump.Parser
	switch chunk.FileMeta.Type {
	case mydump.SourceTypeCSV:
		// The header row can only appear in the first chunk of the file.
		hasHeader := cfg.Mydumper.CSV.Header && chunk.Chunk.Offset == 0
		// Create a utf8mb4 convertor to encode and decode data with the charset of CSV files.
		charsetConvertor, err := mydump.NewCharsetConvertor(cfg.Mydumper.DataCharacterSet, cfg.Mydumper.DataInvalidCharReplace)
		if err != nil {
			return nil, err
		}
		parser, err = mydump.NewCSVParser(&cfg.Mydumper.CSV, reader, blockBufSize, ioWorkers, hasHeader, charsetConvertor)
		if err != nil {
			return nil, errors.Trace(err)
		}
	case mydump.SourceTypeSQL:
		parser = mydump.NewChunkParser(cfg.TiDB.SQLMode, reader, blockBufSize, ioWorkers)
	case mydump.SourceTypeParquet:
		parser, err = mydump.NewParquetParser(ctx, store, reader, chunk.FileMeta.Path)
		if err != nil {
			return nil, errors.Trace(err)
		}
	default:
		// Unknown source types indicate a programming error upstream.
		panic(fmt.Sprintf("file '%s' with unknown source type '%s'", chunk.Key.Path, chunk.FileMeta.Type.String()))
	}

	// Resume from the checkpointed position within the file.
	if err = parser.SetPos(chunk.Chunk.Offset, chunk.Chunk.PrevRowIDMax); err != nil {
		return nil, errors.Trace(err)
	}
	if len(chunk.ColumnPermutation) > 0 {
		parser.SetColumns(getColumnNames(tableInfo.Core, chunk.ColumnPermutation))
	}

	return &chunkRestore{
		parser: parser,
		index:  index,
		chunk:  chunk,
	}, nil
}
// close releases the underlying parser (and the reader it wraps).
func (cr *chunkRestore) close() {
	cr.parser.Close()
}
// getColumnNames maps a column permutation back to the ordered list of
// column names as they appear in the data file. permutation[i] holds the
// file position of table column i, or -1 when that column is absent from
// the file; the special index len(tableInfo.Columns) denotes the
// _tidb_rowid pseudo-column.
func getColumnNames(tableInfo *model.TableInfo, permutation []int) []string {
	// Invert the permutation: colIndexes[p] is the table-column index found
	// at file position p (-1 when unused). Preallocate at full length
	// instead of growing the slice one append at a time.
	colIndexes := make([]int, len(permutation))
	for i := range colIndexes {
		colIndexes[i] = -1
	}
	colCnt := 0
	for i, p := range permutation {
		if p >= 0 {
			colIndexes[p] = i
			colCnt++
		}
	}

	names := make([]string, 0, colCnt)
	for _, idx := range colIndexes {
		// skip columns with index -1 (not present in the file)
		if idx >= 0 {
			// original fields contains _tidb_rowid field
			if idx == len(tableInfo.Columns) {
				names = append(names, model.ExtraHandleName.O)
			} else {
				names = append(names, tableInfo.Columns[idx].Name.O)
			}
		}
	}
	return names
}
// Tuning knobs for the encode -> deliver pipeline.
var (
	maxKVQueueSize         = 32             // Cache at most this number of rows before blocking the encode loop
	minDeliverBytes uint64 = 96 * units.KiB // 96 KB (data + index). batch at least this amount of bytes to reduce number of messages
)
// deliveredKVs is one encoded row handed from the encode loop to the deliver
// loop, together with the source position needed to advance the chunk
// checkpoint once the row is durably written.
type deliveredKVs struct {
	kvs     kv.Row // if kvs is nil, this indicated we've got the last message.
	columns []string
	offset  int64
	rowID   int64
}
// deliverResult reports the outcome of a deliverLoop run: the total time
// spent writing to the engines, and the first error encountered (if any).
type deliverResult struct {
	totalDur time.Duration
	err      error
}
// deliverLoop drains encoded KV packets from kvsCh, batches them until at
// least minDeliverBytes have accumulated (or the channel closes), writes
// data and index KVs to their respective engine writers, and then advances
// the chunk checkpoint. It returns the cumulative write duration and the
// first error. The loop terminates when it receives the empty terminator
// packet sent by encodeLoop or when ctx is canceled.
//nolint:nakedret // TODO: refactor
func (cr *chunkRestore) deliverLoop(
	ctx context.Context,
	kvsCh <-chan []deliveredKVs,
	t *TableRestore,
	engineID int32,
	dataEngine, indexEngine *backend.LocalEngineWriter,
	rc *Controller,
) (deliverTotalDur time.Duration, err error) {
	var channelClosed bool
	deliverLogger := t.logger.With(
		zap.Int32("engineNumber", engineID),
		zap.Int("fileIndex", cr.index),
		zap.Stringer("path", &cr.chunk.Key),
		zap.String("task", "deliver"),
	)
	// Fetch enough KV pairs from the source.
	dataKVs := rc.backend.MakeEmptyRows()
	indexKVs := rc.backend.MakeEmptyRows()

	dataSynced := true
	for !channelClosed {
		var dataChecksum, indexChecksum verify.KVChecksum
		var columns []string
		var kvPacket []deliveredKVs
		// init these two field as checkpoint current value, so even if there are no kv pairs delivered,
		// chunk checkpoint should stay the same
		offset := cr.chunk.Chunk.Offset
		rowID := cr.chunk.Chunk.PrevRowIDMax

	populate:
		for dataChecksum.SumSize()+indexChecksum.SumSize() < minDeliverBytes {
			select {
			case kvPacket = <-kvsCh:
				// An empty packet is the terminator sent by encodeLoop.
				if len(kvPacket) == 0 {
					channelClosed = true
					break populate
				}
				for _, p := range kvPacket {
					p.kvs.ClassifyAndAppend(&dataKVs, &dataChecksum, &indexKVs, &indexChecksum)
					columns = p.columns
					offset = p.offset
					rowID = p.rowID
				}
			case <-ctx.Done():
				err = ctx.Err()
				return
			}
		}

		err = func() error {
			// We use `TryRLock` with sleep here to avoid blocking current goroutine during importing when disk-quota is
			// triggered, so that we can save chunkCheckpoint as soon as possible after `FlushEngine` is called.
			// This implementation may not be very elegant or even completely correct, but it is currently a relatively
			// simple and effective solution.
			for !rc.diskQuotaLock.TryRLock() {
				// try to update chunk checkpoint, this can help save checkpoint after importing when disk-quota is triggered
				if !dataSynced {
					dataSynced = cr.maybeSaveCheckpoint(rc, t, engineID, cr.chunk, dataEngine, indexEngine)
				}
				time.Sleep(time.Millisecond)
			}
			defer rc.diskQuotaLock.RUnlock()

			// Write KVs into the engine
			start := time.Now()

			if err = dataEngine.WriteRows(ctx, columns, dataKVs); err != nil {
				if !common.IsContextCanceledError(err) {
					deliverLogger.Error("write to data engine failed", log.ShortError(err))
				}
				return errors.Trace(err)
			}
			if err = indexEngine.WriteRows(ctx, columns, indexKVs); err != nil {
				if !common.IsContextCanceledError(err) {
					deliverLogger.Error("write to index engine failed", log.ShortError(err))
				}
				return errors.Trace(err)
			}

			deliverDur := time.Since(start)
			deliverTotalDur += deliverDur
			metric.BlockDeliverSecondsHistogram.Observe(deliverDur.Seconds())
			metric.BlockDeliverBytesHistogram.WithLabelValues(metric.BlockDeliverKindData).Observe(float64(dataChecksum.SumSize()))
			metric.BlockDeliverBytesHistogram.WithLabelValues(metric.BlockDeliverKindIndex).Observe(float64(indexChecksum.SumSize()))
			metric.BlockDeliverKVPairsHistogram.WithLabelValues(metric.BlockDeliverKindData).Observe(float64(dataChecksum.SumKVS()))
			metric.BlockDeliverKVPairsHistogram.WithLabelValues(metric.BlockDeliverKindIndex).Observe(float64(indexChecksum.SumKVS()))
			return nil
		}()
		if err != nil {
			return
		}
		dataSynced = false

		// Reuse the row buffers for the next batch.
		dataKVs = dataKVs.Clear()
		indexKVs = indexKVs.Clear()

		// Update the table, and save a checkpoint.
		// (the write to the importer is effective immediately, thus update these here)
		// No need to apply a lock since this is the only thread updating `cr.chunk.**`.
		// In local mode, we should write these checkpoint after engine flushed.
		cr.chunk.Checksum.Add(&dataChecksum)
		cr.chunk.Checksum.Add(&indexChecksum)
		cr.chunk.Chunk.Offset = offset
		cr.chunk.Chunk.PrevRowIDMax = rowID

		if dataChecksum.SumKVS() != 0 || indexChecksum.SumKVS() != 0 {
			// No need to save checkpoint if nothing was delivered.
			dataSynced = cr.maybeSaveCheckpoint(rc, t, engineID, cr.chunk, dataEngine, indexEngine)
		}
		failpoint.Inject("SlowDownWriteRows", func() {
			deliverLogger.Warn("Slowed down write rows")
		})
		failpoint.Inject("FailAfterWriteRows", nil)
		// TODO: for local backend, we may save checkpoint more frequently, e.g. after written
		// 10GB kv pairs to data engine, we can do a flush for both data & index engine, then we
		// can safely update current checkpoint.

		failpoint.Inject("LocalBackendSaveCheckpoint", func() {
			if !rc.isLocalBackend() && (dataChecksum.SumKVS() != 0 || indexChecksum.SumKVS() != 0) {
				// No need to save checkpoint if nothing was delivered.
				saveCheckpoint(rc, t, engineID, cr.chunk)
			}
		})
	}
	return
}
// maybeSaveCheckpoint persists the chunk checkpoint, but only once both the
// data and the index engine writers have flushed everything handed to them;
// otherwise the checkpoint could point past data that is not yet durable.
// It reports whether the checkpoint was actually saved.
func (cr *chunkRestore) maybeSaveCheckpoint(
	rc *Controller,
	t *TableRestore,
	engineID int32,
	chunk *checkpoints.ChunkCheckpoint,
	data, index *backend.LocalEngineWriter,
) bool {
	if !data.IsSynced() || !index.IsSynced() {
		return false
	}
	saveCheckpoint(rc, t, engineID, chunk)
	return true
}
// saveCheckpoint enqueues two checkpoint updates for the given chunk: a
// rebase of the table's allocator base (so row IDs are not reused after a
// resume) and the chunk's own position/checksum state. Both are delivered
// asynchronously through rc.saveCpCh.
func saveCheckpoint(rc *Controller, t *TableRestore, engineID int32, chunk *checkpoints.ChunkCheckpoint) {
	// We need to update the AllocBase every time we've finished a file.
	// The AllocBase is determined by the maximum of the "handle" (_tidb_rowid
	// or integer primary key), which can only be obtained by reading all data.
	var base int64
	if t.tableInfo.Core.PKIsHandle && t.tableInfo.Core.ContainsAutoRandomBits() {
		base = t.alloc.Get(autoid.AutoRandomType).Base() + 1
	} else {
		base = t.alloc.Get(autoid.RowIDAllocType).Base() + 1
	}
	rc.saveCpCh <- saveCp{
		tableName: t.tableName,
		merger: &checkpoints.RebaseCheckpointMerger{
			AllocBase: base,
		},
	}
	rc.saveCpCh <- saveCp{
		tableName: t.tableName,
		merger: &checkpoints.ChunkCheckpointMerger{
			EngineID:          engineID,
			Key:               chunk.Key,
			Checksum:          chunk.Checksum,
			Pos:               chunk.Chunk.Offset,
			RowID:             chunk.Chunk.PrevRowIDMax,
			ColumnPermutation: chunk.ColumnPermutation,
		},
	}
}
// encodeLoop reads rows from cr.parser until the chunk's end offset or EOF,
// encodes each row into KV pairs, and ships them to the deliver loop in
// packets bounded by maxKvPairsCnt rows / minDeliverBytes bytes. Rows that
// fail to encode are recorded via the error manager and skipped (up to the
// configured limit). A final empty packet is sent as the terminator. It
// returns the cumulative read and encode durations.
//nolint:nakedret // TODO: refactor
func (cr *chunkRestore) encodeLoop(
	ctx context.Context,
	kvsCh chan<- []deliveredKVs,
	t *TableRestore,
	logger log.Logger,
	kvEncoder kv.Encoder,
	deliverCompleteCh <-chan deliverResult,
	rc *Controller,
) (readTotalDur time.Duration, encodeTotalDur time.Duration, err error) {
	// send pushes a packet to the deliver loop, aborting early if the context
	// is canceled or the deliver loop finished prematurely (which can only
	// mean it hit an error).
	send := func(kvs []deliveredKVs) error {
		select {
		case kvsCh <- kvs:
			return nil
		case <-ctx.Done():
			return ctx.Err()
		case deliverResult, ok := <-deliverCompleteCh:
			if deliverResult.err == nil && !ok {
				deliverResult.err = ctx.Err()
			}
			if deliverResult.err == nil {
				deliverResult.err = errors.New("unexpected premature fulfillment")
				logger.DPanic("unexpected: deliverCompleteCh prematurely fulfilled with no error", zap.Bool("chIsOpen", ok))
			}
			return errors.Trace(deliverResult.err)
		}
	}

	pauser, maxKvPairsCnt := rc.pauser, rc.cfg.TikvImporter.MaxKVPairs
	initializedColumns, reachEOF := false, false
	for !reachEOF {
		// Honor pause requests (e.g. from the web interface) between packets.
		if err = pauser.Wait(ctx); err != nil {
			return
		}
		offset, _ := cr.parser.Pos()
		if offset >= cr.chunk.Chunk.EndOffset {
			break
		}

		var readDur, encodeDur time.Duration
		canDeliver := false
		kvPacket := make([]deliveredKVs, 0, maxKvPairsCnt)
		curOffset := offset
		var newOffset, rowID int64
		var kvSize uint64
	outLoop:
		for !canDeliver {
			readDurStart := time.Now()
			err = cr.parser.ReadRow()
			columnNames := cr.parser.Columns()
			newOffset, rowID = cr.parser.Pos()

			switch errors.Cause(err) {
			case nil:
				if !initializedColumns {
					// First successful row: derive the column permutation
					// from the file's column names (once per chunk).
					if len(cr.chunk.ColumnPermutation) == 0 {
						if err = t.initializeColumns(columnNames, cr.chunk); err != nil {
							return
						}
					}
					initializedColumns = true
				}
			case io.EOF:
				reachEOF = true
				break outLoop
			default:
				err = errors.Annotatef(err, "in file %s at offset %d", &cr.chunk.Key, newOffset)
				return
			}
			readDur += time.Since(readDurStart)
			encodeDurStart := time.Now()
			lastRow := cr.parser.LastRow()
			// sql -> kv
			kvs, encodeErr := kvEncoder.Encode(logger, lastRow.Row, lastRow.RowID, cr.chunk.ColumnPermutation, cr.chunk.Key.Path, curOffset)
			encodeDur += time.Since(encodeDurStart)

			hasIgnoredEncodeErr := false
			if encodeErr != nil {
				// Record the bad row; RecordTypeError returns nil when the
				// error is within the configured tolerance, so `err` stays nil
				// and the row is simply skipped.
				rowText := tidb.EncodeRowForRecord(t.encTable, rc.cfg.TiDB.SQLMode, lastRow.Row, cr.chunk.ColumnPermutation)
				encodeErr = rc.errorMgr.RecordTypeError(ctx, logger, t.tableName, cr.chunk.Key.Path, newOffset, rowText, encodeErr)
				err = errors.Annotatef(encodeErr, "in file %s at offset %d", &cr.chunk.Key, newOffset)
				hasIgnoredEncodeErr = true
			}
			cr.parser.RecycleRow(lastRow)
			curOffset = newOffset

			if err != nil {
				return
			}
			if hasIgnoredEncodeErr {
				continue
			}

			kvPacket = append(kvPacket, deliveredKVs{kvs: kvs, columns: columnNames, offset: newOffset, rowID: rowID})
			kvSize += kvs.Size()
			failpoint.Inject("mock-kv-size", func(val failpoint.Value) {
				kvSize += uint64(val.(int))
			})
			// pebble cannot allow > 4.0G kv in one batch.
			// we will meet pebble panic when import sql file and each kv has the size larger than 4G / maxKvPairsCnt.
			// so add this check.
			if kvSize >= minDeliverBytes || len(kvPacket) >= maxKvPairsCnt || newOffset == cr.chunk.Chunk.EndOffset {
				canDeliver = true
				kvSize = 0
			}
		}
		encodeTotalDur += encodeDur
		metric.RowEncodeSecondsHistogram.Observe(encodeDur.Seconds())
		readTotalDur += readDur
		metric.RowReadSecondsHistogram.Observe(readDur.Seconds())
		metric.RowReadBytesHistogram.Observe(float64(newOffset - offset))

		if len(kvPacket) != 0 {
			deliverKvStart := time.Now()
			if err = send(kvPacket); err != nil {
				return
			}
			metric.RowKVDeliverSecondsHistogram.Observe(time.Since(deliverKvStart).Seconds())
		}
	}

	// Terminator: tell the deliver loop no more packets are coming.
	err = send([]deliveredKVs{})
	return
}
// restore runs the two-goroutine pipeline for a single chunk: this goroutine
// executes encodeLoop (parse + encode rows) while a spawned goroutine runs
// deliverLoop (write KVs to the data/index engines). The deliver result is
// collected via deliverCompleteCh; closing kvsCh in the deferred cleanup is
// what lets the deliver loop terminate.
func (cr *chunkRestore) restore(
	ctx context.Context,
	t *TableRestore,
	engineID int32,
	dataEngine, indexEngine *backend.LocalEngineWriter,
	rc *Controller,
) error {
	// Create the encoder.
	kvEncoder, err := rc.backend.NewEncoder(t.encTable, &kv.SessionOptions{
		SQLMode:   rc.cfg.TiDB.SQLMode,
		Timestamp: cr.chunk.Timestamp,
		SysVars:   rc.sysVars,
		// use chunk.PrevRowIDMax as the auto random seed, so it can stay the same value after recover from checkpoint.
		AutoRandomSeed: cr.chunk.Chunk.PrevRowIDMax,
	})
	if err != nil {
		return err
	}

	kvsCh := make(chan []deliveredKVs, maxKVQueueSize)
	deliverCompleteCh := make(chan deliverResult)

	defer func() {
		kvEncoder.Close()
		kvEncoder = nil
		close(kvsCh)
	}()

	go func() {
		defer close(deliverCompleteCh)
		dur, err := cr.deliverLoop(ctx, kvsCh, t, engineID, dataEngine, indexEngine, rc)
		select {
		case <-ctx.Done():
		case deliverCompleteCh <- deliverResult{dur, err}:
		}
	}()

	logTask := t.logger.With(
		zap.Int32("engineNumber", engineID),
		zap.Int("fileIndex", cr.index),
		zap.Stringer("path", &cr.chunk.Key),
	).Begin(zap.InfoLevel, "restore file")

	readTotalDur, encodeTotalDur, err := cr.encodeLoop(ctx, kvsCh, t, logTask.Logger, kvEncoder, deliverCompleteCh, rc)
	if err != nil {
		return err
	}

	select {
	case deliverResult, ok := <-deliverCompleteCh:
		if ok {
			logTask.End(zap.ErrorLevel, deliverResult.err,
				zap.Duration("readDur", readTotalDur),
				zap.Duration("encodeDur", encodeTotalDur),
				zap.Duration("deliverDur", deliverResult.totalDur),
				zap.Object("checksum", &cr.chunk.Checksum),
			)
			return errors.Trace(deliverResult.err)
		}
		// else, this must cause by ctx cancel
		return ctx.Err()
	case <-ctx.Done():
		return ctx.Err()
	}
}
| c4pt0r/tidb | br/pkg/lightning/restore/restore.go | GO | apache-2.0 | 75,726 |
/* -------------------------------------------------------------------------- */
/* Copyright 2002-2016, OpenNebula Project, OpenNebula Systems */
/* */
/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
/* not use this file except in compliance with the License. You may obtain */
/* a copy of the License at */
/* */
/* http://www.apache.org/licenses/LICENSE-2.0 */
/* */
/* Unless required by applicable law or agreed to in writing, software */
/* distributed under the License is distributed on an "AS IS" BASIS, */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
/* See the License for the specific language governing permissions and */
/* limitations under the License. */
/* -------------------------------------------------------------------------- */
define(function(require) {
/*
DEPENDENCIES
*/
// require('foundation.tab');
var BaseFormPanel = require('utils/form-panels/form-panel');
var Sunstone = require('sunstone');
var Locale = require('utils/locale');
//var Tips = require('utils/tips');
var TemplateUtils = require('utils/template-utils');
var WizardFields = require('utils/wizard-fields');
var RoleTab = require('tabs/vmgroup-tab/utils/role-tab');
var AffinityRoleTab = require('tabs/vmgroup-tab/utils/affinity-role-tab');
var Notifier = require('utils/notifier');
var Utils = require('../utils/common');
/*
TEMPLATES
*/
var TemplateWizardHTML = require('hbs!./create/wizard');
var TemplateAdvancedHTML = require('hbs!./create/advanced');
/*
CONSTANTS
*/
var FORM_PANEL_ID = require('./create/formPanelId');
var TAB_ID = require('../tabId');
/*
CONSTRUCTOR
*/
function FormPanel() {
this.formPanelId = FORM_PANEL_ID;
this.tabId = TAB_ID;
this.affinity_role_tab = new AffinityRoleTab([]);
this.actions = {
'create': {
'title': Locale.tr("Create Virtual Machine Group"),
'buttonText': Locale.tr("Create"),
'resetButton': true
},
'update': {
'title': Locale.tr("Update Virtual Machine Group"),
'buttonText': Locale.tr("Update"),
'resetButton': false
}
};
BaseFormPanel.call(this);
}
FormPanel.FORM_PANEL_ID = FORM_PANEL_ID;
FormPanel.prototype = Object.create(BaseFormPanel.prototype);
FormPanel.prototype.constructor = FormPanel;
FormPanel.prototype.htmlWizard = _htmlWizard;
FormPanel.prototype.htmlAdvanced = _htmlAdvanced;
FormPanel.prototype.submitWizard = _submitWizard;
FormPanel.prototype.submitAdvanced = _submitAdvanced;
FormPanel.prototype.onShow = _onShow;
FormPanel.prototype.fill = _fill;
FormPanel.prototype.setup = _setup;
FormPanel.prototype.addRoleTab = _add_role_tab;
return FormPanel;
/*
FUNCTION DEFINITIONS
*/
/**
 * Renders the wizard-mode HTML for the form panel.
 * @return {string} compiled Handlebars markup
 */
function _htmlWizard() {
  // NOTE: a local "opts" object ({info: false, select: true}) was previously
  // built here but never used anywhere; it has been removed.
  return TemplateWizardHTML({
    'affinity-role-tab': this.affinity_role_tab.html(),
    'formPanelId': this.formPanelId
  });
}

/**
 * Renders the advanced (raw template) mode HTML.
 * @return {string} compiled Handlebars markup
 */
function _htmlAdvanced() {
  return TemplateAdvancedHTML({formPanelId: this.formPanelId});
}
/**
 * One-time DOM wiring for the wizard: the roles tab bar, the "add role"
 * button and the parent-role selection table shown inside each role tab.
 * @param {jQuery} context - root element of the form panel
 */
function _setup(context) {
this.roleTabObjects = {};
var that = this;
var roles_index = 0;
this.affinity_role_tab.setup(context);
// Fill parents table
// Each time a tab is clicked the table is filled with existing tabs (roles)
// Selected roles are kept
// TODO If the name of a role is changed and is selected, selection will be lost
$("#roles_tabs", context).on("click", "a", function() {
var tab_id = "#"+this.id+"Tab";
var str = "";
$(tab_id+" .parent_roles").hide();
var parent_role_available = false;
// Offer every other named role as a candidate parent of the clicked role.
$("#roles_tabs_content #role_name", context).each(function(){
if ($(this).val() != "" && ($(this).val() != $(tab_id+" #role_name", context).val())) {
parent_role_available = true;
str += "<tr>\
<td style='width:10%'>\
<input class='check_item' type='checkbox' value='"+$(this).val()+"' id='"+$(this).val()+"'/>\
</td>\
<td>"+$(this).val()+"</td>\
</tr>";
}
});
if (parent_role_available) {
$(tab_id+" .parent_roles", context).show();
}
// Remember which parents were checked before rebuilding the table,
// then restore the selection afterwards.
var selected_parents = [];
$(tab_id+" .parent_roles_body input:checked", context).each(function(){
selected_parents.push($(this).val());
});
$(tab_id+" .parent_roles_body", context).html(str);
$.each(selected_parents, function(){
$(tab_id+" .parent_roles_body #"+this, context).attr('checked', true);
});
});
// Every click on the button creates a fresh role tab with a unique index.
$("#tf_btn_roles", context).bind("click", function(){
that.addRoleTab(roles_index, context);
roles_index++;
return false;
});
/*$("#btn_refresh_roles", context).bind("click", function(){
$("#btn_refresh_roles", context).html("<i class='fa fa-angle-double-down'></i> "+Locale.tr("Refresh roles"));
that.affinity_role_tab.refresh(context, that.roleTabObjects);
});*/
//---------btn_group_vm_roles
Foundation.reflow(context, 'tabs');
// Add first role
$("#tf_btn_roles", context).trigger("click");
//Tips.setup();
return false;
}
/**
 * Collects the wizard fields and either creates a new VM Group or updates
 * the existing one (this.resourceId).
 * @param {jQuery} context - root element of the form panel
 * @return {boolean} false, to stop the default form submission
 */
function _submitWizard(context) {
  // Bug fix: "that" was previously assigned without "var", leaking an
  // implicit global (and throwing a ReferenceError in strict mode).
  var that = this;
  var name = WizardFields.retrieveInput($('#vm_group_name', context));
  var description = WizardFields.retrieveInput($('#vm_group_description', context));
  var role = [];
  // Gather one role definition per role tab.
  $('.role_content', context).each(function() {
    var role_id = $(this).attr("role_id");
    role.push(that.roleTabObjects[role_id].retrieve($(this)));
  });
  // Retrieve the affinity data (see role-tab.js).
  var roles_affinity = this.affinity_role_tab.retrieve(context);
  var vm_group_json = {
    "NAME" : name,
    "DESCRIPTION": description,
    "ROLE" : role,
  };
  vm_group_json = $.extend(vm_group_json, roles_affinity);
  if (this.action == "create") {
    vm_group_json = {
      "vm_group" : vm_group_json
    };
    Sunstone.runAction("VMGroup.create", JSON.parse(JSON.stringify(vm_group_json)));
    return false;
  } else if (this.action == "update") {
    // The group name is immutable; drop it from the update payload.
    delete vm_group_json["NAME"];
    Sunstone.runAction(
      "VMGroup.update",
      this.resourceId,
      TemplateUtils.templateToString(vm_group_json));
    return false;
  }
}
/**
 * Submits the raw template typed into the advanced tab, dispatching either
 * a create or an update action depending on the panel mode.
 * @param {jQuery} context - root element of the form panel
 * @return {boolean} false, to stop the default form submission
 */
function _submitAdvanced(context) {
  var template = $('textarea#template', context).val();
  if (this.action == "create") {
    Sunstone.runAction("VMGroup.create", {vm_group: {vm_group_raw: template}});
    return false;
  }
  if (this.action == "update") {
    Sunstone.runAction("VMGroup.update_template", this.resourceId, template);
    return false;
  }
}
/**
 * Propagates the "shown" event to every role tab in the wizard.
 * @param {jQuery} context - root element of the form panel
 */
function _onShow(context) {
  var roleTabs = this.roleTabObjects;
  $('.role_content', context).each(function() {
    roleTabs[$(this).attr("role_id")].onShow();
  });
}
/**
 * Pre-populates the wizard when editing an existing VM Group.
 * Role creation is disabled in this mode: the "new role" row is removed and
 * existing role names are exposed through a toggling multi-select instead.
 * @param {jQuery} context - root element of the form panel
 * @param {Object} element - VM Group resource as returned by OpenNebula
 */
function _fill(context, element) {
// Editing an existing group: drop the "new role" row from the wizard.
$("#new_role", context)[0].parentElement.remove();
var that = this;
this.setHeader(element);
this.resourceId = element.ID;
$('#template', context).val(TemplateUtils.templateToString(element.TEMPLATE));
WizardFields.fillInput($('#vm_group_name',context), element.NAME);
// The group name is immutable once created.
$('#vm_group_name',context).prop("disabled", true);
WizardFields.fillInput($('#vm_group_description', context), element.TEMPLATE.DESCRIPTION );
//Remove row of roles-----------------------------------------------------------------
// List every existing role name in the multi-select; the mousedown handler
// toggles selection so multi-select works without Ctrl/Cmd.
$.each(element.ROLES.ROLE, function(index, value){
var name = value.NAME;
if(name){
var html = "<option id='" + name + "' class='roles' value=" + name + "> " + name + "</option>";
$("#list_roles_select").append(html);
$("select #" + name).mousedown(function(e) {
e.preventDefault();
$(this).prop('selected', !$(this).prop('selected'));
return false;
});
}
});
this.affinity_role_tab.fill(context, element);
$("#btn_refresh_roles", context).remove();
$("#affinity",context).show();
//Remove row of roles------------------------------------------------------------------
/*var role_context_first = $('.role_content', context).first();
var role_id_first = $(role_context_first).attr("role_id");
delete that.roleTabObjects[role_id_first];
// Populates the Avanced mode Tab
var roles_names = [];
var data = [];
if(Array.isArray(element.ROLES.ROLE))
data = element.ROLES.ROLE;
else
data.push(element.ROLES.ROLE);
$.each(data, function(index, value){
roles_names.push(value.NAME);
$("#tf_btn_roles", context).click();
var role_context = $('.role_content', context).last();
var role_id = $(role_context).attr("role_id");
that.roleTabObjects[role_id].fill(role_context, value,element);
});
$.each(data, function(index, value){
var role_context = $('.role_content', context)[index];
var str = "";
$.each(roles_names, function(){
if (this != value.NAME) {
str += "<tr>\
<td style='width:10%'>\
<input class='check_item' type='checkbox' value='"+this+"' id='"+this+"'/>\
</td>\
<td>"+this+"</td>\
</tr>";
}
});
$(".parent_roles_body", role_context).html(str);
if (value.parents) {
$.each(value.parents, function(index, value){
$(".parent_roles_body #"+this, role_context).attr('checked', true);
});
}
});*/
//Remove first tab role, is empty.
//$('i.remove-tab', context).first().click();
//$("#tf_btn_roles", context).click();
}
/**
 * Creates a new role tab: appends the tab content section, the tab list
 * entry, wires rename validation and the close (remove-tab) handler, and
 * registers the RoleTab object under this.roleTabObjects[role_id].
 * @param {number} role_id - unique index for the new role tab
 * @param {jQuery} dialog - root element containing the roles tab bar
 */
function _add_role_tab(role_id, dialog) {
var that = this;
var html_role_id = 'role' + role_id;
var role_tab = new RoleTab(html_role_id);
that.roleTabObjects[role_id] = role_tab;
// Append the new div containing the tab and add the tab to the list
var role_section = $('<div id="'+html_role_id+'Tab" class="tabs-panel role_content wizard_internal_tab" role_id="'+role_id+'">'+
role_tab.html() +
'</div>').appendTo($("#roles_tabs_content", dialog));
_redo_service_vmgroup_selector_role(dialog, role_section);
// Validate role renames: reject names containing special characters,
// otherwise propagate the rename to the affinity tab and the tab label.
// NOTE(review): '/' and '*' appear twice in this blacklist; harmless but
// presumably unintended.
role_section.on("change", "#role_name", function(){
var val = true;
var chars = ['/','*','&','|',':', String.fromCharCode(92),'"', ';', '/',String.fromCharCode(39),'#','{','}','$','<','>','*'];
var newName = $(this).val();
$.each(chars, function(index, value){
if(newName.indexOf(value) != -1 && val){
val = false;
}
});
if(val){
that.affinity_role_tab.refresh($(this).val(), role_tab.oldName());
role_tab.changeNameTab(newName);
} else {
Notifier.notifyError(Locale.tr("The new role name contains invalid characters."));
}
});
//Tips.setup(role_section);
var a = $("<li class='tabs-title'>\
<a class='text-center' id='"+html_role_id+"' href='#"+html_role_id+"Tab'>\
<span>\
<i class='off-color fa fa-cube fa-3x'/>\
<br>\
<span id='role_name_text'>"+Locale.tr("Role ")+role_id+"</span>\
</span>\
<i class='fa fa-times-circle remove-tab'></i>\
</a>\
</li>").appendTo($("ul#roles_tabs", dialog));
Foundation.reInit($("ul#roles_tabs", dialog));
$("a", a).trigger("click");
// close icon: removing the tab on click
a.on("click", "i.remove-tab", function() {
var target = $(this).parent().attr("href");
var li = $(this).closest('li');
var ul = $(this).closest('ul');
var content = $(target);
var role_id = content.attr("role_id");
li.remove();
content.remove();
// If the removed tab was active, activate the last remaining tab.
if (li.hasClass('is-active')) {
$('a', ul.children('li').last()).click();
}
that.affinity_role_tab.removeRole(role_tab.oldName());
delete that.roleTabObjects[role_id];
return false;
});
role_tab.setup(role_section);
role_tab.onShow();
}
// NOTE(review): this helper iterates the role sections but the values it
// computes are never used, so it is effectively a no-op; presumably a
// leftover from an earlier service/vmgroup selector refresh — confirm
// before removing it or its call site in _add_role_tab.
function _redo_service_vmgroup_selector_role(dialog, role_section){
$('#roles_tabs_content .role_content', dialog).each(function(){
var role_section = this;
var role_tab_id = $(role_section).attr('id');
});
}
});
| goberle/one | src/sunstone/public/app/tabs/vmgroup-tab/form-panels/create.js | JavaScript | apache-2.0 | 12,941 |
import sys
from drone.actions.emr_launcher import launch_emr_task
from drone.actions.ssh_launcher import launch_ssh_task
from drone.job_runner.dependency_manager import dependencies_are_met
from drone.job_runner.job_progress_checker import check_running_job_progress
from drone.metadata.metadata import get_job_info, job_status, set_ready, set_running, set_failed
task_launcher = {'ssh': launch_ssh_task,
'emr': launch_emr_task}
def process(job_config, settings):
    """Advance every scheduled instance of a job through its status machine.

    For each row returned by the metadata store, the instance is retried
    (failed, while retries remain and dependencies are met), progress-checked
    (running), started (ready, or not_ready with satisfied dependencies) or
    left alone (succeeded).  An unknown status aborts the process.
    """
    for job_id, schedule_time, execution_time, status, runs, uid in get_job_info(job_config.get('id'),
                                                                                 db_name=settings.metadata):
        if status == job_status.get('failed'):
            # Retry only while the configured retry budget exceeds the runs so far.
            retry_limit = int(job_config.get('retry')) if job_config.get('retry') else 0
            if retry_limit > int(runs):
                settings.logger.debug(
                    '%s runs %s. set retries %s.' % (job_config.get('id'), runs, job_config.get('retry')))
                if dependencies_are_met(job_config, schedule_time, settings):
                    set_ready(job_config.get('id'), schedule_time, db_name=settings.metadata)
                    settings.logger.info('Job "%s" "%s" set as ready' % (job_config.get('id'), schedule_time))
                    run(job_config, schedule_time, settings)
        elif status == job_status.get('running'):
            check_running_job_progress(job_config, schedule_time, uid, settings)
        elif status == job_status.get('ready'):
            run(job_config, schedule_time, settings)
        elif status == job_status.get('succeeded'):
            pass
        elif status == job_status.get('not_ready'):
            if dependencies_are_met(job_config, schedule_time, settings):
                set_ready(job_config.get('id'), schedule_time, db_name=settings.metadata)
                settings.logger.info('Job "%s" "%s" set as ready' % (job_config.get('id'), schedule_time))
                run(job_config, schedule_time, settings)
        else:
            settings.logger.error('Unknown job status "%s"' % status)
            sys.exit(1)
def run(job_config, schedule_time, settings):
    """Launch a single job instance and record the outcome in the metadata DB.

    Dispatches on ``job_config['type']`` through the module-level
    ``task_launcher`` table, marking the job running on success and failed
    otherwise.
    """
    settings.logger.info('Starting job "%s" "%s"' % (job_config.get('id'), schedule_time))
    job_type = job_config.get('type')
    # Bug fix: the original asserted inside a bare try/except, logged a
    # warning and then fell through, crashing with "NoneType is not callable"
    # when the type was unsupported.  Validate explicitly and bail out.
    if job_type not in settings.supported_job_types:
        settings.logger.warning(
            'Unsupported job type %s. Valid types are %s' % (job_type, str(settings.supported_job_types)))
        return
    task_launched_successfully, uid = task_launcher.get(job_type)(job_config, schedule_time, settings)
    if task_launched_successfully:
        set_running(job_config.get('id'), schedule_time, uid, db_name=settings.metadata)
        settings.logger.info('Started job "%s" "%s"' % (job_config.get('id'), schedule_time))
    else:
        set_failed(job_config.get('id'), schedule_time, db_name=settings.metadata)
        settings.logger.warning('Failed to start job "%s" "%s"' % (job_config.get('id'), schedule_time))
| grafke/Drone-workflow-controller | drone/job_runner/job_runner.py | Python | apache-2.0 | 3,220 |
function Controller() {
// Alloy-generated data-binding callback: rebuilds the contacts TableView
// rows every time the "contato" collection fires a change event.  This file
// is produced by the Alloy compiler; edits here are normally overwritten.
function __alloyId24() {
// NOTE(review): this expression reads .opts but discards the result, so it
// has no effect; generated code — presumably intended as a default-init.
__alloyId24.opts || {};
var models = __alloyId23.models;
var len = models.length;
var rows = [];
// One TableViewRow per model, carrying the model's alloy_id so row clicks
// can look the model back up.
for (var i = 0; len > i; i++) {
var __alloyId9 = models[i];
__alloyId9.__transform = {};
var __alloyId10 = Ti.UI.createTableViewRow({
layout: "vertical",
font: {
fontSize: "16dp"
},
height: "auto",
title: "undefined" != typeof __alloyId9.__transform["nome"] ? __alloyId9.__transform["nome"] : __alloyId9.get("nome"),
model: "undefined" != typeof __alloyId9.__transform["alloy_id"] ? __alloyId9.__transform["alloy_id"] : __alloyId9.get("alloy_id"),
editable: "true"
});
rows.push(__alloyId10);
var __alloyId12 = Ti.UI.createView({
layout: "vertical"
});
__alloyId10.add(__alloyId12);
// Contact name label.
var __alloyId14 = Ti.UI.createLabel({
width: Ti.UI.SIZE,
height: Ti.UI.SIZE,
right: "10dp",
color: "blue",
font: {
fontSize: "16dp"
},
text: "undefined" != typeof __alloyId9.__transform["nome"] ? __alloyId9.__transform["nome"] : __alloyId9.get("nome")
});
__alloyId12.add(__alloyId14);
var __alloyId16 = Ti.UI.createView({
height: Ti.UI.SIZE,
width: Ti.UI.FILL
});
__alloyId12.add(__alloyId16);
// Horizontal scroller holding the two contact photos.
var __alloyId18 = Ti.UI.createScrollView({
scrollType: "horizontal",
layout: "horizontal",
horizontalWrap: "false"
});
__alloyId16.add(__alloyId18);
var __alloyId20 = Ti.UI.createImageView({
top: "15dp",
image: "undefined" != typeof __alloyId9.__transform["foto1"] ? __alloyId9.__transform["foto1"] : __alloyId9.get("foto1"),
height: "180dp",
width: "320dp"
});
__alloyId18.add(__alloyId20);
var __alloyId22 = Ti.UI.createImageView({
top: "15dp",
image: "undefined" != typeof __alloyId9.__transform["foto2"] ? __alloyId9.__transform["foto2"] : __alloyId9.get("foto2"),
height: "180dp",
width: "320dp"
});
__alloyId18.add(__alloyId22);
}
$.__views.tableviewContatos.setData(rows);
}
// Opens the "add contact" controller as a modal window.
function openAdd1() {
    Alloy.createController("add1").getView().open({
        modal: true
    });
}
// Row click handler: looks up the tapped contact by the alloy_id stored on
// the row and opens the details window inside the home tab.
function maisDetalhes(e) {
    var model = Alloy.Collections.contato.get(e.rowData.model);
    var detailsCtrl = Alloy.createController("detalhesContato", model);
    $.homeTab.open(detailsCtrl.getView());
}
// Alloy-generated controller boilerplate: inherit from BaseController and
// record this controller's path.
require("alloy/controllers/BaseController").apply(this, Array.prototype.slice.call(arguments));
this.__controllerPath = "home";
// NOTE(review): these three conditional expressions discard their results;
// generated code pattern — confirm before touching.
arguments[0] ? arguments[0]["__parentSymbol"] : null;
arguments[0] ? arguments[0]["$model"] : null;
arguments[0] ? arguments[0]["__itemTemplate"] : null;
var $ = this;
var exports = {};
var __defers = {};
// Build the view hierarchy: window > search bar + add button + table view.
$.__views.homeWindow = Ti.UI.createWindow({
backgroundColor: "white",
layout: "vertical",
id: "homeWindow",
titleid: "home"
});
$.__views.contatosSearch = Ti.UI.createSearchBar({
hinttextid: "procurarText",
height: "50dp",
id: "contatosSearch",
showCancel: "false"
});
$.__views.homeWindow.add($.__views.contatosSearch);
$.__views.Btadd = Ti.UI.createButton({
top: "10dp",
width: "200dp",
height: "auto",
borderRadius: "10dp",
font: {
fontSize: "17dp"
},
title: L("adicionar"),
id: "Btadd"
});
$.__views.homeWindow.add($.__views.Btadd);
// Attach handlers now if already defined, otherwise defer to the end.
openAdd1 ? $.__views.Btadd.addEventListener("click", openAdd1) : __defers["$.__views.Btadd!click!openAdd1"] = true;
$.__views.tableviewContatos = Ti.UI.createTableView({
id: "tableviewContatos"
});
$.__views.homeWindow.add($.__views.tableviewContatos);
// Re-render the table whenever the contato collection changes.
var __alloyId23 = Alloy.Collections["contato"] || contato;
__alloyId23.on("fetch destroy change add remove reset", __alloyId24);
maisDetalhes ? $.__views.tableviewContatos.addEventListener("click", maisDetalhes) : __defers["$.__views.tableviewContatos!click!maisDetalhes"] = true;
$.__views.homeTab = Ti.UI.createTab({
backgroundSelectedColor: "#C8C8C8 ",
backgroundFocusedColor: "#999",
icon: "/images/ic_home.png",
window: $.__views.homeWindow,
id: "homeTab",
titleid: "home"
});
$.__views.homeTab && $.addTopLevelView($.__views.homeTab);
// Unbind the data-binding listener when the controller is destroyed.
exports.destroy = function() {
__alloyId23.off("fetch destroy change add remove reset", __alloyId24);
};
_.extend($, $.__views);
// NOTE(review): the collection is fetched twice in a row here; presumably
// hand-added on top of the generated fetch — confirm.
Alloy.Collections.contato.fetch();
var contatos = Alloy.Collections.contato;
contatos.fetch();
// Hook the search bar into the table view's built-in filtering.
$.tableviewContatos.search = $.contatosSearch;
__defers["$.__views.Btadd!click!openAdd1"] && $.__views.Btadd.addEventListener("click", openAdd1);
__defers["$.__views.tableviewContatos!click!maisDetalhes"] && $.__views.tableviewContatos.addEventListener("click", maisDetalhes);
_.extend($, exports);
}
var Alloy = require("alloy"), Backbone = Alloy.Backbone, _ = Alloy._;
module.exports = Controller; | Geeosp/SnapContacts | Resources/alloy/controllers/home.js | JavaScript | apache-2.0 | 5,638 |
package com.xinfan.msgbox.service.dao.dialect;
/**
 * MySQL implementation of the paging {@link Dialect}: wraps the original
 * query in a derived table and appends a {@code LIMIT offset,count} clause.
 */
public class MysqlDialect extends Dialect {

    @Override
    public boolean supportsLimit() {
        return true;
    }

    @Override
    public boolean supportsLimitOffset() {
        return true;
    }

    /**
     * Wraps the query and appends a MySQL LIMIT clause.
     *
     * @param sql               original query (may end in "for update")
     * @param offset            index of the first row to return
     * @param offsetPlaceholder numeric string holding the offset value
     * @param limit             page size (used via limitPlaceholder)
     * @param limitPlaceholder  numeric string holding the page size
     * @return the paged SQL statement
     */
    @Override
    public String getLimitString(String sql, int offset, String offsetPlaceholder, int limit, String limitPlaceholder) {
        sql = sql.trim();
        boolean isForUpdate = false;
        // "for update" must stay outside the derived-table wrapper, so strip
        // it here and re-append it after the LIMIT clause.
        if (sql.toLowerCase().endsWith(" for update")) {
            sql = sql.substring(0, sql.length() - 11);
            isForUpdate = true;
        }
        // StringBuilder: no synchronization needed for this local buffer.
        StringBuilder pagingSelect = new StringBuilder(sql.length() + 100);
        pagingSelect.append("select * from ( ");
        pagingSelect.append(sql);
        // End row = offset + page size.  (The original used a stray "+ + +"
        // unary-plus sequence here; plain addition is what was intended.)
        int endInt = Integer.parseInt(offsetPlaceholder) + Integer.parseInt(limitPlaceholder);
        pagingSelect.append(" ) _t limit " + offset + "," + endInt);
        if (isForUpdate) {
            pagingSelect.append(" for update");
        }
        return pagingSelect.toString();
    }

    /**
     * Builds a count query for the given SQL by wrapping it in
     * {@code select count(*) from ( ... ) _t}.
     *
     * @param sql original query (a trailing "for update" is stripped)
     * @return the count query
     */
    public String getCountSql(String sql)
    {
        sql = sql.trim();
        if (sql.toLowerCase().endsWith(" for update")) {
            sql = sql.substring(0, sql.length() - 11);
        }
        StringBuilder countSelect = new StringBuilder(sql.length() + 100);
        countSelect.append("select count(*) from ( ");
        countSelect.append(sql);
        countSelect.append(" ) _t ");
        return countSelect.toString();
    }
}
| xinfan123/blue-server | bulu-service/src/main/java/com/xinfan/msgbox/service/dao/dialect/MysqlDialect.java | Java | apache-2.0 | 1,374 |
package uk.ac.ebi.embl.api.validation.fixer.entry;
import uk.ac.ebi.embl.api.entry.Entry;
import uk.ac.ebi.embl.api.entry.Text;
import uk.ac.ebi.embl.api.entry.feature.Feature;
import uk.ac.ebi.embl.api.entry.qualifier.Qualifier;
import uk.ac.ebi.embl.api.entry.reference.Person;
import uk.ac.ebi.embl.api.entry.reference.Reference;
import uk.ac.ebi.embl.api.validation.Severity;
import uk.ac.ebi.embl.api.validation.ValidationResult;
import uk.ac.ebi.embl.api.validation.ValidationScope;
import uk.ac.ebi.embl.api.validation.annotation.Description;
import uk.ac.ebi.embl.api.validation.annotation.ExcludeScope;
import uk.ac.ebi.embl.api.validation.check.entry.EntryValidationCheck;
import uk.ac.ebi.embl.api.validation.helper.Utils;
/**
 * Fix works for certain non-ascii characters only. Check Utils.removeAccents limitations.
 * If it is not possible to transliterate certain chars, it will be caught in and rejected
 * by AsciiCharacterCheck.
 */
@Description("Non-ascii characters fixed from \"{0}\" to \"{1}\".")
@ExcludeScope(validationScope = {ValidationScope.NCBI, ValidationScope.NCBI_MASTER})
public class NonAsciiCharacterFix extends EntryValidationCheck {

    /** Message key reported for every transliteration performed. */
    private static final String ASCII_CHARACTER_FIX = "AsciiCharacterFix_1";

    /**
     * Transliterates non-ascii characters in the entry's comment and
     * description, in reference titles and author names, and in the
     * /country and /isolate feature qualifiers, reporting a FIX message
     * for every value that changed.
     *
     * @param entry entry to fix; a null entry yields an empty result
     * @return the accumulated validation result
     */
    public ValidationResult check(Entry entry) {
        result = new ValidationResult();
        if (entry == null)
            return result;

        attemptFix(entry.getComment());
        attemptFix(entry.getDescription());

        for (Reference reference : entry.getReferences()) {
            if (reference.getPublication() != null) {
                String pubTitle = reference.getPublication().getTitle();
                if (pubTitle != null) {
                    String fixedPubTitle = fixedStr(pubTitle);
                    if (!fixedPubTitle.equals(pubTitle)) {
                        reference.getPublication().setTitle(fixedPubTitle);
                        reportMessage(Severity.FIX, reference.getOrigin(), ASCII_CHARACTER_FIX, pubTitle, fixedPubTitle);
                    }
                }
                if (reference.getPublication().getAuthors() != null) {
                    for (Person author : reference.getPublication().getAuthors()) {
                        String firstName = author.getFirstName();
                        if (firstName != null) {
                            String fixedFirstName = fixedStr(firstName);
                            if (!fixedFirstName.equals(firstName)) {
                                author.setFirstName(fixedFirstName);
                                reportMessage(Severity.FIX, reference.getOrigin(), ASCII_CHARACTER_FIX, firstName, fixedFirstName);
                            }
                        }
                        String surname = author.getSurname();
                        if (surname != null) {
                            String fixedSurname = fixedStr(surname);
                            if (!fixedSurname.equals(surname)) {
                                author.setSurname(fixedSurname);
                                reportMessage(Severity.FIX, reference.getOrigin(), ASCII_CHARACTER_FIX, surname, fixedSurname);
                            }
                        }
                    }
                }
            }
        }

        for (Feature feature : entry.getFeatures()) {
            for (Qualifier qualifier : feature.getQualifiers()) {
                if (qualifier.getName().equals(Qualifier.COUNTRY_QUALIFIER_NAME)
                        || qualifier.getName().equals(Qualifier.ISOLATE_QUALIFIER_NAME) ) {
                    String qualifierValue = qualifier.getValue();
                    if (qualifierValue != null) {
                        String fixedVal = fixedStr(qualifierValue);
                        if (!fixedVal.equals(qualifierValue)) {
                            qualifier.setValue(fixedVal);
                            reportMessage(Severity.FIX, qualifier.getOrigin(), ASCII_CHARACTER_FIX, qualifierValue, fixedVal);
                        }
                    }
                }
            }
        }
        return result;
    }

    /**
     * Transliterates a Text value in place, reporting old and new values.
     */
    private void attemptFix(Text text) {
        if (text != null && text.getText() != null) {
            String original = text.getText();
            if (Utils.hasNonAscii(original)) {
                String fixed = Utils.removeAccents(original);
                if (!fixed.equals(original)) {
                    text.setText(fixed);
                    // Bug fix: the FIX message previously passed the already
                    // mutated text.getText() as the "before" value, so the
                    // report showed the fixed string twice.
                    reportMessage(Severity.FIX, text.getOrigin(), ASCII_CHARACTER_FIX, original, fixed);
                }
            }
        }
    }

    /** Returns the transliterated form of str, or str itself if all-ascii. */
    private String fixedStr(String str) {
        if (Utils.hasNonAscii(str)) {
            return Utils.removeAccents(str);
        }
        return str;
    }
}
| enasequence/sequencetools | src/main/java/uk/ac/ebi/embl/api/validation/fixer/entry/NonAsciiCharacterFix.java | Java | apache-2.0 | 4,774 |
import scrapy
from scrapy import log
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from rcbi.items import Part
import copy
import json
import re
VARIANT_JSON_REGEX = re.compile("product: ({.*}),")
class ShendronesSpider(CrawlSpider):
  """Spider for shendrones.myshopify.com.

  Follows product links from the "all" collection and yields one Part item
  per Shopify product variant, with price and stock state parsed from the
  ``product: {...}`` JSON blob embedded in each product page.
  """
  name = "shendrones"
  allowed_domains = ["shendrones.myshopify.com"]
  start_urls = ["http://shendrones.myshopify.com/collections/all"]

  rules = (
      Rule(LinkExtractor(restrict_css=[".grid-item"]), callback='parse_item'),
  )

  def parse_item(self, response):
    """Yield one Part item per variant found on a product page.

    If the Shopify JSON blob is not found the page yields nothing.
    """
    item = Part()
    item["site"] = self.name
    variant = {}
    item["variants"] = [variant]
    base_url = response.url
    item["manufacturer"] = "Shendrones"

    # Locate the embedded Shopify product JSON.
    body = response.body_as_unicode()
    m = VARIANT_JSON_REGEX.search(body)
    if m:
      shopify_info = json.loads(m.group(1))
      global_title = shopify_info["title"]
      preorder = False
      # A "Pre Order" suffix on the title marks every variant backordered.
      if global_title.endswith("Pre Order"):
        global_title = global_title[:-len("Pre Order")].strip()
        variant["stock_state"] = "backordered"
        preorder = True
      for v in shopify_info["variants"]:
        if v["title"] != "Default Title":
          item["name"] = global_title + " " + v["title"]
          variant["url"] = base_url + "?variant=" + str(v["id"])
        else:
          item["name"] = global_title
          variant["url"] = base_url
        # Shopify reports prices in cents.  Bug fix: use a float divisor so
        # the cents are not truncated under Python 2's integer division.
        variant["price"] = "${:.2f}".format(v["price"] / 100.0)
        if not preorder:
          if v["inventory_quantity"] <= 0:
            # "deny" means Shopify refuses orders once stock hits zero.
            if v["inventory_policy"] == "deny":
              variant["stock_state"] = "out_of_stock"
            else:
              variant["stock_state"] = "backordered"
          elif v["inventory_quantity"] < 3:
            variant["stock_state"] = "low_stock"
            variant["stock_text"] = "Only " + str(v["inventory_quantity"]) + " left!"
          else:
            variant["stock_state"] = "in_stock"
        yield item
        # Each variant is yielded as its own item: continue from a deep copy
        # so later mutations don't alias the item already yielded.
        item = copy.deepcopy(item)
        variant = item["variants"][0]
| rcbuild-info/scrape | rcbi/rcbi/spiders/Shendrones.py | Python | apache-2.0 | 2,069 |
import * as React from 'react';
import {camelCase} from 'change-case';
import {IVueComponent} from '../ReactifyVue';
import {createReactElement} from '../react-element-creation/CreateReactElements';
/**
 * Hoists every function from `vueComponent.methods` onto the component
 * instance itself, then removes the original `methods` map.
 */
export const copyMethodsToVueComponent = (vueComponent: IVueComponent) => {
    const methods = vueComponent.methods;
    if (methods) {
        for (const methodName of Object.keys(methods)) {
            vueComponent[methodName] = methods[methodName];
        }
        delete vueComponent.methods;
    }
};
/**
 * Copies each incoming React prop onto the Vue component instance.  Props
 * never overwrite existing methods on the component.
 *
 * Cleanup: the original guard also contained the clause
 * `typeof x === 'function' && !x`, which can never be true (a function is
 * always truthy); it has been removed with no behavior change.
 */
export const copyPropsToVueComponent = (vueComponent: IVueComponent, props: any) => {
    if (props) {
        Object.keys(props)
            .forEach(propName => {
                if (typeof vueComponent[propName] !== 'function') {
                    vueComponent[propName] = props[propName];
                }
            });
    }
};
/**
 * Resolves the tag of a React element: `type.tag` for reactified Vue
 * components, `type` itself when it is a plain (non-empty) string,
 * otherwise undefined.
 */
export const getComponentTag = (component: any) => {
    const type = component.type;
    if (type && type.tag) {
        return type.tag;
    }
    if (type && typeof type === 'string') {
        return type;
    }
    return undefined;
};
/**
 * Builds the Vue-style `$slots` map for the component from the incoming
 * React props: React children become the default slot, and each entry of
 * `slotMapping` copies a named prop into a named slot.  Non-string slot
 * children are annotated with their resolved `tag`.
 */
export const copySlotsToVueComponent = (vueComponent: IVueComponent, slotMapping, props) => {
    const reactChildrenArray = props && props.children && React.Children.toArray(props.children) as (React.ReactElement<any>)[];

    // Default slot: the React children, or null when there are none.
    const slots = {
        default: (reactChildrenArray && reactChildrenArray.length) ? reactChildrenArray : null
    };

    // slotMapping maps a slot name to the prop name that feeds it.
    if (slotMapping && props) {
        Object.keys(slotMapping)
            .forEach(slotName => {
                slots[slotName] = props[slotMapping[slotName]] || [];
            });
    }

    // Annotate each element-like slot child with its tag; strings stay as-is.
    Object.keys(slots)
        .forEach(slotName => {
            const slot = slots[slotName];

            if (Array.isArray(slot)) {
                slot.forEach((slotChild, index) => {
                    if (typeof slotChild !== 'string') {
                        slots[slotName][index] = {...slotChild, tag: getComponentTag(slotChild)};
                    }
                });
            }
        });

    vueComponent.$slots = slots;
}
/**
 * Copies every entry of `args` directly onto the Vue component instance.
 */
export const copyArgsToVueComponent = (vueComponent: IVueComponent, args: any) => {
    if (args) {
        for (const argName of Object.keys(args)) {
            vueComponent[argName] = args[argName];
        }
    }
}
/**
 * Invokes the component's watch callbacks for every watched prop whose value
 * changed between the current and next React props.  The next props (and
 * the computed properties derived from them) are applied to the component
 * first, mirroring Vue's update order.
 */
export const handleWatchedProperties = (vueComponent: IVueComponent, currentProps: any, nextProps: any) => {
    if (!vueComponent.watch) {
        return;
    }
    copyPropsToVueComponent(vueComponent, nextProps);
    handleComputedProperties(vueComponent);
    for (const watchedProperty of Object.keys(vueComponent.watch)) {
        if (currentProps[watchedProperty] !== nextProps[watchedProperty]) {
            vueComponent.watch[watchedProperty].apply(vueComponent, [nextProps[watchedProperty]]);
        }
    }
};
/**
 * Evaluates each computed-property function with the component as `this`
 * and stores the result directly on the component instance.
 */
export const handleComputedProperties = (vueComponent: IVueComponent) => {
    const computed = vueComponent.computed;
    if (computed) {
        for (const propertyName of Object.keys(computed)) {
            vueComponent[propertyName] = computed[propertyName].apply(vueComponent, []);
        }
    }
}
/**
 * Builds React `defaultProps` from the Vue component's prop definitions.
 * Prop names are camel-cased to match React conventions.
 *
 * Bug fix: defaults were previously skipped when falsy (`0`, `false`, `''`)
 * because the code tested `propDef.default` for truthiness.  Only props
 * that define no default at all are skipped now.
 *
 * @returns the defaults object, or null when there are none
 */
export const getDefaultProps = (vueComponent: IVueComponent) => {
    if (!vueComponent.props) {
        return null;
    }
    const defaultProps = Object.keys(vueComponent.props).reduce((defaults, propName) => {
        const propDef = vueComponent.props[propName];
        if (propDef.default !== undefined) {
            return {
                ...defaults,
                [camelCase(propName)]: propDef.default
            };
        }
        return defaults;
    }, {});
    return Object.keys(defaultProps).length ? defaultProps : null;
};
/**
 * Installs the runtime helpers that Vue's compiled render functions expect
 * (`_c`, `_t`, `_v`, `_s`, `_e`) onto the component instance.
 */
export const addCompiledTemplateFunctionsToVueComponent = (vueComponent: any, createElement: Function) => {
    vueComponent._self = { _c: createElement.bind(vueComponent) };

    // _t: render a named slot, using the fallback when the slot is empty.
    vueComponent._t = (slotName: string, fallback) => {
        const slotValue = vueComponent.$slots[slotName];
        const slotIsEmpty = !slotValue || !slotValue.length;
        return (fallback && slotIsEmpty) ? fallback : slotValue;
    };

    // _v/_s: text helpers; empty string for falsy values.
    vueComponent._v = (text: string) => text || '';
    vueComponent._s = (text: string) => text || '';

    // _e: empty placeholder node.
    vueComponent._e = () => null;
};
/**
 * Builds the `createElement` implementation used by reactified Vue classes.
 * Supports both call shapes: (element, children) and
 * (element, props, children).
 */
export const generateCreateElementFunctionForClass = (classVueComponentInstance, instantiatedComponents, vueComponent) => {
    return (element, args, children) => {
        const argsArePropsObject = typeof args === 'object' && !Array.isArray(args);
        return argsArePropsObject
            ? createReactElement(element, args, children, instantiatedComponents, vueComponent)
            : createReactElement(element, {}, args, instantiatedComponents, vueComponent);
    };
};
/**
 * Clones the top-level React element rendered by a reactified Vue component,
 * applying the component's tag, className, style and id overrides, and
 * wrapping the ref callback so queued nextTick callbacks fire once the DOM
 * element is attached.
 */
export const applyPropOverridesToTopLevelElement = (reactElement: React.ReactElement<any>, tag: string, self) => {
    const refFunc = (e: HTMLElement) => {
        (reactElement as any).ref(e);
        self.element = e;
        // Bug fix: callbacks were invoked with `this.vueComponent`, but in
        // this arrow function `this` is the module scope, not `self`.
        self.nextTickCallbacks.forEach(callback => callback.apply(self.vueComponent, []));
        self.nextTickCallbacks = [];
        self.hasUnrenderedStateChanges = false;
    };

    const elementWithPropOverrides = {...reactElement, props: { ...reactElement.props}, tag: tag, ref: refFunc};

    if (self.vueComponent.className) {
        // Append the Vue-level class to whatever the element already has.
        const existingClassName = elementWithPropOverrides.props.className || '';
        elementWithPropOverrides.props.className = [existingClassName, ' ', self.vueComponent.className].join('');
    }

    if (self.vueComponent.style) {
        const existingStyles = elementWithPropOverrides.props.style || {};
        elementWithPropOverrides.props.style = {
            ...existingStyles,
            ...self.vueComponent.style
        };
    }

    if (self.vueComponent.id) {
        elementWithPropOverrides.props.id = self.vueComponent.id;
    }

    return elementWithPropOverrides;
};
export const initData = (vueComponent) => {
let state = null;
if (vueComponent.data) {
state = vueComponent.data();
Object.keys(state).forEach(stateKey => {
vueComponent[stateKey] = state[stateKey];
});
}
return state;
}; | bencompton/framework7-react | src/utils/reactify-vue/react-class-creation-and-runtime/ReactClassRuntime.ts | TypeScript | apache-2.0 | 6,487 |
/*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.example.android.sunshine.app;
import android.content.ContentUris;
import android.content.ContentValues;
import android.content.Context;
import android.database.Cursor;
import android.net.Uri;
import android.os.AsyncTask;
import android.text.format.Time;
import android.util.Log;
import com.example.android.sunshine.app.data.WeatherContract;
import com.example.android.sunshine.app.data.WeatherContract.WeatherEntry;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.Vector;
public class FetchWeatherTask extends AsyncTask<String, Void, Void>
{
/** Tag for log output from this task. */
private final String LOG_TAG = FetchWeatherTask.class.getSimpleName();
/** Context used to reach the ContentResolver for weather/location data. */
private final Context mContext;
/**
 * @param context context used for content provider access
 */
public FetchWeatherTask (Context context)
{
mContext = context;
}
// Debug flag; unused in the visible portion of this class.
private boolean DEBUG = true;
/**
* Helper method to handle insertion of a new location in the weather database.
*
* @param locationSetting The location string used to request updates from the server.
* @param cityName A human-readable city name, e.g "Mountain View"
* @param lat the latitude of the city
* @param lon the longitude of the city
* @return the row ID of the added location.
*/
/**
 * Helper method to handle insertion of a new location in the weather database.
 *
 * @param locationSetting The location string used to request updates from the server.
 * @param cityName        A human-readable city name, e.g "Mountain View"
 * @param lat             the latitude of the city
 * @param lon             the longitude of the city
 * @return the row ID of the (existing or newly added) location.
 */
long addLocation (String locationSetting, String cityName, double lat, double lon)
{
    long locationId;

    // First, check if a location with this setting already exists in the db.
    Cursor locationCursor = mContext.getContentResolver().query(
            WeatherContract.LocationEntry.CONTENT_URI,
            new String[] {WeatherContract.LocationEntry._ID},
            WeatherContract.LocationEntry.COLUMN_LOCATION_SETTING + " = ?",
            new String[] {locationSetting},
            null);

    // Bug fix: the cursor was previously leaked if anything between query()
    // and close() threw; close it in a finally block instead.
    try
    {
        if (locationCursor.moveToFirst())
        {
            int locationIdIndex = locationCursor.getColumnIndex(WeatherContract.LocationEntry._ID);
            locationId = locationCursor.getLong(locationIdIndex);
        }
        else
        {
            // Build the ContentValues describing the new location row.
            ContentValues locationValues = new ContentValues();
            locationValues.put(WeatherContract.LocationEntry.COLUMN_CITY_NAME, cityName);
            locationValues.put(WeatherContract.LocationEntry.COLUMN_LOCATION_SETTING, locationSetting);
            locationValues.put(WeatherContract.LocationEntry.COLUMN_COORD_LAT, lat);
            locationValues.put(WeatherContract.LocationEntry.COLUMN_COORD_LONG, lon);

            // Insert the location; the resulting URI carries the new row ID.
            Uri insertedUri = mContext.getContentResolver().insert(
                    WeatherContract.LocationEntry.CONTENT_URI,
                    locationValues
            );
            locationId = ContentUris.parseId(insertedUri);
        }
    }
    finally
    {
        locationCursor.close();
    }

    return locationId;
}
/**
 * Take the String representing the complete forecast in JSON Format and
 * pull out the data we need to construct the Strings needed for the wireframes.
 * <p>
 * Fortunately parsing is easy: constructor takes the JSON string and converts it
 * into an Object hierarchy for us.
 *
 * @param forecastJsonStr raw JSON body returned by the OWM daily-forecast endpoint
 * @param locationSetting location setting the forecast was requested for; passed to
 *                        addLocation() to look up / insert the location row
 * @throws JSONException declared for callers, but note that parse failures are
 *         currently caught and logged inside this method (see the catch block
 *         below), so in practice nothing propagates. NOTE(review): consider
 *         rethrowing or removing the throws clause.
 */
private void getWeatherDataFromJson (String forecastJsonStr, String locationSetting) throws JSONException
{
    // Now we have a String representing the complete forecast in JSON Format.
    // Fortunately parsing is easy: constructor takes the JSON string and converts it
    // into an Object hierarchy for us.
    // These are the names of the JSON objects that need to be extracted.
    // Location information
    final String OWM_CITY = "city";
    final String OWM_CITY_NAME = "name";
    final String OWM_COORD = "coord";
    // Location coordinate
    final String OWM_LATITUDE = "lat";
    final String OWM_LONGITUDE = "lon";
    // Weather information. Each day's forecast info is an element of the "list" array.
    final String OWM_LIST = "list";
    final String OWM_PRESSURE = "pressure";
    final String OWM_HUMIDITY = "humidity";
    final String OWM_WINDSPEED = "speed";
    final String OWM_WIND_DIRECTION = "deg";
    // All temperatures are children of the "temp" object.
    final String OWM_TEMPERATURE = "temp";
    final String OWM_MAX = "max";
    final String OWM_MIN = "min";
    final String OWM_WEATHER = "weather";
    final String OWM_DESCRIPTION = "main";
    final String OWM_WEATHER_ID = "id";
    try
    {
        JSONObject forecastJson = new JSONObject(forecastJsonStr);
        JSONArray weatherArray = forecastJson.getJSONArray(OWM_LIST);
        JSONObject cityJson = forecastJson.getJSONObject(OWM_CITY);
        String cityName = cityJson.getString(OWM_CITY_NAME);
        JSONObject cityCoord = cityJson.getJSONObject(OWM_COORD);
        double cityLatitude = cityCoord.getDouble(OWM_LATITUDE);
        double cityLongitude = cityCoord.getDouble(OWM_LONGITUDE);
        // Insert (or find) the location row first so weather rows can reference its id.
        long locationId = addLocation(locationSetting, cityName, cityLatitude, cityLongitude);
        // Insert the new weather information into the database
        Vector<ContentValues> cVVector = new Vector<>(weatherArray.length());
        // OWM returns daily forecasts based upon the local time of the city that is being
        // asked for, which means that we need to know the GMT offset to translate this data
        // properly.
        // Since this data is also sent in-order and the first day is always the
        // current day, we're going to take advantage of that to get a nice
        // normalized UTC date for all of our weather.
        // NOTE(review): android.text.format.Time is deprecated on newer API levels --
        // confirm before any SDK upgrade.
        Time dayTime = new Time();
        dayTime.setToNow();
        // we start at the day returned by local time. Otherwise this is a mess.
        int julianStartDay = Time.getJulianDay(System.currentTimeMillis(), dayTime.gmtoff);
        // now we work exclusively in UTC
        dayTime = new Time();
        for (int i = 0; i < weatherArray.length(); i++)
        {
            // These are the values that will be collected.
            long dateTime;
            double pressure;
            int humidity;
            double windSpeed;
            double windDirection;
            double high;
            double low;
            String description;
            int weatherId;
            // Get the JSON object representing the day
            JSONObject dayForecast = weatherArray.getJSONObject(i);
            // Cheating to convert this to UTC time, which is what we want anyhow.
            // Relies on the response being in day order, first element = today.
            dateTime = dayTime.setJulianDay(julianStartDay + i);
            pressure = dayForecast.getDouble(OWM_PRESSURE);
            humidity = dayForecast.getInt(OWM_HUMIDITY);
            windSpeed = dayForecast.getDouble(OWM_WINDSPEED);
            windDirection = dayForecast.getDouble(OWM_WIND_DIRECTION);
            // Description is in a child array called "weather", which is 1 element long.
            // That element also contains a weather code.
            JSONObject weatherObject = dayForecast.getJSONArray(OWM_WEATHER).getJSONObject(0);
            description = weatherObject.getString(OWM_DESCRIPTION);
            weatherId = weatherObject.getInt(OWM_WEATHER_ID);
            // Temperatures are in a child object called "temp". Try not to name variables
            // "temp" when working with temperature. It confuses everybody.
            JSONObject temperatureObject = dayForecast.getJSONObject(OWM_TEMPERATURE);
            high = temperatureObject.getDouble(OWM_MAX);
            low = temperatureObject.getDouble(OWM_MIN);
            ContentValues weatherValues = new ContentValues();
            weatherValues.put(WeatherEntry.COLUMN_LOC_KEY, locationId);
            weatherValues.put(WeatherEntry.COLUMN_DATE, dateTime);
            weatherValues.put(WeatherEntry.COLUMN_HUMIDITY, humidity);
            weatherValues.put(WeatherEntry.COLUMN_PRESSURE, pressure);
            weatherValues.put(WeatherEntry.COLUMN_WIND_SPEED, windSpeed);
            weatherValues.put(WeatherEntry.COLUMN_DEGREES, windDirection);
            weatherValues.put(WeatherEntry.COLUMN_MAX_TEMP, high);
            weatherValues.put(WeatherEntry.COLUMN_MIN_TEMP, low);
            weatherValues.put(WeatherEntry.COLUMN_SHORT_DESC, description);
            weatherValues.put(WeatherEntry.COLUMN_WEATHER_ID, weatherId);
            cVVector.add(weatherValues);
        }
        int inserted = 0;
        // add to database in one bulkInsert call rather than row by row
        if (cVVector.size() > 0)
        {
            ContentValues[] cvArray = new ContentValues[cVVector.size()];
            cVVector.toArray(cvArray);
            inserted = mContext.getContentResolver().bulkInsert(WeatherEntry.CONTENT_URI, cvArray);
        }
        Log.d(LOG_TAG, "FetchWeatherTask Complete. " + inserted + " Inserted");
    }
    catch (JSONException e)
    {
        // Parse failures are logged and swallowed here, which makes the
        // "throws JSONException" on the signature effectively dead code.
        Log.e(LOG_TAG, e.getMessage(), e);
        e.printStackTrace();
    }
}
/**
 * Fetches the OpenWeatherMap daily forecast for the location given in
 * {@code params[0]} and hands the raw JSON to
 * {@code getWeatherDataFromJson} for parsing and storage. Runs on the
 * AsyncTask worker thread.
 *
 * @param params params[0] is the location query (zip code / location setting)
 * @return always {@code null}; results are delivered through the content provider
 */
@Override
protected Void doInBackground (String... params)
{
    // If there's no zip code, there's nothing to look up. Verify size of params.
    if (params.length == 0)
    {
        return null;
    }
    String locationQuery = params[0];
    // Declared outside the try/catch so that they can be closed in the finally block.
    HttpURLConnection urlConnection = null;
    BufferedReader reader = null;
    String format = "json";
    String units = "metric";
    int numDays = 14;
    try
    {
        // Construct the URL for the OpenWeatherMap query
        // Possible parameters are avaiable at OWM's forecast API page, at
        // http://openweathermap.org/API#forecast
        final String FORECAST_BASE_URL = "http://api.openweathermap.org/data/2.5/forecast/daily?";
        final String ZIP_CODE_PARAM = "zip";
        final String FORMAT_PARAM = "mode";
        final String UNITS_PARAM = "units";
        final String DAYS_PARAM = "cnt";
        final String APP_ID_PARAM = "appid";
        Uri builtUri = Uri.parse(FORECAST_BASE_URL).buildUpon()
                .appendQueryParameter(APP_ID_PARAM, BuildConfig.OPEN_WEATHER_MAP_API_KEY)
                .appendQueryParameter(ZIP_CODE_PARAM, locationQuery)
                .appendQueryParameter(FORMAT_PARAM, format)
                .appendQueryParameter(UNITS_PARAM, units)
                .appendQueryParameter(DAYS_PARAM, Integer.toString(numDays))
                .build();
        URL url = new URL(builtUri.toString());
        // Create the request to OpenWeatherMap, and open the connection
        urlConnection = (HttpURLConnection) url.openConnection();
        urlConnection.setRequestMethod("GET");
        urlConnection.connect();
        // Read the input stream into a String
        InputStream inputStream = urlConnection.getInputStream();
        if (inputStream == null)
        {
            // Nothing to do.
            return null;
        }
        // Single-threaded work: an unsynchronized StringBuilder is sufficient
        // (the synchronized StringBuffer would buy nothing here).
        StringBuilder buffer = new StringBuilder();
        reader = new BufferedReader(new InputStreamReader(inputStream));
        String line;
        while ((line = reader.readLine()) != null)
        {
            // Since it's JSON, adding a newline isn't necessary (it won't affect parsing)
            // But it does make debugging a *lot* easier if you print out the completed
            // buffer for debugging.
            buffer.append(line).append('\n');
        }
        if (buffer.length() == 0)
        {
            // Stream was empty. No point in parsing.
            return null;
        }
        getWeatherDataFromJson(buffer.toString(), locationQuery);
    }
    catch (IOException e)
    {
        Log.e(LOG_TAG, "Error ", e);
        // If the code didn't successfully get the weather data, there's no point in attemping
        // to parse it.
        return null;
    }
    catch (JSONException e)
    {
        Log.e(LOG_TAG, e.getMessage(), e);
        e.printStackTrace();
    }
    finally
    {
        if (urlConnection != null)
        {
            urlConnection.disconnect();
        }
        if (reader != null)
        {
            try
            {
                reader.close();
            }
            catch (final IOException e)
            {
                Log.e(LOG_TAG, "Error closing stream", e);
            }
        }
    }
    return null;
}
}
| denis-evteev/udacity-android-sunshine-app | app/src/main/java/com/example/android/sunshine/app/FetchWeatherTask.java | Java | apache-2.0 | 12,246 |
using System;
using System.Linq;
namespace NBi.Core.Analysis.Request
{
/// <summary>
/// Filter selecting metadata items by their caption for a given discovery target.
/// </summary>
public class CaptionFilter : IFilter
{
    // Kept protected so derived filter classes can read the raw values directly.
    protected readonly string captionFilter;
    protected readonly DiscoveryTarget targetFilter;

    /// <summary>Caption this filter matches against.</summary>
    public string Value
    {
        get { return captionFilter; }
    }

    /// <summary>Kind of object targeted by the discovery request.</summary>
    public DiscoveryTarget Target
    {
        get { return targetFilter; }
    }

    /// <summary>
    /// Builds an immutable filter for the given caption and discovery target.
    /// </summary>
    /// <param name="caption">Caption to match.</param>
    /// <param name="target">Discovery target the filter applies to.</param>
    public CaptionFilter(string caption, DiscoveryTarget target)
    {
        this.captionFilter = caption;
        this.targetFilter = target;
    }
}
}
| Seddryck/NBi | NBi.Core/Analysis/Request/CaptionFilter.cs | C# | apache-2.0 | 553 |
/**
* @license Copyright 2017 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/
'use strict';

/**
 * Expected Lighthouse audit values for redirects tests
 */

// Fresh value on every run so the intermediate redirect URL below is unique
// and cannot be satisfied from a cached response.
const cacheBuster = Number(new Date());

module.exports = [
  {
    // Three-hop navigation (two delayed redirects): the redirects audit is
    // expected to list all 3 chain items, score below 100, and report a
    // rawValue of at least 500.
    initialUrl: `http://localhost:10200/online-only.html?delay=500&redirect=%2Foffline-only.html%3Fcb=${cacheBuster}%26delay=500%26redirect%3D%2Fredirects-final.html`,
    url: 'http://localhost:10200/redirects-final.html',
    audits: {
      'redirects': {
        score: '<100',
        rawValue: '>=500',
        details: {
          items: {
            length: 3,
          },
        },
      },
    },
  },
  {
    // Single delayed redirect: 2 chain items, perfect score, rawValue >= 250.
    initialUrl: `http://localhost:10200/online-only.html?delay=300&redirect=%2Fredirects-final.html`,
    url: 'http://localhost:10200/redirects-final.html',
    audits: {
      'redirects': {
        score: 100,
        rawValue: '>=250',
        details: {
          items: {
            length: 2,
          },
        },
      },
    },
  },
];
| tkadlec/lighthouse | lighthouse-cli/test/smokehouse/redirects/expectations.js | JavaScript | apache-2.0 | 1,503 |
package org.gsonformat.intellij.process;
import com.intellij.psi.*;
import org.apache.http.util.TextUtils;
import org.gsonformat.intellij.config.Config;
import org.gsonformat.intellij.config.Constant;
import org.gsonformat.intellij.entity.FieldEntity;
import org.gsonformat.intellij.entity.ClassEntity;
import java.util.regex.Pattern;
/**
* Created by dim on 16/11/7.
*/
/**
 * Created by dim on 16/11/7.
 * <p>
 * Processor that emits Google AutoValue style classes: the generated class is
 * made abstract, annotated with {@code @com.google.auto.value.AutoValue}, and
 * each JSON field becomes a public abstract accessor instead of a
 * field/getter/setter triple.
 */
class AutoValueProcessor extends Processor {

    // Matches an already-present AutoValue annotation regardless of package
    // prefix. Pattern is immutable and thread-safe, so compile it once instead
    // of on every injectAutoAnnotation call.
    private static final Pattern AUTO_VALUE_PATTERN = Pattern.compile("@.*?AutoValue");

    @Override
    public void onStarProcess(ClassEntity classEntity, PsiElementFactory factory, PsiClass cls, IProcessor visitor) {
        super.onStarProcess(classEntity, factory, cls, visitor);
        injectAutoAnnotation(factory, cls);
    }

    /**
     * Ensures {@code cls} carries the {@code @AutoValue} annotation and the
     * {@code abstract} modifier, adding either only when missing.
     */
    private void injectAutoAnnotation(PsiElementFactory factory, PsiClass cls) {
        PsiModifierList modifierList = cls.getModifierList();
        if (modifierList == null) {
            // Some PSI classes (e.g. anonymous) expose no modifier list; nothing to do.
            return;
        }
        PsiElement firstChild = modifierList.getFirstChild();
        if (firstChild != null && !AUTO_VALUE_PATTERN.matcher(firstChild.getText()).find()) {
            PsiAnnotation annotationFromText = factory.createAnnotationFromText("@com.google.auto.value.AutoValue", cls);
            modifierList.addBefore(annotationFromText, firstChild);
        }
        if (!modifierList.hasModifierProperty(PsiModifier.ABSTRACT)) {
            modifierList.setModifierProperty(PsiModifier.ABSTRACT, true);
        }
    }

    @Override
    public void generateField(PsiElementFactory factory, FieldEntity fieldEntity, PsiClass cls, ClassEntity classEntity) {
        if (fieldEntity.isGenerate()) {
            StringBuilder fieldSb = new StringBuilder();
            String fieldName = fieldEntity.getGenerateFieldName();
            if (!TextUtils.isEmpty(classEntity.getExtra())) {
                // Flush any pending extra text once, ahead of the first generated member.
                fieldSb.append(classEntity.getExtra()).append("\n");
                classEntity.setExtra(null);
            }
            if (fieldEntity.getTargetClass() != null) {
                fieldEntity.getTargetClass().setGenerate(true);
            }
            fieldSb.append(String.format("public abstract %s %s() ; ", fieldEntity.getFullNameType(), fieldName));
            cls.add(factory.createMethodFromText(fieldSb.toString(), cls));
        }
    }

    @Override
    public void generateGetterAndSetter(PsiElementFactory factory, PsiClass cls, ClassEntity classEntity) {
        // Intentionally empty: AutoValue classes expose abstract accessors only,
        // generated in generateField above.
    }

    @Override
    public void generateConvertMethod(PsiElementFactory factory, PsiClass cls, ClassEntity classEntity) {
        super.generateConvertMethod(factory, cls, classEntity);
        // Append the AutoValue factory method template with the class name spliced in.
        createMethod(factory, Constant.autoValueMethodTemplate.replace("$className$", cls.getName()).trim(), cls);
    }

    @Override
    protected void onEndGenerateClass(PsiElementFactory factory, ClassEntity classEntity, PsiClass parentClass, PsiClass generateClass, IProcessor visitor) {
        super.onEndGenerateClass(factory, classEntity, parentClass, generateClass, visitor);
        injectAutoAnnotation(factory, generateClass);
    }
}
| gengjiawen/GsonFormat | src/main/java/org/gsonformat/intellij/process/AutoValueProcessor.java | Java | apache-2.0 | 2,976 |
#include <bits/stdc++.h>
// Greatest common divisor via Euclid's algorithm. gcd(a, 0) == a.
template<typename T> T gcd(T a, T b) {
    while (b) {
        T r = a % b;
        a = b;
        b = r;
    }
    return a;
}

// Least common multiple. Dividing before multiplying (a / gcd * b) keeps the
// intermediate value at most lcm(a, b); the original a * b / gcd(a, b) form
// can overflow T even when the final result fits.
template<typename T> T lcm(T a, T b) {
    return a / gcd(a, b) * b;
}
// Lower a to b when b is smaller (common contest helper; no-op otherwise).
template<typename T> void chmin(T& a, T b) {
    if (a > b) {
        a = b;
    }
}

// Raise a to b when b is larger (no-op otherwise).
template<typename T> void chmax(T& a, T b) {
    if (a < b) {
        a = b;
    }
}
// Reads a single int from stdin (scanf's return value is ignored).
int in() { int x; scanf("%d", &x); return x; }
using namespace std;
typedef long long Int;   // contest-template shorthand (unused in this solution)
typedef unsigned uint;   // contest-template shorthand (unused in this solution)
int TL[10];              // per-position key numbers read from input, indices 1..9
                         // (presumably the rearranged keypad layout -- confirm vs. problem statement)
char S[110];             // message to type; buffer sized for the problem's length limit
int map_key[10];         // copy of TL used when searching which key produces a letter
string key[10];          // letters engraved on each standard key (filled in main, keys 2..9)
/*
 * Mobitel: multi-tap typing on a remapped phone keypad.
 * Reads 9 ints (the key placed at each position 1..9), then a word; prints the
 * digit sequence needed to type it. The k-th letter on a key takes k presses,
 * and a '#' is emitted between two consecutive letters typed on the same key.
 */
int main(void) {
    // Letters on a standard keypad layout (key 1 carries none).
    key[1] = "";
    key[2] = "abc";
    key[3] = "def";
    key[4] = "ghi";
    key[5] = "jkl";
    key[6] = "mno";
    key[7] = "pqrs";
    key[8] = "tuv";
    key[9] = "wxyz";
    for (int i = 1; i <= 9; i++) {
        scanf("%d", &TL[i]);
        map_key[i] = TL[i];
    }
    scanf("%s", S);
    int N = strlen(S);
    int last = -1;  // position pressed for the previous letter (-1: none yet)
    for (int i = 0; i < N; i++) {
        int id = -1;        // keypad position that produces S[i]
        int press = 0;      // zero-based index of S[i] on that key
        bool sharp = false; // pause marker needed before this letter?
        for (int j = 1; j <= 9; j++) {
            if (key[map_key[j]].find(S[i]) != string::npos) {
                id = j;
                // size() is unsigned, so iterate with size_t to avoid the
                // signed/unsigned comparison of the original int index.
                for (size_t k = 0; k < key[map_key[j]].size(); k++) {
                    if (key[map_key[j]][k] == S[i]) {
                        press = (int) k;
                        break;
                    }
                }
                // '#' is required when the same key types two letters in a row.
                if (i > 0 && id == last) {
                    sharp = true;
                }
                last = j;
                break;
            }
        }
        if (sharp) {
            putchar('#');
        }
        // Multi-tap: the (press+1)-th letter on the key needs press+1 presses.
        // Inner index renamed so it no longer shadows the outer loop's i.
        for (int t = 0; t <= press; t++) {
            printf("%d", id);
        }
    }
    printf("\n");
    return 0;
}
| aajjbb/contest-files | COCI/Mobitel.cpp | C++ | apache-2.0 | 1,433 |
/* Copyright 2008, 2009, 2010 by the Oxford University Computing Laboratory
This file is part of HermiT.
HermiT is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
HermiT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with HermiT. If not, see <http://www.gnu.org/licenses/>.
*/
package org.semanticweb.HermiT.datatypes.owlreal;
/**
 * Singleton sentinel representing positive infinity on the owl:real number line.
 * <p>
 * Equality is identity-based (there is exactly one instance), and
 * {@link #readResolve()} preserves the singleton across Java serialization.
 * All {@link Number} conversion methods throw, since infinity has no finite
 * numeric value.
 */
public final class PlusInfinity extends Number {
    private static final long serialVersionUID = -205551124673073593L;

    /** The unique shared instance. */
    public static final PlusInfinity INSTANCE = new PlusInfinity();

    private PlusInfinity() {
    }

    @Override
    public boolean equals(Object that) {
        // Identity comparison is correct and sufficient for a singleton;
        // the inherited identity hashCode() stays consistent with it.
        return this == that;
    }

    @Override
    public String toString() {
        return "+INF";
    }

    @Override
    public double doubleValue() {
        throw new UnsupportedOperationException();
    }

    @Override
    public float floatValue() {
        throw new UnsupportedOperationException();
    }

    @Override
    public int intValue() {
        throw new UnsupportedOperationException();
    }

    @Override
    public long longValue() {
        throw new UnsupportedOperationException();
    }

    /** Ensures deserialization yields the canonical singleton instance. */
    protected Object readResolve() {
        return INSTANCE;
    }
}
| CPoirot3/OWL-Reasoner | project/src/org/semanticweb/HermiT/datatypes/owlreal/PlusInfinity.java | Java | apache-2.0 | 1,607 |
/*
* Copyright 2012-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.mturk.model.transform;
import java.math.*;
import javax.annotation.Generated;
import com.amazonaws.services.mturk.model.*;
import com.amazonaws.transform.SimpleTypeJsonUnmarshallers.*;
import com.amazonaws.transform.*;
import com.fasterxml.jackson.core.JsonToken;
import static com.fasterxml.jackson.core.JsonToken.*;
/**
* HITLayoutParameter JSON Unmarshaller
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
/**
 * HITLayoutParameter JSON Unmarshaller.
 * <p>
 * Generated code: walks the streaming JSON token stream and populates a
 * {@code HITLayoutParameter} from the "Name" and "Value" fields found directly
 * inside the current object.
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class HITLayoutParameterJsonUnmarshaller implements Unmarshaller<HITLayoutParameter, JsonUnmarshallerContext> {

    /**
     * Reads one HITLayoutParameter object from the context's token stream.
     *
     * @param context streaming unmarshalling context positioned at (or just before)
     *                the object to read
     * @return the populated parameter, or {@code null} if the value is JSON null
     * @throws Exception propagated from the underlying JSON parser
     */
    public HITLayoutParameter unmarshall(JsonUnmarshallerContext context) throws Exception {
        HITLayoutParameter hITLayoutParameter = new HITLayoutParameter();
        // Remember the starting depth/parent so we know when we leave this object.
        int originalDepth = context.getCurrentDepth();
        String currentParentElement = context.getCurrentParentElement();
        int targetDepth = originalDepth + 1;
        JsonToken token = context.getCurrentToken();
        if (token == null)
            token = context.nextToken();
        if (token == VALUE_NULL) {
            // The whole object is JSON null.
            return null;
        }
        while (true) {
            if (token == null)
                break;
            if (token == FIELD_NAME || token == START_OBJECT) {
                // Only fields exactly one level below the entered object count.
                if (context.testExpression("Name", targetDepth)) {
                    context.nextToken();
                    hITLayoutParameter.setName(context.getUnmarshaller(String.class).unmarshall(context));
                }
                if (context.testExpression("Value", targetDepth)) {
                    context.nextToken();
                    hITLayoutParameter.setValue(context.getUnmarshaller(String.class).unmarshall(context));
                }
            } else if (token == END_ARRAY || token == END_OBJECT) {
                if (context.getLastParsedParentElement() == null || context.getLastParsedParentElement().equals(currentParentElement)) {
                    // Back at (or above) the depth we started: the object is finished.
                    if (context.getCurrentDepth() <= originalDepth)
                        break;
                }
            }
            token = context.nextToken();
        }
        return hITLayoutParameter;
    }

    private static HITLayoutParameterJsonUnmarshaller instance;

    // NOTE(review): lazy init is not synchronized, so concurrent first calls may
    // each create an instance. The class appears stateless, so this looks benign
    // -- the same pattern recurs in the generated SDK -- but confirm before
    // relying on instance identity.
    public static HITLayoutParameterJsonUnmarshaller getInstance() {
        if (instance == null)
            instance = new HITLayoutParameterJsonUnmarshaller();
        return instance;
    }
}
| dagnir/aws-sdk-java | aws-java-sdk-mechanicalturkrequester/src/main/java/com/amazonaws/services/mturk/model/transform/HITLayoutParameterJsonUnmarshaller.java | Java | apache-2.0 | 2,995 |
package com.nhpatt.myconference.entities;
import com.google.gson.JsonArray;
/**
* @author Javier Gamarra
*/
/**
 * Immutable event wrapping the JSON array of talks delivered by the backend.
 *
 * @author Javier Gamarra
 */
public class TalkEvent {

    // Raw JSON payload carried by this event; never modified after construction.
    private final JsonArray payload;

    /**
     * Creates an event for the given array of talks.
     *
     * @param talks JSON array of talk objects
     */
    public TalkEvent(JsonArray talks) {
        this.payload = talks;
    }

    /** Returns the JSON array of talks carried by this event. */
    public JsonArray getTalks() {
        return payload;
    }
}
| nhpatt/MyConference | app/src/main/java/com/nhpatt/myconference/entities/TalkEvent.java | Java | apache-2.0 | 312 |