code stringlengths 3 1.05M | repo_name stringlengths 4 116 | path stringlengths 4 991 | language stringclasses 9 values | license stringclasses 15 values | size int32 3 1.05M |
|---|---|---|---|---|---|
// Declare internals
var internals = {};
// Plugin registration
exports.register = function (plugin, options, next) {
plugin.route({ path: '/test2', method: 'GET', handler: function (request, reply) { reply('testing123'); } });
plugin.route({ path: '/test2/path', method: 'GET', handler: function (request, reply) { reply(plugin.path); } });
plugin.log('test', 'abc');
return next();
};
| thebillkidy/WebRTC-Stream | server/node_modules/hapi/test/integration/pack/--test2/lib/index.js | JavaScript | apache-2.0 | 409 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode.cache.query.internal.parse;
import antlr.*;
import org.apache.geode.cache.query.internal.QCompiler;
/**
 * AST node representing an OQL typecast expression.
 * <p>
 * Compiling this node first compiles its children (which pushes the target
 * type onto the compiler's stack) and then emits the typecast operation.
 */
public class ASTTypeCast extends GemFireAST {
  private static final long serialVersionUID = -6368577668325776355L;

  /** No-arg constructor required by ANTLR's AST node factory. */
  public ASTTypeCast() {}

  /** Constructs the node from the ANTLR token that produced it. */
  public ASTTypeCast(Token t) {
    super(t);
  }

  @Override
  public void compile(QCompiler compiler) {
    // Compile children first; this leaves the target type on the stack.
    super.compile(compiler);
    // there's a type on the stack now
    compiler.typecast();
  }
}
| pivotal-amurmann/geode | geode-core/src/main/java/org/apache/geode/cache/query/internal/parse/ASTTypeCast.java | Java | apache-2.0 | 1,274 |
// Copyright (C) 2015 André Bargull. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.
/*---
info: Compound Assignment Operator evaluates its operands from left to right.
description: >
The left-hand side expression is evaluated before the right-hand side.
Left-hand side expression is MemberExpression: base[prop]. base is the
undefined value.
Check operator is "x <<= y".
---*/
// Marker error type: thrown from the property-key expression so the test
// can verify which sub-expression was evaluated first.
function DummyError() { }

// With an undefined base, `base[key()]` must evaluate key() (which throws
// DummyError) before the right-hand side of `<<=` is ever touched.
assert.throws(DummyError, function() {
  var base = undefined;
  var key = function() {
    throw new DummyError();
  };
  var rhs = function() {
    $ERROR("right-hand side expression evaluated");
  };
  base[key()] <<= rhs();
});

// With a non-function property on an undefined base, dereferencing must
// throw a TypeError; neither the property key's toString nor the
// right-hand side may run (each would signal $ERROR if reached).
assert.throws(TypeError, function() {
  var base = undefined;
  var key = {
    toString: function() {
      $ERROR("property key evaluated");
    }
  };
  var rhs = function() {
    $ERROR("right-hand side expression evaluated");
  };
  base[key] <<= rhs();
});
| sebastienros/jint | Jint.Tests.Test262/test/language/expressions/compound-assignment/S11.13.2_A7.6_T2.js | JavaScript | bsd-2-clause | 974 |
/*!
* FileInput Polish Translations
*
* This file must be loaded after 'fileinput.js'. Patterns in braces '{}', or
* any HTML markup tags in the messages must not be converted or translated.
*
* @see http://github.com/kartik-v/bootstrap-fileinput
*
* NOTE: this file must be saved in UTF-8 encoding.
*/
// Polish locale strings for bootstrap-fileinput. Registered on the shared
// $.fn.fileinputLocales map; placeholders in braces ({name}, {size}, ...)
// and embedded HTML tags must be preserved verbatim.
// Fixes: "poczło" -> "poszło" (msgAjaxError), "Przenies" -> "Przenieś"
// (fileActionSettings.dragTitle).
(function ($) {
    "use strict";

    $.fn.fileinputLocales['pl'] = {
        // Basic labels and button captions.
        fileSingle: 'plik',
        filePlural: 'pliki',
        browseLabel: 'Przeglądaj …',
        removeLabel: 'Usuń',
        removeTitle: 'Usuń zaznaczone pliki',
        cancelLabel: 'Przerwij',
        cancelTitle: 'Anuluj wysyłanie',
        pauseLabel: 'Wstrzymaj',
        pauseTitle: 'Wstrzymaj trwające przesyłanie',
        uploadLabel: 'Wgraj',
        uploadTitle: 'Wgraj zaznaczone pliki',
        msgNo: 'Nie',
        msgNoFilesSelected: 'Brak zaznaczonych plików',
        msgPaused: 'Wstrzymano',
        msgCancelled: 'Odwołany',
        msgPlaceholder: 'Wybierz {files} ...',
        msgZoomModalHeading: 'Szczegółowy podgląd',
        // Validation and error messages.
        msgFileRequired: 'Musisz wybrać plik do wgrania.',
        msgSizeTooSmall: 'Plik "{name}" (<b>{size} KB</b>) jest zbyt mały i musi być większy niż <b>{minSize} KB</b>.',
        msgSizeTooLarge: 'Plik o nazwie "{name}" (<b>{size} KB</b>) przekroczył maksymalną dopuszczalną wielkość pliku wynoszącą <b>{maxSize} KB</b>.',
        msgFilesTooLess: 'Minimalna liczba plików do wgrania: <b>{n}</b>.',
        msgFilesTooMany: 'Liczba plików wybranych do wgrania w liczbie <b>({n})</b>, przekracza maksymalny dozwolony limit wynoszący <b>{m}</b>.',
        msgTotalFilesTooMany: 'Możesz wgrać maksymalnie <b>{m}</b> plików (wykryto <b>{n}</b>).',
        msgFileNotFound: 'Plik "{name}" nie istnieje!',
        msgFileSecured: 'Ustawienia zabezpieczeń uniemożliwiają odczyt pliku "{name}".',
        msgFileNotReadable: 'Plik "{name}" nie jest plikiem do odczytu.',
        msgFilePreviewAborted: 'Podgląd pliku "{name}" został przerwany.',
        msgFilePreviewError: 'Wystąpił błąd w czasie odczytu pliku "{name}".',
        msgInvalidFileName: 'Nieprawidłowe lub nieobsługiwane znaki w nazwie pliku "{name}".',
        msgInvalidFileType: 'Nieznany typ pliku "{name}". Tylko następujące rodzaje plików są dozwolone: "{types}".',
        msgInvalidFileExtension: 'Złe rozszerzenie dla pliku "{name}". Tylko następujące rozszerzenia plików są dozwolone: "{extensions}".',
        // Upload lifecycle messages.
        msgUploadAborted: 'Przesyłanie pliku zostało przerwane',
        msgUploadThreshold: 'Przetwarzanie …',
        msgUploadBegin: 'Rozpoczynanie …',
        msgUploadEnd: 'Gotowe!',
        msgUploadResume: 'Wznawianie przesyłania …',
        msgUploadEmpty: 'Brak poprawnych danych do przesłania.',
        msgUploadError: 'Błąd przesyłania',
        msgDeleteError: 'Błąd usuwania',
        msgProgressError: 'Błąd',
        msgValidationError: 'Błąd walidacji',
        msgLoading: 'Wczytywanie pliku {index} z {files} …',
        msgProgress: 'Wczytywanie pliku {index} z {files} - {name} - {percent}% zakończone.',
        msgSelected: '{n} Plików zaznaczonych',
        msgFoldersNotAllowed: 'Metodą przeciągnij i upuść, można przenosić tylko pliki. Pominięto {n} katalogów.',
        // Image-dimension constraints.
        msgImageWidthSmall: 'Szerokość pliku obrazu "{name}" musi być co najmniej {size} px.',
        msgImageHeightSmall: 'Wysokość pliku obrazu "{name}" musi być co najmniej {size} px.',
        msgImageWidthLarge: 'Szerokość pliku obrazu "{name}" nie może przekraczać {size} px.',
        msgImageHeightLarge: 'Wysokość pliku obrazu "{name}" nie może przekraczać {size} px.',
        msgImageResizeError: 'Nie udało się uzyskać wymiaru obrazu, aby zmienić rozmiar.',
        msgImageResizeException: 'Błąd podczas zmiany rozmiaru obrazu.<pre>{errors}</pre>',
        // Fixed typo: "poczło" -> "poszło".
        msgAjaxError: 'Coś poszło nie tak podczas {operation}. Spróbuj ponownie!',
        msgAjaxProgressError: '{operation} nie powiodło się',
        msgDuplicateFile: 'Plik "{name}" o identycznym rozmiarze "{size} KB" został wgrany wcześniej. Pomijanie zduplikowanego pliku.',
        msgResumableUploadRetriesExceeded: 'Przekroczono limit <b>{max}</b> prób wgrania pliku <b>{file}</b>! Szczegóły błędu: <pre>{error}</pre>',
        msgPendingTime: 'Pozostało {time}',
        msgCalculatingTime: 'obliczanie pozostałego czasu',
        // Names of AJAX operations interpolated into msgAjaxError above.
        ajaxOperations: {
            deleteThumb: 'usuwanie pliku',
            uploadThumb: 'przesyłanie pliku',
            uploadBatch: 'masowe przesyłanie plików',
            uploadExtra: 'przesyłanie danych formularza'
        },
        dropZoneTitle: 'Przeciągnij i upuść pliki tutaj …',
        dropZoneClickTitle: '<br>(lub kliknij tutaj i wybierz {files} z komputera)',
        // Tooltips for per-file action buttons and status indicators.
        fileActionSettings: {
            removeTitle: 'Usuń plik',
            uploadTitle: 'Przesyłanie pliku',
            uploadRetryTitle: 'Ponów',
            downloadTitle: 'Pobierz plik',
            zoomTitle: 'Pokaż szczegóły',
            // Fixed typo: "Przenies" -> "Przenieś".
            dragTitle: 'Przenieś / Ponownie zaaranżuj',
            indicatorNewTitle: 'Jeszcze nie przesłany',
            indicatorSuccessTitle: 'Dodane',
            indicatorErrorTitle: 'Błąd',
            indicatorPausedTitle: 'Przesyłanie zatrzymane',
            indicatorLoadingTitle: 'Przesyłanie …'
        },
        // Tooltips for the zoomed-preview modal navigation buttons.
        previewZoomButtonTitles: {
            prev: 'Pokaż poprzedni plik',
            next: 'Pokaż następny plik',
            toggleheader: 'Włącz / wyłącz nagłówek',
            fullscreen: 'Włącz / wyłącz pełny ekran',
            borderless: 'Włącz / wyłącz tryb bez ramek',
            close: 'Zamknij szczegółowy widok'
        }
    };
})(window.jQuery);
| OlliL/lalaMoneyflow | client/contrib/bootstrap-fileinput/js/locales/pl.js | JavaScript | bsd-2-clause | 5,901 |
using UnityEngine;
using System.Collections;
/// <summary>
/// Key input enumeration for easy input sending.
/// </summary>
public enum KeyInput
{
    /// <summary>Move the character left.</summary>
    GoLeft = 0,

    /// <summary>Move the character right.</summary>
    GoRight = 1,

    /// <summary>Move the character down.</summary>
    GoDown = 2,

    /// <summary>Perform a jump.</summary>
    Jump = 3,

    /// <summary>Number of defined inputs; must remain the last member.</summary>
    Count = 4
}
<?php
namespace Aura\Web\Response;
/**
 * Unit tests for the response cookie collection: setting/getting single
 * cookies, listing all cookies, and collection-level default attributes.
 */
class CookiesTest extends \PHPUnit_Framework_TestCase
{
    // Fresh Cookies instance under test; rebuilt in setUp() for each test.
    protected $cookies;

    protected function setUp()
    {
        $this->cookies = new Cookies;
    }

    /**
     * A cookie set with explicit attributes comes back as a fully populated
     * descriptor array; the string expire '88' is normalized to int 88.
     */
    public function testSetAndGet()
    {
        $this->cookies->set('foo', 'bar', '88', '/path', 'example.com');
        $expect = array(
            'value' => 'bar',
            'expire' => 88,
            'path' => '/path',
            'domain' => 'example.com',
            'secure' => false,
            'httponly' => true,
        );
        $actual = $this->cookies->get('foo');
        $this->assertSame($expect, $actual);
    }

    /**
     * get() with no name returns every cookie keyed by name. The second
     * cookie passes expire as a formatted date string, which is expected to
     * normalize back to the same integer timestamp (presumably via
     * strtotime() inside Cookies::set() — confirm against the implementation).
     */
    public function testGetAll()
    {
        $this->cookies->set('foo', 'bar', '88', '/path', 'example.com');
        $this->cookies->set('baz', 'dib', date('Y-m-d H:i:s', '88'), '/path', 'example.com');
        $expect = array(
            'foo' => array(
                'value' => 'bar',
                'expire' => 88,
                'path' => '/path',
                'domain' => 'example.com',
                'secure' => false,
                'httponly' => true,
            ),
            'baz' => array(
                'value' => 'dib',
                'expire' => 88,
                'path' => '/path',
                'domain' => 'example.com',
                'secure' => false,
                'httponly' => true,
            ),
        );
        $actual = $this->cookies->get();
        $this->assertSame($expect, $actual);
    }

    /**
     * Collection-level default setters (setExpire(), setPath(), setDomain(),
     * setSecure(), setHttponly()) do not retroactively change a cookie that
     * was already set; getDefault() reports the current defaults with a
     * null value placeholder.
     */
    public function testDefault()
    {
        // set a cookie name and value
        $this->cookies->set('foo', 'bar');
        // get before defaults
        $expect = array(
            'value' => 'bar',
            'expire' => 0,
            'path' => '',
            'domain' => '',
            'secure' => false,
            'httponly' => true,
        );
        $actual = $this->cookies->get('foo');
        $this->assertSame($expect, $actual);
        // set and get defaults
        $this->cookies->setExpire(88);
        $this->cookies->setPath('/path');
        $this->cookies->setDomain('example.com');
        $this->cookies->setSecure(true);
        $this->cookies->setHttponly(false);
        // get after defaults
        $expect = array(
            'value' => null,
            'expire' => 88,
            'path' => '/path',
            'domain' => 'example.com',
            'secure' => true,
            'httponly' => false,
        );
        $actual = $this->cookies->getDefault();
        $this->assertSame($expect, $actual);
    }
}
| auraphp/Aura.Web | tests/Response/CookiesTest.php | PHP | bsd-2-clause | 2,511 |
// Dev-server startup script: closure-util parses the Closure dependency
// graph and serves sources; nomnom parses the CLI options.
// NOTE(review): path and url are required but not referenced below —
// possibly leftovers; kept as-is.
var path = require('path');
var url = require('url');
var closure = require('closure-util');
var nomnom = require('nomnom');

var log = closure.log;

// CLI options: --port/-p (HTTP port, default 4000) and --loglevel/-l
// (log verbosity, default 'info').
var options = nomnom.options({
  port: {
    abbr: 'p',
    'default': 4000,
    help: 'Port for incoming connections',
    metavar: 'PORT'
  },
  loglevel: {
    abbr: 'l',
    choices: ['silly', 'verbose', 'info', 'warn', 'error'],
    'default': 'info',
    help: 'Log level',
    metavar: 'LEVEL'
  }
}).parse();

/** @type {string} */
log.level = options.loglevel;

log.info('ol3-cesium', 'Parsing dependencies ...');

// Build the dependency graph for the project sources, including the
// bundled Closure Library; requires matching '^ol\.' are ignored.
var manager = new closure.Manager({
  closure: true, // use the bundled Closure Library
  lib: [
    'src/**/*.js'
  ],
  ignoreRequires: '^ol\\.'
});
manager.on('error', function(e) {
  log.error('ol3-cesium', e.message);
});
// Once the graph is ready, start the server; scripts are loaded through
// the /@loader endpoint. Exit with a non-zero status if it fails to bind.
manager.on('ready', function() {
  var server = new closure.Server({
    manager: manager,
    loader: '/@loader'
  });
  server.listen(options.port, function() {
    log.info('ol3-cesium', 'Listening on http://localhost:' +
        options.port + '/ (Ctrl+C to stop)');
  });
  server.on('error', function(err) {
    log.error('ol3-cesium', 'Server failed to start: ' + err.message);
    process.exit(1);
  });
});
| GistdaDev/ol3-cesium | build/serve.js | JavaScript | bsd-2-clause | 1,218 |
#region BSD License
/*
Copyright (c) 2012, Clarius Consulting
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#endregion
namespace Clide.VisualStudio
{
using Microsoft.VisualStudio;
using Microsoft.VisualStudio.Shell.Interop;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Runtime.InteropServices;
/// <summary>
/// Extension helpers over Visual Studio shell service interfaces: tool
/// window access and resolution of the current hierarchy selection.
/// </summary>
internal static class VsServiceProviderExtensions
{
    /// <summary>Wraps the tool window identified by <paramref name="toolWindowId"/>.</summary>
    public static VsToolWindow ToolWindow(this IServiceProvider serviceProvider, Guid toolWindowId)
    {
        return new VsToolWindow(serviceProvider, toolWindowId);
    }

    /// <summary>
    /// Returns the hierarchy owning the current selection, or null when
    /// nothing is selected, the solution node itself is selected, or the
    /// selection spans multiple hierarchies. Runs on the UI thread; the
    /// raw COM pointers are released in the finally block.
    /// </summary>
    public static IVsHierarchy GetSelectedHierarchy(this IVsMonitorSelection monitorSelection, IUIThread uiThread)
    {
        var hierarchyPtr = IntPtr.Zero;
        var selectionContainer = IntPtr.Zero;
        return uiThread.Invoke(() =>
        {
            try
            {
                // Get the current project hierarchy, project item, and selection container for the current selection
                // If the selection spans multiple hierarchies, hierarchyPtr is Zero.
                // So fast path is for non-zero result (most common case of single active project/item).
                uint itemid;
                IVsMultiItemSelect multiItemSelect = null;
                ErrorHandler.ThrowOnFailure(monitorSelection.GetCurrentSelection(out hierarchyPtr, out itemid, out multiItemSelect, out selectionContainer));
                // There may be no selection at all.
                if (itemid == VSConstants.VSITEMID_NIL)
                    return null;
                if (itemid == VSConstants.VSITEMID_ROOT)
                {
                    // The root selection could be the solution itself, so no project is active.
                    if (hierarchyPtr == IntPtr.Zero)
                        return null;
                    else
                        return (IVsHierarchy)Marshal.GetTypedObjectForIUnknown(hierarchyPtr, typeof(IVsHierarchy));
                }
                // We may have a single item selection, so we can safely pick its owning project/hierarchy.
                if (itemid != VSConstants.VSITEMID_SELECTION)
                    return (IVsHierarchy)Marshal.GetTypedObjectForIUnknown(hierarchyPtr, typeof(IVsHierarchy));
                // Otherwise, this is a multiple item selection within the same hierarchy,
                // we select the hierarchy.
                uint numberOfSelectedItems;
                int isSingleHierarchyInt;
                ErrorHandler.ThrowOnFailure(multiItemSelect.GetSelectionInfo(out numberOfSelectedItems, out isSingleHierarchyInt));
                var isSingleHierarchy = (isSingleHierarchyInt != 0);
                if (isSingleHierarchy)
                    return (IVsHierarchy)Marshal.GetTypedObjectForIUnknown(hierarchyPtr, typeof(IVsHierarchy));
                return null;
            }
            finally
            {
                // Release the COM pointers obtained from GetCurrentSelection.
                if (hierarchyPtr != IntPtr.Zero)
                {
                    Marshal.Release(hierarchyPtr);
                }
                if (selectionContainer != IntPtr.Zero)
                {
                    Marshal.Release(selectionContainer);
                }
            }
        });
    }

    /// <summary>
    /// Returns the current selection as (hierarchy, itemid) pairs. An empty
    /// selection yields an empty sequence; a root selection with no owning
    /// hierarchy yields the given <paramref name="solution"/> hierarchy.
    /// Runs on the UI thread; COM pointers are released in the finally block.
    /// </summary>
    public static IEnumerable<Tuple<IVsHierarchy, uint>> GetSelection(this IVsMonitorSelection monitorSelection, IUIThread uiThread, IVsHierarchy solution)
    {
        var hierarchyPtr = IntPtr.Zero;
        var selectionContainer = IntPtr.Zero;
        return uiThread.Invoke(() =>
        {
            try
            {
                // Get the current project hierarchy, project item, and selection container for the current selection
                // If the selection spans multiple hierarchies, hierarchyPtr is Zero
                uint itemid;
                IVsMultiItemSelect multiItemSelect = null;
                ErrorHandler.ThrowOnFailure(monitorSelection.GetCurrentSelection(out hierarchyPtr, out itemid, out multiItemSelect, out selectionContainer));
                if (itemid == VSConstants.VSITEMID_NIL)
                    return Enumerable.Empty<Tuple<IVsHierarchy, uint>>();
                if (itemid == VSConstants.VSITEMID_ROOT)
                {
                    if (hierarchyPtr == IntPtr.Zero)
                        return new[] { Tuple.Create(solution, VSConstants.VSITEMID_ROOT) };
                    else
                        return new[] { Tuple.Create(
                            (IVsHierarchy)Marshal.GetTypedObjectForIUnknown(hierarchyPtr, typeof(IVsHierarchy)),
                            VSConstants.VSITEMID_ROOT) };
                }
                if (itemid != VSConstants.VSITEMID_SELECTION)
                    return new[] { Tuple.Create(
                        (IVsHierarchy)Marshal.GetTypedObjectForIUnknown(hierarchyPtr, typeof(IVsHierarchy)),
                        itemid) };
                // This is a multiple item selection.
                uint numberOfSelectedItems;
                int isSingleHierarchyInt;
                ErrorHandler.ThrowOnFailure(multiItemSelect.GetSelectionInfo(out numberOfSelectedItems, out isSingleHierarchyInt));
                var isSingleHierarchy = (isSingleHierarchyInt != 0);
                var vsItemSelections = new VSITEMSELECTION[numberOfSelectedItems];
                var flags = (isSingleHierarchy) ? (uint)__VSGSIFLAGS.GSI_fOmitHierPtrs : 0;
                ErrorHandler.ThrowOnFailure(multiItemSelect.GetSelectedItems(flags, numberOfSelectedItems, vsItemSelections));
                return vsItemSelections.Where(sel => sel.pHier != null)
                    // NOTE: we can return lazy results here, since
                    // the GetSelectedItems has already returned in the UI thread
                    // the array of results. We're just delaying the creation of the tuples
                    // in case they aren't all needed.
                    .Select(sel => Tuple.Create(sel.pHier, sel.itemid));
            }
            finally
            {
                // Release the COM pointers obtained from GetCurrentSelection.
                if (hierarchyPtr != IntPtr.Zero)
                {
                    Marshal.Release(hierarchyPtr);
                }
                if (selectionContainer != IntPtr.Zero)
                {
                    Marshal.Release(selectionContainer);
                }
            }
        });
    }
}
}
| austinvernsonger/clide | Src/Clide/VisualStudio/VsServiceProviderExtensions.cs | C# | bsd-2-clause | 8,017 |
/**
* This is specifically for the builder where the
* dependencies have been resolved and you just want
* to access the component.jsons locally.
*/
var semver = require('semver');
var fs = require('graceful-fs');
var join = require('path').join;
var resolve = require('path').resolve;
var debug = require('debug')('remotes:local');
var Remote = require('../remote')
module.exports = Local

// Safe before the declaration below: function declarations are hoisted.
Remote.extend(Local)

/**
 * Remote that resolves components already installed on disk.
 * Callable without `new`. Reads `options.out` (or `options.dir`,
 * default 'components') as the root folder of installed components.
 */
function Local(options) {
  // allow plain-function invocation
  if (!(this instanceof Local))
    return new Local(options)
  // shadow the caller's options so our mutations don't leak back
  options = Object.create(options || {});
  this.out = resolve(options.out
    || options.dir
    || 'components')
  debug('checking local components at %s', this.out);
  Remote.call(this, options)
}

// Identifier used when a remote list is matched against by name.
Local.prototype.name = 'local';
/**
* Local resolution is a little different than other remotes.
* In particular, if no `ref` is set,
* we check for any version.
*
* @param {String} repo
* @return {this}
* @api public
*/
Local.prototype.resolve = function* (remotes, repo, ref) {
  debug('resolving local remote');
  // Overloaded signature: resolve(repo, ref) — shift arguments right.
  if (typeof remotes === 'string') {
    ref = repo;
    repo = remotes;
  } else if (Array.isArray(remotes) && !~remotes.indexOf('local')) {
    // if the current remote is not in this list,
    // then it's obviously not valid.
    return;
  }
  var folders = yield* this.folders(repo);
  // none installed
  if (!folders || !folders.length) return;
  // no specific version we care about
  if (!ref) return this;
  // exact tag version
  if (~folders.indexOf(ref)) return this;
  // check for equal semantic versions
  if (semver.maxSatisfying(folders.filter(valid), ref)) return this;
  // otherwise: nothing installed satisfies `ref` — resolves to undefined
}
/**
* Get the currently downloaded versions of a repo.
*
* @param {String} repo
* @return {Array} folders
* @api public
*/
/**
 * List the locally installed version folders for `repo`
 * (i.e. the contents of <out>/<user>/<project>), skipping dot-entries.
 * Yields undefined for malformed repo names or when nothing is installed.
 */
Local.prototype.folders = function* (repo) {
  var parts = repo.toLowerCase().split('/');
  // ignore malformed repos for now
  if (parts.length !== 2) return;
  var dir = join(this.out, parts[0], parts[1]);
  debug('checking folder: %s', dir);
  var entries;
  try {
    entries = yield readdir(dir);
  } catch (err) {
    // a missing directory simply means "not installed"
    if (err.code === 'ENOENT') return;
    throw err;
  }
  debug('got folders: %s', entries.join(', '));
  return entries.filter(noLeadingDot);
}
/**
* Return the currently downloaded components' semantic versions.
*
* @param {String} repo
* @return {Array} references
* @api public
*/
// Locally, every downloaded folder name is treated as a candidate
// version string for `repo`, so this simply delegates to folders().
Local.prototype._versions = function* (repo) {
  return yield* this.folders(repo);
}
/**
* Return the existing component.json, if any.
* @param {String} repo
* @param {String} reference
* @return {Object} component.json
* @api public
*/
/**
 * Read and parse <out>/<repo>/<ref>/component.json.
 * Yields undefined when the manifest does not exist; rethrows any other
 * I/O error; throws a descriptive Error when the file is not valid JSON.
 */
Local.prototype._json = function* (repo, ref) {
  var filename = join(this.out, repo, ref, 'component.json');
  var contents;
  try {
    contents = yield read(filename);
  } catch (err) {
    // anything other than a missing file is a real error
    if (err.code !== 'ENOENT') throw err;
    return;
  }
  try {
    return JSON.parse(contents);
  } catch (parseErr) {
    throw new Error('JSON parsing error with "' + filename + '"');
  }
}
/**
* NOT RELEVANT WITH THIS REMOTE
*/
// Intentionally a no-op (see "NOT RELEVANT WITH THIS REMOTE" above):
// the local remote never fetches a repository tree.
Local.prototype._tree = function* () {
  /* jshint noyield:true */
}
// True when the string parses as a (loose) semantic version.
function valid(version) {
  return semver.valid(version, true);
}

// True for directory entries that are not hidden (no leading dot).
function noLeadingDot(entry) {
  return entry.charAt(0) !== '.';
}
// Thunk wrappers: return a function-of-callback so generator-based
// callers can `yield` these fs operations.
function readdir(dir) {
  return function (callback) {
    fs.readdir(dir, callback);
  };
}

function read(file) {
  return function (callback) {
    fs.readFile(file, 'utf8', callback);
  };
}
| bmanth60/workflow-test | node_modules/component/node_modules/component-remotes/lib/remotes/local.js | JavaScript | bsd-3-clause | 3,407 |
/* ========================================================================= *
* *
* OpenMesh *
* Copyright (c) 2001-2015, RWTH-Aachen University *
* Department of Computer Graphics and Multimedia *
* All rights reserved. *
* www.openmesh.org *
* *
*---------------------------------------------------------------------------*
* This file is part of OpenMesh. *
*---------------------------------------------------------------------------*
* *
* Redistribution and use in source and binary forms, with or without *
* modification, are permitted provided that the following conditions *
* are met: *
* *
* 1. Redistributions of source code must retain the above copyright notice, *
* this list of conditions and the following disclaimer. *
* *
* 2. Redistributions in binary form must reproduce the above copyright *
* notice, this list of conditions and the following disclaimer in the *
* documentation and/or other materials provided with the distribution. *
* *
* 3. Neither the name of the copyright holder nor the names of its *
* contributors may be used to endorse or promote products derived from *
* this software without specific prior written permission. *
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS *
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED *
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A *
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER *
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, *
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, *
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR *
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF *
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING *
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS *
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *
* *
* ========================================================================= */
/*===========================================================================*\
* *
* $Revision$ *
* $Date$ *
* *
\*===========================================================================*/
/** \file BaseDecimaterT.hh
*/
//=============================================================================
//
// CLASS McDecimaterT
//
//=============================================================================
#ifndef OPENMESH_BASE_DECIMATER_DECIMATERT_HH
#define OPENMESH_BASE_DECIMATER_DECIMATERT_HH
//== INCLUDES =================================================================
#include <memory>
#include <OpenMesh/Core/Utils/Property.hh>
#include <OpenMesh/Tools/Decimater/ModBaseT.hh>
#include <OpenMesh/Core/Utils/Noncopyable.hh>
#include <OpenMesh/Tools/Decimater/Observer.hh>
//== NAMESPACE ================================================================
namespace OpenMesh {
namespace Decimater {
//== CLASS DEFINITION =========================================================
/** \brief Empty tag base class of the decimater framework.
 *
 *  Carries no members; it exists as a common, non-template anchor for the
 *  decimation-module machinery.
 *  \see BaseDecimaterT, \ref decimater_docu
 */
class BaseDecimaterModule
{
};
/** \brief Base class of the mesh decimation framework.
 *
 *  Owns the registered decimation modules (one priority module plus any
 *  number of binary modules) and provides the shared collapse machinery
 *  used by concrete decimater implementations.
 */
template < typename MeshT >
class BaseDecimaterT : private Utils::Noncopyable
{
public: //-------------------------------------------------------- public types

  typedef BaseDecimaterT< MeshT > Self;
  typedef MeshT Mesh;
  typedef CollapseInfoT<MeshT> CollapseInfo;
  typedef ModBaseT<MeshT> Module;
  typedef std::vector< Module* > ModuleList;
  typedef typename ModuleList::iterator ModuleListIterator;

public: //------------------------------------------------------ public methods

  /// Construct with the mesh to decimate; no modules are registered yet.
  BaseDecimaterT(Mesh& _mesh);

  /// Destructor.
  virtual ~BaseDecimaterT();

  /** Initialize decimater and decimating modules.
      Return values:
      true ok
      false No or more than one non-binary module exist. In that case
      the decimater is uninitialized!
  */
  bool initialize();

  /// Returns whether decimater has been successfully initialized.
  bool is_initialized() const { return initialized_; }

  /// Print information about modules to _os
  void info( std::ostream& _os );

public: //--------------------------------------------------- module management

  /** \brief Add observer
   *
   * You can set an observer which is used as a callback to check the decimators progress and to
   * abort it if necessary.
   *
   * @param _o Observer to be used
   */
  void set_observer(Observer* _o)
  {
    observer_ = _o;
  }

  /// Get current observer of a decimater
  Observer* observer()
  {
    return observer_;
  }

  /// access mesh. used in modules.
  Mesh& mesh() { return mesh_; }

  /// add module to decimater
  template < typename _Module >
  bool add( ModHandleT<_Module>& _mh )
  {
    if (_mh.is_valid())
      return false;
    _mh.init( new _Module(mesh()) );
    all_modules_.push_back( _mh.module() );
    // changing the module set invalidates any previous initialization
    set_uninitialized();
    return true;
  }

  /// remove module
  template < typename _Module >
  bool remove( ModHandleT<_Module>& _mh )
  {
    if (!_mh.is_valid())
      return false;
    typename ModuleList::iterator it = std::find(all_modules_.begin(),
                                                 all_modules_.end(),
                                                 _mh.module() );
    if ( it == all_modules_.end() ) // module not found
      return false;
    delete *it;
    all_modules_.erase( it ); // finally remove from list
    _mh.clear();
    // changing the module set invalidates any previous initialization
    set_uninitialized();
    return true;
  }

  /// get module referenced by handle _mh
  template < typename Module >
  Module& module( ModHandleT<Module>& _mh )
  {
    assert( _mh.is_valid() );
    return *_mh.module();
  }

protected:

  /// returns false, if abort requested by observer
  bool notify_observer(size_t _n_collapses)
  {
    // only ping the observer every `interval` collapses
    if (observer() && _n_collapses % observer()->get_interval() == 0)
    {
      observer()->notify(_n_collapses);
      return !observer()->abort();
    }
    return true;
  }

  /// Reset the initialized flag, and clear the bmodules_ and cmodule_
  void set_uninitialized() {
    initialized_ = false;
    cmodule_ = 0;
    bmodules_.clear();
  }

  /// Forward a performed collapse to all binary modules, then the priority module.
  void update_modules(CollapseInfo& _ci)
  {
    typename ModuleList::iterator m_it, m_end = bmodules_.end();
    for (m_it = bmodules_.begin(); m_it != m_end; ++m_it)
      (*m_it)->postprocess_collapse(_ci);
    cmodule_->postprocess_collapse(_ci);
  }

protected: //---------------------------------------------------- private methods

  /// Is an edge collapse legal? Performs topological test only.
  /// The method evaluates the status bit Locked, Deleted, and Feature.
  /// \attention The method temporarily sets the bit Tagged. After usage
  /// the bit will be disabled!
  bool is_collapse_legal(const CollapseInfo& _ci);

  /// Calculate priority of an halfedge collapse (using the modules)
  float collapse_priority(const CollapseInfo& _ci);

  /// Pre-process a collapse
  void preprocess_collapse(CollapseInfo& _ci);

  /// Post-process a collapse
  void postprocess_collapse(CollapseInfo& _ci);

  /**
   * This provides a function that allows the setting of a percentage
   * of the original constraint of the modules
   *
   * Note that some modules might re-initialize in their
   * set_error_tolerance_factor function as necessary
   * @param _factor has to be in the closed interval between 0.0 and 1.0
   */
  void set_error_tolerance_factor(double _factor);

  /** Reset the status of this class
   *
   * You have to call initialize again!!
   */
  void reset(){ initialized_ = false; };

private: //------------------------------------------------------- private data

  /// reference to mesh
  Mesh& mesh_;

  /// list of binary modules
  ModuleList bmodules_;

  /// the current priority module
  Module* cmodule_;

  /// list of all allocated modules (including cmodule_ and all of bmodules_)
  ModuleList all_modules_;

  /// Flag if all modules were initialized
  bool initialized_;

  /// observer
  Observer* observer_;

};
//=============================================================================
} // END_NS_DECIMATER
} // END_NS_OPENMESH
//=============================================================================
#if defined(OM_INCLUDE_TEMPLATES) && !defined(OPENMESH_BASE_DECIMATER_DECIMATERT_CC)
#define OPENMESH_BASE_DECIMATER_TEMPLATES
#include "BaseDecimaterT.cc"
#endif
//=============================================================================
#endif // OPENMESH_BASE_DECIMATER_DECIMATERT_HH defined
//=============================================================================
| svn2github/OpenMesh2 | src/OpenMesh/Tools/Decimater/BaseDecimaterT.hh | C++ | bsd-3-clause | 10,044 |
namespace Org.BouncyCastle.Crypto.Tls
{
/// <summary>
/// RFC 2246 6.1
/// </summary>
public enum CompressionMethod : byte
{
    /// <summary>No compression; the mandatory method (RFC 2246 6.1).</summary>
    NULL = 0,

    /*
     * RFC 3749 2
     */
    /// <summary>DEFLATE compression as defined in RFC 3749 section 2.</summary>
    DEFLATE = 1

    /*
     * Values from 224 decimal (0xE0) through 255 decimal (0xFF)
     * inclusive are reserved for private use.
     */
}
| GaloisInc/hacrypto | src/C#/BouncyCastle/BouncyCastle-1.7/crypto/src/crypto/tls/CompressionMethod.cs | C# | bsd-3-clause | 332 |
None
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import os
# simple json is a python 2.5 library you need to install
import json
# json comes bundled with python 2.6. use one or the other
#import json
def run():
print "starting"
from receiver.models import Submission
from xformmanager.models import FormDefModel
# this part of the script walks through all the registered
# form definitions and bundles them with the original xsd
# schema for resubmission
domain = None
# you can manually set a single domain here. if you don't then
# all the data will be exported.
domain = "Grameen"
if domain:
all_schemas = FormDefModel.objects.filter(domain__name__iexact=domain)
else:
all_schemas = FormDefModel.objects.all()
for schema in all_schemas:
print "processsing %s" % schema
file_loc = schema.xsd_file_location
print "xsd file: %s" % file_loc
if file_loc:
headers = {
"original-submit-time" : str(schema.submit_time),
"original-submit-ip" : str(schema.submit_ip),
"bytes-received" : schema.bytes_received,
"form-name" : schema.form_name,
"form-display-name" : schema.form_display_name,
"target-namespace" : schema.target_namespace,
"date-created" : str(schema.date_created),
"domain" : str(schema.get_domain)
}
dir, filename = os.path.split(file_loc)
new_dir = os.path.join(dir, "export")
if not os.path.exists(new_dir):
os.makedirs(new_dir)
write_file = os.path.join(new_dir, filename.replace(".xml", ".xsdexport"))
fout = open(write_file, 'w')
jsoned = json.dumps(headers)
print jsoned
fout.write(jsoned)
fout.write("\n\n")
xsd_file = open(file_loc, "r")
payload = xsd_file.read()
xsd_file.close()
fout.write(payload)
fout.close()
# this part of the script walks through all the submissions
# and bundles them in an exportable format with the original
# submitting IP and time, as well as a reference to the
# original post
#all_submissions = Submission.objects.all()
if domain:
all_submissions = Submission.objects.filter(domain__name__iexact=domain)
else:
all_submissions = Submission.objects.all()
for submission in all_submissions:
#print "processing %s (%s)" % (submission,submission.raw_post)
post_file = open(submission.raw_post, "r")
submit_time = str(submission.submit_time)
# first line is content type
content_type = post_file.readline().split(":")[1].strip()
# second line is content length
content_length = post_file.readline().split(":")[1].strip()
# third line is empty
post_file.readline()
# the rest is the actual body of the post
headers = { "content-type" : content_type,
"content-length" : content_length,
"time-received" : str(submission.submit_time),
"original-ip" : str(submission.submit_ip),
"domain" : submission.domain.name
}
# check the directory and create it if it doesn't exist
dir, filename = os.path.split(submission.raw_post)
new_dir = os.path.join(dir, "export")
if not os.path.exists(new_dir):
os.makedirs(new_dir)
# the format will be:
# {headers} (dict)
# (empty line)
# <body>
write_file = os.path.join(new_dir, filename.replace("postdata", "postexport"))
fout = open(write_file, 'w')
jsoned = json.dumps(headers)
fout.write(jsoned)
fout.write("\n\n")
try:
payload = post_file.read()
fout.write(payload)
except Exception:
print "error processing %s" % write_file
fout.close()
print "done"
| commtrack/temp-aquatest | utilities/data_migration/data_export_script_new.py | Python | bsd-3-clause | 4,187 |
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chromecast/base/metrics/cast_metrics_test_helper.h"
#include "base/logging.h"
#include "base/macros.h"
#include "chromecast/base/metrics/cast_metrics_helper.h"
namespace chromecast {
namespace metrics {
namespace {
class CastMetricsHelperStub : public CastMetricsHelper {
public:
CastMetricsHelperStub();
~CastMetricsHelperStub() override;
void UpdateCurrentAppInfo(const std::string& app_id,
const std::string& session_id) override;
void UpdateSDKInfo(const std::string& sdk_version) override;
void LogMediaPlay() override;
void LogMediaPause() override;
void LogTimeToFirstAudio() override;
void LogTimeToBufferAv(BufferingType buffering_type,
base::TimeDelta time) override;
std::string GetMetricsNameWithAppName(
const std::string& prefix, const std::string& suffix) const override;
void SetMetricsSink(MetricsSink* delegate) override;
void RecordSimpleAction(const std::string& action) override;
private:
DISALLOW_COPY_AND_ASSIGN(CastMetricsHelperStub);
};
bool stub_instance_exists = false;
CastMetricsHelperStub::CastMetricsHelperStub()
: CastMetricsHelper() {
DCHECK(!stub_instance_exists);
stub_instance_exists = true;
}
CastMetricsHelperStub::~CastMetricsHelperStub() {
DCHECK(stub_instance_exists);
stub_instance_exists = false;
}
void CastMetricsHelperStub::UpdateCurrentAppInfo(
const std::string& app_id,
const std::string& session_id) {
}
void CastMetricsHelperStub::UpdateSDKInfo(const std::string& sdk_version) {
}
void CastMetricsHelperStub::LogMediaPlay() {
}
void CastMetricsHelperStub::LogMediaPause() {
}
void CastMetricsHelperStub::LogTimeToFirstAudio() {
}
void CastMetricsHelperStub::LogTimeToBufferAv(BufferingType buffering_type,
base::TimeDelta time) {
}
std::string CastMetricsHelperStub::GetMetricsNameWithAppName(
const std::string& prefix,
const std::string& suffix) const {
return "";
}
void CastMetricsHelperStub::SetMetricsSink(MetricsSink* delegate) {
}
} // namespace
void CastMetricsHelperStub::RecordSimpleAction(const std::string& action) {
}
void InitializeMetricsHelperForTesting() {
if (!stub_instance_exists) {
new CastMetricsHelperStub();
}
}
} // namespace metrics
} // namespace chromecast
| axinging/chromium-crosswalk | chromecast/base/metrics/cast_metrics_test_helper.cc | C++ | bsd-3-clause | 2,506 |
#include <xpcc/architecture/platform.hpp>
#include <xpcc/debug/logger.hpp>
// ----------------------------------------------------------------------------
// Set the log level
#undef XPCC_LOG_LEVEL
#define XPCC_LOG_LEVEL xpcc::log::INFO
typedef GpioInputC0 Adc1In;
typedef GpioInputC2 Adc2In;
typedef GpioInputB13 Adc3In;
typedef GpioInputB12 Adc4In;
xpcc::IODeviceWrapper< Usart2, xpcc::IOBuffer::BlockIfFull > loggerDevice;
xpcc::log::Logger xpcc::log::info(loggerDevice);
static void
printAdc()
{
const float maxVoltage = 3.3;
float voltage = 0.0;
int adcValue = 0;
adcValue = Adc1::getValue();
XPCC_LOG_INFO << "Adc1: value=" << adcValue;
voltage = adcValue * maxVoltage / 0xfff;
XPCC_LOG_INFO << "; voltage=" << voltage << xpcc::endl;
/*
adcValue = Adc2::getValue();
XPCC_LOG_INFO << "Adc2: value=" << adcValue;
voltage = adcValue * maxVoltage / 0xfff;
XPCC_LOG_INFO << "; voltage=" << voltage << xpcc::endl;
adcValue = Adc3::getValue();
XPCC_LOG_INFO << "Adc3: value=" << adcValue;
voltage = adcValue * maxVoltage / 0xfff;
XPCC_LOG_INFO << "; voltage=" << voltage << xpcc::endl;
adcValue = Adc4::getValue();
XPCC_LOG_INFO << "Adc4: value=" << adcValue;
voltage = adcValue * maxVoltage / 0xfff;
XPCC_LOG_INFO << "; voltage=" << voltage << xpcc::endl;
*/
}
// ----------------------------------------------------------------------------
int
main()
{
Board::initialize();
// initialize Uart2 for XPCC_LOG_INFO
GpioOutputA2::connect(Usart2::Tx);
GpioInputA3::connect(Usart2::Rx, Gpio::InputType::PullUp);
Usart2::initialize<Board::systemClock, 115200>(12);
// initialize Adc
Adc1::initialize(Adc1::ClockMode::Asynchronous, Adc1::Prescaler::Div128,
Adc1::CalibrationMode::SingleEndedInputsMode, true);
Adc1::setFreeRunningMode(true);
Adc1In::connect(Adc1::Channel6);
Adc1::setChannel(Adc1In::Adc1Channel, Adc1::SampleTime::Cycles2);
Adc1::startConversion();
Adc2::initialize(Adc2::ClockMode::Asynchronous, Adc2::Prescaler::Div128,
Adc2::CalibrationMode::SingleEndedInputsMode, true);
Adc2::setFreeRunningMode(true);
Adc2In::connect(Adc2::Channel8);
Adc2::setChannel(Adc2In::Adc2Channel, Adc2::SampleTime::Cycles2);
Adc2::startConversion();
Adc3::initialize(Adc3::ClockMode::Asynchronous, Adc3::Prescaler::Div128,
Adc3::CalibrationMode::SingleEndedInputsMode, true);
Adc3::setFreeRunningMode(true);
Adc3In::connect(Adc3::Channel5);
Adc3::setChannel(Adc3In::Adc3Channel, Adc3::SampleTime::Cycles2);
Adc3::startConversion();
Adc4::initialize(Adc4::ClockMode::Asynchronous, Adc4::Prescaler::Div128,
Adc4::CalibrationMode::SingleEndedInputsMode, true);
Adc4::setFreeRunningMode(true);
Adc4In::connect(Adc4::Channel3);
Adc4::setChannel(Adc4In::Adc4Channel, Adc4::SampleTime::Cycles2);
Adc4::startConversion();
while (1)
{
xpcc::delayMilliseconds(200);
printAdc();
}
return 0;
}
| dergraaf/xpcc | examples/stm32f3_discovery/adc/continous/main.cpp | C++ | bsd-3-clause | 2,861 |
///
/// \file
///
/// This file is a part of pattern matching testing suite.
///
/// \autor Yuriy Solodkyy <yuriy.solodkyy@gmail.com>
///
/// This file is a part of the XTL framework (http://parasol.tamu.edu/xtl/).
/// Copyright (C) 2005-2012 Texas A&M University.
/// All rights reserved.
///
#include "testshape.hpp"
#include "config.hpp"
#include "ptrtools.hpp"
//------------------------------------------------------------------------------
#if !XTL_USE_MEMOIZED_CAST
#define dynamic_cast constant_time_dynamic_cast
#endif
//------------------------------------------------------------------------------
static size_t fdc_id(size_t n);
//------------------------------------------------------------------------------
template <size_t N>
struct shape_kind : shape_kind<N/2>
{
typedef shape_kind<N/2> base_class;
shape_kind(size_t n = N) : base_class(n) {}
void accept(ShapeVisitor&) const;
};
template <>
struct shape_kind<0> : OtherBase, Shape
{
typedef Shape base_class;
shape_kind(size_t n = 0) : base_class(n,fdc_id(n)) {}
void accept(ShapeVisitor&) const;
};
//------------------------------------------------------------------------------
struct ShapeVisitor
{
virtual void visit(const shape_kind<0>&) {}
#define FOR_EACH_MAX NUMBER_OF_DERIVED-2
#define FOR_EACH_N(N) virtual void visit(const shape_kind<N+1>& s) { visit(static_cast<const shape_kind<N+1>::base_class&>(s)); }
#include "loop_over_numbers.hpp"
#undef FOR_EACH_N
#undef FOR_EACH_MAX
};
//------------------------------------------------------------------------------
template <size_t N> void shape_kind<N>::accept(ShapeVisitor& v) const { v.visit(*this); }
void shape_kind<0>::accept(ShapeVisitor& v) const { v.visit(*this); }
//------------------------------------------------------------------------------
enum { fdc_size = 10 };
/// Primes numbers for each level of the binary hierarchy
const size_t constant_time_dynamic_cast_primes[fdc_size][2] =
{
{ 2, 2}, // Because the root is 2
{ 3, 5},
{ 7,11},
{13,17},
{19,23},
{29,31},
{37,41},
{43,47},
{53,59},
{61,67}
};
//------------------------------------------------------------------------------
static size_t fdc_id(size_t n)
{
XTL_ASSERT(req_bits(n) < fdc_size);
size_t id = 1;
if (n)
for (size_t m = req_bits(n), i = m; i; --i)
id *= constant_time_dynamic_cast_primes[m-i][(n & (1 << (i-1))) != 0];
//std::cout << n << "->" << id << std::endl;
return id;
}
//------------------------------------------------------------------------------
inline size_t id(size_t n) { return fdc_id(n); }
const size_t shape_ids[100] =
{
id( 0), id( 1), id( 2), id( 3), id( 4), id( 5), id( 6), id( 7), id( 8), id( 9),
id(10), id(11), id(12), id(13), id(14), id(15), id(16), id(17), id(18), id(19),
id(20), id(21), id(22), id(23), id(24), id(25), id(26), id(27), id(28), id(29),
id(30), id(31), id(32), id(33), id(34), id(35), id(36), id(37), id(38), id(39),
id(40), id(41), id(42), id(43), id(44), id(45), id(46), id(47), id(48), id(49),
id(50), id(51), id(52), id(53), id(54), id(55), id(56), id(57), id(58), id(59),
id(60), id(61), id(62), id(63), id(64), id(65), id(66), id(67), id(68), id(69),
id(70), id(71), id(72), id(73), id(74), id(75), id(76), id(77), id(78), id(79),
id(80), id(81), id(82), id(83), id(84), id(85), id(86), id(87), id(88), id(89),
id(90), id(91), id(92), id(93), id(94), id(95), id(96), id(97), id(98), id(99),
};
//------------------------------------------------------------------------------
template <size_t N>
inline const shape_kind<N>* constant_time_dynamic_cast_ex(const shape_kind<N>*, const Shape* u)
{
return u->m_fdc_id % shape_ids[N] == 0
? static_cast<const shape_kind<N>*>(u)
: 0;
}
template <typename T>
inline T constant_time_dynamic_cast(const Shape* u)
{
return constant_time_dynamic_cast_ex(static_cast<T>(0), u);
}
//------------------------------------------------------------------------------
XTL_TIMED_FUNC_BEGIN
size_t do_match(const Shape& s, size_t)
{
if (const shape_kind< 0>* p0 = dynamic_cast<const shape_kind< 0>*>(&s))
{
if (const shape_kind< 1>* p1 = dynamic_cast<const shape_kind< 1>*>(p0))
if (const shape_kind< 2>* p2 = dynamic_cast<const shape_kind< 2>*>(p1))
if (const shape_kind< 4>* p4 = dynamic_cast<const shape_kind< 4>*>(p2))
if (const shape_kind< 8>* p8 = dynamic_cast<const shape_kind< 8>*>(p4))
if (const shape_kind<16>* p16 = dynamic_cast<const shape_kind<16>*>(p8))
if (const shape_kind<32>* p32 = dynamic_cast<const shape_kind<32>*>(p16))
if (const shape_kind<64>* p64 = dynamic_cast<const shape_kind<64>*>(p32))
return p64->m_member7 + 64 ;
else
if (const shape_kind<65>* p65 = dynamic_cast<const shape_kind<65>*>(p32))
return p65->m_member7 + 65 ;
else
return p32->m_member7 + 32 ;
else
if (const shape_kind<33>* p33 = dynamic_cast<const shape_kind<33>*>(p16))
if (const shape_kind<66>* p66 = dynamic_cast<const shape_kind<66>*>(p33))
return p66->m_member7 + 66 ;
else
if (const shape_kind<67>* p67 = dynamic_cast<const shape_kind<67>*>(p33))
return p67->m_member7 + 67 ;
else
return p33->m_member7 + 33 ;
else
return p16->m_member7 + 16 ;
else
if (const shape_kind<17>* p17 = dynamic_cast<const shape_kind<17>*>(p8))
if (const shape_kind<34>* p34 = dynamic_cast<const shape_kind<34>*>(p17))
if (const shape_kind<68>* p68 = dynamic_cast<const shape_kind<68>*>(p34))
return p68->m_member7 + 68 ;
else
if (const shape_kind<69>* p69 = dynamic_cast<const shape_kind<69>*>(p34))
return p69->m_member7 + 69 ;
else
return p34->m_member7 + 34 ;
else
if (const shape_kind<35>* p35 = dynamic_cast<const shape_kind<35>*>(p17))
if (const shape_kind<70>* p70 = dynamic_cast<const shape_kind<70>*>(p35))
return p70->m_member7 + 70 ;
else
if (const shape_kind<71>* p71 = dynamic_cast<const shape_kind<71>*>(p35))
return p71->m_member7 + 71 ;
else
return p35->m_member7 + 35 ;
else
return p17->m_member7 + 17 ;
else
return p8->m_member7 + 8 ;
else
if (const shape_kind< 9>* p9 = dynamic_cast<const shape_kind< 9>*>(p4))
if (const shape_kind<18>* p18 = dynamic_cast<const shape_kind<18>*>(p9))
if (const shape_kind<36>* p36 = dynamic_cast<const shape_kind<36>*>(p18))
if (const shape_kind<72>* p72 = dynamic_cast<const shape_kind<72>*>(p36))
return p72->m_member7 + 72 ;
else
if (const shape_kind<73>* p73 = dynamic_cast<const shape_kind<73>*>(p36))
return p73->m_member7 + 73 ;
else
return p36->m_member7 + 36 ;
else
if (const shape_kind<37>* p37 = dynamic_cast<const shape_kind<37>*>(p18))
if (const shape_kind<74>* p74 = dynamic_cast<const shape_kind<74>*>(p37))
return p74->m_member7 + 74 ;
else
if (const shape_kind<75>* p75 = dynamic_cast<const shape_kind<75>*>(p37))
return p75->m_member7 + 75 ;
else
return p37->m_member7 + 37 ;
else
return p18->m_member7 + 18 ;
else
if (const shape_kind<19>* p19 = dynamic_cast<const shape_kind<19>*>(p9))
if (const shape_kind<38>* p38 = dynamic_cast<const shape_kind<38>*>(p19))
if (const shape_kind<76>* p76 = dynamic_cast<const shape_kind<76>*>(p38))
return p76->m_member7 + 76 ;
else
if (const shape_kind<77>* p77 = dynamic_cast<const shape_kind<77>*>(p38))
return p77->m_member7 + 77 ;
else
return p38->m_member7 + 38 ;
else
if (const shape_kind<39>* p39 = dynamic_cast<const shape_kind<39>*>(p19))
if (const shape_kind<78>* p78 = dynamic_cast<const shape_kind<78>*>(p39))
return p78->m_member7 + 78 ;
else
if (const shape_kind<79>* p79 = dynamic_cast<const shape_kind<79>*>(p39))
return p79->m_member7 + 79 ;
else
return p39->m_member7 + 39 ;
else
return p19->m_member7 + 19 ;
else
return p9->m_member7 + 9 ;
else
return p4->m_member7 + 4 ;
else
if (const shape_kind< 5>* p5 = dynamic_cast<const shape_kind< 5>*>(p2))
if (const shape_kind<10>* p10 = dynamic_cast<const shape_kind<10>*>(p5))
if (const shape_kind<20>* p20 = dynamic_cast<const shape_kind<20>*>(p10))
if (const shape_kind<40>* p40 = dynamic_cast<const shape_kind<40>*>(p20))
if (const shape_kind<80>* p80 = dynamic_cast<const shape_kind<80>*>(p40))
return p80->m_member7 + 80 ;
else
if (const shape_kind<81>* p81 = dynamic_cast<const shape_kind<81>*>(p40))
return p81->m_member7 + 81 ;
else
return p40->m_member7 + 40 ;
else
if (const shape_kind<41>* p41 = dynamic_cast<const shape_kind<41>*>(p20))
if (const shape_kind<82>* p82 = dynamic_cast<const shape_kind<82>*>(p41))
return p82->m_member7 + 82 ;
else
if (const shape_kind<83>* p83 = dynamic_cast<const shape_kind<83>*>(p41))
return p83->m_member7 + 83 ;
else
return p41->m_member7 + 41 ;
else
return p20->m_member7 + 20 ;
else
if (const shape_kind<21>* p21 = dynamic_cast<const shape_kind<21>*>(p10))
if (const shape_kind<42>* p42 = dynamic_cast<const shape_kind<42>*>(p21))
if (const shape_kind<84>* p84 = dynamic_cast<const shape_kind<84>*>(p42))
return p84->m_member7 + 84 ;
else
if (const shape_kind<85>* p85 = dynamic_cast<const shape_kind<85>*>(p42))
return p85->m_member7 + 85 ;
else
return p42->m_member7 + 42 ;
else
if (const shape_kind<43>* p43 = dynamic_cast<const shape_kind<43>*>(p21))
if (const shape_kind<86>* p86 = dynamic_cast<const shape_kind<86>*>(p43))
return p86->m_member7 + 86 ;
else
if (const shape_kind<87>* p87 = dynamic_cast<const shape_kind<87>*>(p43))
return p87->m_member7 + 87 ;
else
return p43->m_member7 + 43 ;
else
return p21->m_member7 + 21 ;
else
return p10->m_member7 + 10 ;
else
if (const shape_kind<11>* p11 = dynamic_cast<const shape_kind<11>*>(p5))
if (const shape_kind<22>* p22 = dynamic_cast<const shape_kind<22>*>(p11))
if (const shape_kind<44>* p44 = dynamic_cast<const shape_kind<44>*>(p22))
if (const shape_kind<88>* p88 = dynamic_cast<const shape_kind<88>*>(p44))
return p88->m_member7 + 88 ;
else
if (const shape_kind<89>* p89 = dynamic_cast<const shape_kind<89>*>(p44))
return p89->m_member7 + 89 ;
else
return p44->m_member7 + 44 ;
else
if (const shape_kind<45>* p45 = dynamic_cast<const shape_kind<45>*>(p22))
if (const shape_kind<90>* p90 = dynamic_cast<const shape_kind<90>*>(p45))
return p90->m_member7 + 90 ;
else
if (const shape_kind<91>* p91 = dynamic_cast<const shape_kind<91>*>(p45))
return p91->m_member7 + 91 ;
else
return p45->m_member7 + 45 ;
else
return p22->m_member7 + 22 ;
else
if (const shape_kind<23>* p23 = dynamic_cast<const shape_kind<23>*>(p11))
if (const shape_kind<46>* p46 = dynamic_cast<const shape_kind<46>*>(p23))
if (const shape_kind<92>* p92 = dynamic_cast<const shape_kind<92>*>(p46))
return p92->m_member7 + 92 ;
else
if (const shape_kind<93>* p93 = dynamic_cast<const shape_kind<93>*>(p46))
return p93->m_member7 + 93 ;
else
return p46->m_member7 + 46 ;
else
if (const shape_kind<47>* p47 = dynamic_cast<const shape_kind<47>*>(p23))
if (const shape_kind<94>* p94 = dynamic_cast<const shape_kind<94>*>(p47))
return p94->m_member7 + 94 ;
else
if (const shape_kind<95>* p95 = dynamic_cast<const shape_kind<95>*>(p47))
return p95->m_member7 + 95 ;
else
return p47->m_member7 + 47 ;
else
return p23->m_member7 + 23 ;
else
return p11->m_member7 + 11 ;
else
return p5->m_member7 + 5 ;
else
return p2->m_member7 + 2 ;
else
if (const shape_kind< 3>* p3 = dynamic_cast<const shape_kind< 3>*>(p1))
if (const shape_kind< 6>* p6 = dynamic_cast<const shape_kind< 6>*>(p3))
if (const shape_kind<12>* p12 = dynamic_cast<const shape_kind<12>*>(p6))
if (const shape_kind<24>* p24 = dynamic_cast<const shape_kind<24>*>(p12))
if (const shape_kind<48>* p48 = dynamic_cast<const shape_kind<48>*>(p24))
if (const shape_kind<96>* p96 = dynamic_cast<const shape_kind<96>*>(p48))
return p96->m_member7 + 96 ;
else
if (const shape_kind<97>* p97 = dynamic_cast<const shape_kind<97>*>(p48))
return p97->m_member7 + 97 ;
else
return p48->m_member7 + 48 ;
else
if (const shape_kind<49>* p49 = dynamic_cast<const shape_kind<49>*>(p24))
if (const shape_kind<98>* p98 = dynamic_cast<const shape_kind<98>*>(p49))
return p98->m_member7 + 98 ;
else
if (const shape_kind<99>* p99 = dynamic_cast<const shape_kind<99>*>(p49))
return p99->m_member7 + 99 ;
else
return p49->m_member7 + 49 ;
else
return p24->m_member7 + 24 ;
else
if (const shape_kind<25>* p25 = dynamic_cast<const shape_kind<25>*>(p12))
if (const shape_kind<50>* p50 = dynamic_cast<const shape_kind<50>*>(p25))
return p50->m_member7 + 50 ;
else
if (const shape_kind<51>* p51 = dynamic_cast<const shape_kind<51>*>(p25))
return p51->m_member7 + 51 ;
else
return p25->m_member7 + 25 ;
else
return p12->m_member7 + 12 ;
else
if (const shape_kind<13>* p13 = dynamic_cast<const shape_kind<13>*>(p6))
if (const shape_kind<26>* p26 = dynamic_cast<const shape_kind<26>*>(p13))
if (const shape_kind<52>* p52 = dynamic_cast<const shape_kind<52>*>(p26))
return p52->m_member7 + 52 ;
else
if (const shape_kind<53>* p53 = dynamic_cast<const shape_kind<53>*>(p26))
return p53->m_member7 + 53 ;
else
return p26->m_member7 + 26 ;
else
if (const shape_kind<27>* p27 = dynamic_cast<const shape_kind<27>*>(p13))
if (const shape_kind<54>* p54 = dynamic_cast<const shape_kind<54>*>(p27))
return p54->m_member7 + 54 ;
else
if (const shape_kind<55>* p55 = dynamic_cast<const shape_kind<55>*>(p27))
return p55->m_member7 + 55 ;
else
return p27->m_member7 + 27 ;
else
return p13->m_member7 + 13 ;
else
return p6->m_member7 + 6 ;
else
if (const shape_kind< 7>* p7 = dynamic_cast<const shape_kind< 7>*>(p3))
if (const shape_kind<14>* p14 = dynamic_cast<const shape_kind<14>*>(p7))
if (const shape_kind<28>* p28 = dynamic_cast<const shape_kind<28>*>(p14))
if (const shape_kind<56>* p56 = dynamic_cast<const shape_kind<56>*>(p28))
return p56->m_member7 + 56 ;
else
if (const shape_kind<57>* p57 = dynamic_cast<const shape_kind<57>*>(p28))
return p57->m_member7 + 57 ;
else
return p28->m_member7 + 28 ;
else
if (const shape_kind<29>* p29 = dynamic_cast<const shape_kind<29>*>(p14))
if (const shape_kind<58>* p58 = dynamic_cast<const shape_kind<58>*>(p29))
return p58->m_member7 + 58 ;
else
if (const shape_kind<59>* p59 = dynamic_cast<const shape_kind<59>*>(p29))
return p59->m_member7 + 59 ;
else
return p29->m_member7 + 29 ;
else
return p14->m_member7 + 14 ;
else
if (const shape_kind<15>* p15 = dynamic_cast<const shape_kind<15>*>(p7))
if (const shape_kind<30>* p30 = dynamic_cast<const shape_kind<30>*>(p15))
if (const shape_kind<60>* p60 = dynamic_cast<const shape_kind<60>*>(p30))
return p60->m_member7 + 60 ;
else
if (const shape_kind<61>* p61 = dynamic_cast<const shape_kind<61>*>(p30))
return p61->m_member7 + 61 ;
else
return p30->m_member7 + 30 ;
else
if (const shape_kind<31>* p31 = dynamic_cast<const shape_kind<31>*>(p15))
if (const shape_kind<62>* p62 = dynamic_cast<const shape_kind<62>*>(p31))
return p62->m_member7 + 62 ;
else
if (const shape_kind<63>* p63 = dynamic_cast<const shape_kind<63>*>(p31))
return p63->m_member7 + 63 ;
else
return p31->m_member7 + 31 ;
else
return p15->m_member7 + 15 ;
else
return p7->m_member7 + 7 ;
else
return p3->m_member7 + 3 ;
else
return p1->m_member7 + 1 ;
else
return p0->m_member7 + 0 ;
}
return invalid;
}
XTL_TIMED_FUNC_END
//------------------------------------------------------------------------------
XTL_TIMED_FUNC_BEGIN
size_t do_visit(const Shape& s, size_t)
{
struct Visitor : ShapeVisitor
{
#define FOR_EACH_MAX NUMBER_OF_DERIVED-1
#define FOR_EACH_N(N) virtual void visit(const shape_kind<N>& s) { result = s.m_member7 + N; }
#include "loop_over_numbers.hpp"
#undef FOR_EACH_N
#undef FOR_EACH_MAX
size_t result;
};
Visitor v;
v.result = invalid;
s.accept(v);
return v.result;
}
XTL_TIMED_FUNC_END
//------------------------------------------------------------------------------
Shape* make_shape(size_t i)
{
switch (i % NUMBER_OF_DERIVED)
{
#define FOR_EACH_MAX NUMBER_OF_DERIVED-1
#define FOR_EACH_N(N) case N: return new shape_kind<N>;
#include "loop_over_numbers.hpp"
#undef FOR_EACH_N
#undef FOR_EACH_MAX
}
return 0;
}
//------------------------------------------------------------------------------
#include "testvismat.hpp" // Utilities for timing tests
//------------------------------------------------------------------------------
int main()
{
verdict pp = test_repetitive();
verdict ps = test_sequential();
verdict pr = test_randomized();
std::cout << "OVERALL: "
<< "Repetitive: " << pp << "; "
<< "Sequential: " << ps << "; "
<< "Random: " << pr
<< std::endl;
}
//------------------------------------------------------------------------------
| vscharf/Mach7 | code/typeswitch/2012-06-11/synthetic_dynamic_cast_fast.cpp | C++ | bsd-3-clause | 25,748 |
"""
Control global computation context
"""
from collections import defaultdict
_globals = defaultdict(lambda: None)
_globals['callbacks'] = set()
class set_options(object):
""" Set global state within controled context
This lets you specify various global settings in a tightly controlled with
block
Valid keyword arguments currently include:
get - the scheduler to use
pool - a thread or process pool
cache - Cache to use for intermediate results
func_loads/func_dumps - loads/dumps functions for serialization of data
likely to contain functions. Defaults to dill.loads/dill.dumps
rerun_exceptions_locally - rerun failed tasks in master process
Example
-------
>>> with set_options(get=dask.get): # doctest: +SKIP
... x = np.array(x) # uses dask.get internally
"""
def __init__(self, **kwargs):
self.old = _globals.copy()
_globals.update(kwargs)
def __enter__(self):
return
def __exit__(self, type, value, traceback):
_globals.clear()
_globals.update(self.old)
| wiso/dask | dask/context.py | Python | bsd-3-clause | 1,121 |
package uatparse
import (
"encoding/hex"
"errors"
"fmt"
"io/ioutil"
"strconv"
"strings"
)
const (
	// UAT ground-uplink sizing.  Each uplink frame consists of six blocks;
	// a block carries 576 data bits plus 160 additional bits (presumably
	// FEC parity -- cf. UATMsg.RS_Err -- TODO confirm against the UAT spec).
	UPLINK_BLOCK_DATA_BITS  = 576
	UPLINK_BLOCK_BITS       = (UPLINK_BLOCK_DATA_BITS + 160)
	UPLINK_BLOCK_DATA_BYTES = (UPLINK_BLOCK_DATA_BITS / 8)
	UPLINK_BLOCK_BYTES      = (UPLINK_BLOCK_BITS / 8)

	UPLINK_FRAME_BLOCKS     = 6
	UPLINK_FRAME_DATA_BITS  = (UPLINK_FRAME_BLOCKS * UPLINK_BLOCK_DATA_BITS)
	UPLINK_FRAME_BITS       = (UPLINK_FRAME_BLOCKS * UPLINK_BLOCK_BITS)
	UPLINK_FRAME_DATA_BYTES = (UPLINK_FRAME_DATA_BITS / 8)
	UPLINK_FRAME_BYTES      = (UPLINK_FRAME_BITS / 8)

	// assume 6 byte frames: 2 header bytes, 4 byte payload
	// (TIS-B heartbeat with one address, or empty FIS-B APDU)
	UPLINK_MAX_INFO_FRAMES = (424 / 6)

	// 64-entry DLAC character set, indexed by 6-bit code.  Index 28 (the
	// '\t' position) is the tab/space run-length marker handled specially
	// in dlac_decode; 0x03 (ETX) and 0x1E (record separator) act as line
	// delimiters (see formatDLACData).
	dlac_alpha = "\x03ABCDEFGHIJKLMNOPQRSTUVWXYZ\x1A\t\x1E\n| !\"#$%&'()*+,-./0123456789:;<=>?"
)
// UATFrame holds one information frame extracted from a UAT uplink
// message, along with the FIS-B fields decoded from it.
type UATFrame struct {
	Raw_data []byte // Undecoded frame bytes (header + payload).
	// FIS-B APDU payload remaining after the time header has been
	// stripped off by decodeTimeFormat.
	FISB_data []byte
	// FIS-B timestamp.  Which of these fields are actually populated
	// depends on the frame's time option (see decodeTimeFormat); the
	// others remain zero.
	FISB_month   uint32
	FISB_day     uint32
	FISB_hours   uint32
	FISB_minutes uint32
	FISB_seconds uint32
	FISB_length  uint32 // Payload bytes remaining after the time fields.

	frame_length uint32 // Total frame length in bytes; presumably set during frame parsing (not shown in this portion of the file) -- TODO confirm.
	Frame_type   uint32
	Product_id   uint32 // FIS-B product identifier (8-13 are AIRMET/NOTAM-class products -- see decodeAirmet).

	// Text data, if applicable.
	Text_data []string // One entry per decoded text line (see formatDLACData).

	// Flags.
	a_f bool // NOTE(review): not set anywhere in this portion of the file.
	g_f bool // NOTE(review): not set anywhere in this portion of the file.
	p_f bool // NOTE(review): not set anywhere in this portion of the file.
	s_f bool //TODO: Segmentation.

	// For AIRMET/NOTAM.
	//FIXME: Temporary.
	Points             []GeoPoint
	ReportNumber       uint16
	ReportYear         uint16
	LocationIdentifier string // Three-character DLAC location identifier (see decodeAirmet).
	RecordFormat       uint8
	ReportStart        string // Presumably formatted via airmetParseDate -- not set in this portion of the file.
	ReportEnd          string // Presumably formatted via airmetParseDate -- not set in this portion of the file.
}
// UATMsg is a single demodulated UAT message: demodulator metadata, the
// raw message bytes, and the information frames decoded from them.
type UATMsg struct {
	// Metadata from demodulation.
	RS_Err         int // Error count from FEC decoding (presumably Reed-Solomon) -- TODO confirm units.
	SignalStrength int

	msg     []byte // Raw message bytes.
	decoded bool   // Presumably set once `msg` has been parsed into Frames, guarding re-decoding -- TODO confirm in the decode routine (not visible here).

	// Station location for uplink frames, aircraft position for downlink frames.
	Lat float64
	Lon float64

	Frames []*UATFrame // Information frames extracted from this message.
}
// dlac_decode unpacks DLAC-encoded text: 6-bit characters packed MSB-first
// into bytes, so every 3 input bytes carry 4 characters.  data_len is the
// number of payload bytes to consume.  `step` tracks the bit phase within
// the current 3-byte group; at phase 2 the byte index is decremented so the
// byte shared between that character and the next is revisited on the
// following iteration.  Code 28 is a run-length marker: the code after it
// is a count of spaces to emit rather than a literal character.
func dlac_decode(data []byte, data_len uint32) string {
	step := 0
	tab := false
	ret := ""
	for i := uint32(0); i < data_len; i++ {
		var ch uint32
		switch step {
		case 0:
			// Top 6 bits of byte i.
			ch = uint32(data[i+0]) >> 2
		case 1:
			// Bottom 2 bits of byte i-1, top 4 bits of byte i.
			ch = ((uint32(data[i-1]) & 0x03) << 4) | (uint32(data[i+0]) >> 4)
		case 2:
			// Bottom 4 bits of byte i-1, top 2 bits of byte i.
			ch = ((uint32(data[i-1]) & 0x0f) << 2) | (uint32(data[i+0]) >> 6)
			// Re-read byte i next iteration: it also holds the low 6
			// bits of the following character.
			i = i - 1
		case 3:
			// Bottom 6 bits of byte i.
			ch = uint32(data[i+0]) & 0x3f
		}
		if tab {
			// Previous code was the run-length marker: emit `ch` spaces.
			for ch > 0 {
				ret += " "
				ch--
			}
			tab = false
		} else if ch == 28 { // tab
			tab = true
		} else {
			ret += string(dlac_alpha[ch])
		}
		step = (step + 1) % 4
	}
	return ret
}
// Decodes the time format and aligns 'FISB_data' accordingly.
//
// The 2-bit time option (t_opt) straddles header bytes 1 and 2 and selects
// one of four timestamp layouts; the matching FISB_* fields are filled in,
// FISB_length is reduced by the header size consumed, and FISB_data is
// pointed at the remaining payload.  The segmentation flag (s_f) is taken
// from bit 1 of header byte 1.
//
// NOTE(review): bounds are checked against f.frame_length rather than
// len(f.Raw_data); this assumes the caller guarantees
// len(Raw_data) >= frame_length -- TODO confirm.
//TODO: Make a new "FISB Time" structure that also encodes the type of timestamp received.
//TODO: pass up error.
func (f *UATFrame) decodeTimeFormat() {
	if len(f.Raw_data) < 3 {
		return // Can't determine time format.
	}

	t_opt := ((uint32(f.Raw_data[1]) & 0x01) << 1) | (uint32(f.Raw_data[2]) >> 7)

	var fisb_data []byte

	switch t_opt {
	case 0: // Hours, Minutes.
		if f.frame_length < 4 {
			return
		}
		f.FISB_hours = (uint32(f.Raw_data[2]) & 0x7c) >> 2
		f.FISB_minutes = ((uint32(f.Raw_data[2]) & 0x03) << 4) | (uint32(f.Raw_data[3]) >> 4)
		f.FISB_length = f.frame_length - 4
		fisb_data = f.Raw_data[4:]
	case 1: // Hours, Minutes, Seconds.
		if f.frame_length < 5 {
			return
		}
		f.FISB_hours = (uint32(f.Raw_data[2]) & 0x7c) >> 2
		f.FISB_minutes = ((uint32(f.Raw_data[2]) & 0x03) << 4) | (uint32(f.Raw_data[3]) >> 4)
		f.FISB_seconds = ((uint32(f.Raw_data[3]) & 0x0f) << 2) | (uint32(f.Raw_data[4]) >> 6)
		f.FISB_length = f.frame_length - 5
		fisb_data = f.Raw_data[5:]
	case 2: // Month, Day, Hours, Minutes.
		if f.frame_length < 5 {
			return
		}
		f.FISB_month = (uint32(f.Raw_data[2]) & 0x78) >> 3
		f.FISB_day = ((uint32(f.Raw_data[2]) & 0x07) << 2) | (uint32(f.Raw_data[3]) >> 6)
		f.FISB_hours = (uint32(f.Raw_data[3]) & 0x3e) >> 1
		f.FISB_minutes = ((uint32(f.Raw_data[3]) & 0x01) << 5) | (uint32(f.Raw_data[4]) >> 3)
		f.FISB_length = f.frame_length - 5
		fisb_data = f.Raw_data[5:]
	case 3: // Month, Day, Hours, Minutes, Seconds.
		if f.frame_length < 6 {
			return
		}
		f.FISB_month = (uint32(f.Raw_data[2]) & 0x78) >> 3
		f.FISB_day = ((uint32(f.Raw_data[2]) & 0x07) << 2) | (uint32(f.Raw_data[3]) >> 6)
		f.FISB_hours = (uint32(f.Raw_data[3]) & 0x3e) >> 1
		f.FISB_minutes = ((uint32(f.Raw_data[3]) & 0x01) << 5) | (uint32(f.Raw_data[4]) >> 3)
		f.FISB_seconds = ((uint32(f.Raw_data[4]) & 0x03) << 3) | (uint32(f.Raw_data[5]) >> 5)
		f.FISB_length = f.frame_length - 6
		fisb_data = f.Raw_data[6:]
	default:
		return // Should never reach this.
	}

	f.FISB_data = fisb_data

	// Segmentation flag: bit 1 of header byte 1.
	if (uint16(f.Raw_data[1]) & 0x02) != 0 {
		f.s_f = true // Default false.
	}
}
// Format newlines.
func formatDLACData(p string) []string {
ret := make([]string, 0)
for {
pos := strings.Index(p, "\x1E")
if pos == -1 {
pos = strings.Index(p, "\x03")
if pos == -1 {
ret = append(ret, p)
break
}
}
ret = append(ret, p[:pos])
p = p[pos+1:]
}
return ret
}
// decodeTextFrame interprets the entire FIS-B payload as DLAC-encoded text
// and stores the decoded result, split into lines, in f.Text_data.  A
// payload shorter than the advertised FISB_length is ignored.
func (f *UATFrame) decodeTextFrame() {
	if int(f.FISB_length) > len(f.FISB_data) {
		return
	}
	text := dlac_decode(f.FISB_data, f.FISB_length)
	f.Text_data = formatDLACData(text)
}
// airmetParseDate renders the raw date/time bytes of an AIRMET/NOTAM
// record as a human-readable string.
//
// date_time_format selects the field layout:
//
//	0 - no date/time present            -> ""
//	1 - month, day, hours, minutes     -> "MM-DD HH:MM"
//	2 - day, hours, minutes            -> "DD HH:MM"
//	3 - hours, minutes                 -> "HH:MM"
//
// Any other format value -- or a slice too short for the selected layout,
// which previously caused an index-out-of-range panic -- yields "".
func airmetParseDate(b []byte, date_time_format uint8) string {
	switch date_time_format {
	case 0: // No date/time used.
		return ""
	case 1: // Month, Day, Hours, Minutes.
		if len(b) < 4 {
			return "" // Truncated record.
		}
		return fmt.Sprintf("%02d-%02d %02d:%02d", b[0], b[1], b[2], b[3])
	case 2: // Day, Hours, Minutes.
		if len(b) < 3 {
			return "" // Truncated record.
		}
		return fmt.Sprintf("%02d %02d:%02d", b[0], b[1], b[2])
	case 3: // Hours, Minutes.
		if len(b) < 2 {
			return "" // Truncated record.
		}
		return fmt.Sprintf("%02d:%02d", b[0], b[1])
	}
	return ""
}
func airmetLatLng(lat_raw, lng_raw int32, alt bool) (float64, float64) {
fct := float64(0.000687)
if alt {
fct = float64(0.001373)
}
lat := fct * float64(lat_raw)
lng := fct * float64(lng_raw)
if lat > 90.0 {
lat = lat - 180.0
}
if lng > 180.0 {
lng = lng - 360.0
}
return lat, lng
}
//TODO: Ignoring flags (segmentation, etc.)
// Aero_FISB_ProdDef_Rev4.pdf
// Decode product IDs 8-13.
func (f *UATFrame) decodeAirmet() {
// APDU header: 48 bits (3-3) - assume no segmentation.
record_format := (uint8(f.FISB_data[0]) & 0xF0) >> 4
f.RecordFormat = record_format
fmt.Fprintf(ioutil.Discard, "record_format=%d\n", record_format)
product_version := (uint8(f.FISB_data[0]) & 0x0F)
fmt.Fprintf(ioutil.Discard, "product_version=%d\n", product_version)
record_count := (uint8(f.FISB_data[1]) & 0xF0) >> 4
fmt.Fprintf(ioutil.Discard, "record_count=%d\n", record_count)
location_identifier := dlac_decode(f.FISB_data[2:], 3)
fmt.Fprintf(ioutil.Discard, "%s\n", hex.Dump(f.FISB_data))
f.LocationIdentifier = location_identifier
fmt.Fprintf(ioutil.Discard, "location_identifier=%s\n", location_identifier)
record_reference := (uint8(f.FISB_data[5])) //FIXME: Special values. 0x00 means "use location_identifier". 0xFF means "use different reference". (4-3).
fmt.Fprintf(ioutil.Discard, "record_reference=%d\n", record_reference)
// Not sure when this is even used.
// rwy_designator := (record_reference & FC) >> 4
// parallel_rwy_designator := record_reference & 0x03 // 0 = NA, 1 = R, 2 = L, 3 = C (Figure 4-2).
//FIXME: Assume one record.
if record_count != 1 {
fmt.Fprintf(ioutil.Discard, "record_count=%d, != 1\n", record_count)
return
}
/*
0 - No data
1 - Unformatted ASCII Text
2 - Unformatted DLAC Text
3 - Unformatted DLAC Text w/ dictionary
4 - Formatted Text using ASN.1/PER
5-7 - Future Use
8 - Graphical Overlay
9-15 - Future Use
*/
switch record_format {
case 2:
record_length := (uint16(f.FISB_data[6]) << 8) | uint16(f.FISB_data[7])
if len(f.FISB_data)-int(record_length) < 6 {
fmt.Fprintf(ioutil.Discard, "FISB record not long enough: record_length=%d, len(f.FISB_data)=%d\n", record_length, len(f.FISB_data))
return
}
fmt.Fprintf(ioutil.Discard, "record_length=%d\n", record_length)
// Report identifier = report number + report year.
report_number := (uint16(f.FISB_data[8]) << 6) | ((uint16(f.FISB_data[9]) & 0xFC) >> 2)
f.ReportNumber = report_number
fmt.Fprintf(ioutil.Discard, "report_number=%d\n", report_number)
report_year := ((uint16(f.FISB_data[9]) & 0x03) << 5) | ((uint16(f.FISB_data[10]) & 0xF8) >> 3)
f.ReportYear = report_year
fmt.Fprintf(ioutil.Discard, "report_year=%d\n", report_year)
report_status := (uint8(f.FISB_data[10]) & 0x04) >> 2 //TODO: 0 = cancelled, 1 = active.
fmt.Fprintf(ioutil.Discard, "report_status=%d\n", report_status)
fmt.Fprintf(ioutil.Discard, "record_length=%d,len=%d\n", record_length, len(f.FISB_data))
text_data_len := record_length - 5
text_data := dlac_decode(f.FISB_data[11:], uint32(text_data_len))
fmt.Fprintf(ioutil.Discard, "text_data=%s\n", text_data)
f.Text_data = formatDLACData(text_data)
case 8:
// (6-1). (6.22 - Graphical Overlay Record Format).
record_data := f.FISB_data[6:] // Start after the record header.
record_length := (uint16(record_data[0]) << 2) | ((uint16(record_data[1]) & 0xC0) >> 6)
fmt.Fprintf(ioutil.Discard, "record_length=%d\n", record_length)
// Report identifier = report number + report year.
report_number := ((uint16(record_data[1]) & 0x3F) << 8) | uint16(record_data[2])
f.ReportNumber = report_number
fmt.Fprintf(ioutil.Discard, "report_number=%d\n", report_number)
report_year := (uint16(record_data[3]) & 0xFE) >> 1
f.ReportYear = report_year
fmt.Fprintf(ioutil.Discard, "report_year=%d\n", report_year)
overlay_record_identifier := ((uint8(record_data[4]) & 0x1E) >> 1) + 1 // Document instructs to add 1.
fmt.Fprintf(ioutil.Discard, "overlay_record_identifier=%d\n", overlay_record_identifier)
object_label_flag := uint8(record_data[4] & 0x01)
fmt.Fprintf(ioutil.Discard, "object_label_flag=%d\n", object_label_flag)
if object_label_flag == 0 { // Numeric index.
object_label := (uint8(record_data[5]) << 8) | uint8(record_data[6])
record_data = record_data[7:]
fmt.Fprintf(ioutil.Discard, "object_label=%d\n", object_label)
} else {
object_label := dlac_decode(record_data[5:], 9)
record_data = record_data[14:]
fmt.Fprintf(ioutil.Discard, "object_label=%s\n", object_label)
}
element_flag := (uint8(record_data[0]) & 0x80) >> 7
fmt.Fprintf(ioutil.Discard, "element_flag=%d\n", element_flag)
qualifier_flag := (uint8(record_data[0]) & 0x40) >> 6
fmt.Fprintf(ioutil.Discard, "qualifier_flag=%d\n", qualifier_flag)
param_flag := (uint8(record_data[0]) & 0x20) >> 5
fmt.Fprintf(ioutil.Discard, "param_flag=%d\n", param_flag)
object_element := uint8(record_data[0]) & 0x1F
fmt.Fprintf(ioutil.Discard, "object_element=%d\n", object_element)
object_type := (uint8(record_data[1]) & 0xF0) >> 4
fmt.Fprintf(ioutil.Discard, "object_type=%d\n", object_type)
object_status := uint8(record_data[1]) & 0x0F
fmt.Fprintf(ioutil.Discard, "object_status=%d\n", object_status)
//FIXME
if qualifier_flag == 0 { //TODO: Check.
record_data = record_data[2:]
} else {
object_qualifier := (uint32(record_data[2]) << 16) | (uint32(record_data[3]) << 8) | uint32(record_data[4])
fmt.Fprintf(ioutil.Discard, "object_qualifier=%d\n", object_qualifier)
fmt.Fprintf(ioutil.Discard, "%02x%02x%02x\n", record_data[2], record_data[3], record_data[4])
record_data = record_data[5:]
}
//FIXME
//if param_flag == 0 { //TODO: Check.
// record_data = record_data[2:]
//} else {
// //TODO.
// // record_data = record_data[4:]
//}
record_applicability_options := (uint8(record_data[0]) & 0xC0) >> 6
fmt.Fprintf(ioutil.Discard, "record_applicability_options=%d\n", record_applicability_options)
date_time_format := (uint8(record_data[0]) & 0x30) >> 4
fmt.Fprintf(ioutil.Discard, "date_time_format=%d\n", date_time_format)
geometry_overlay_options := uint8(record_data[0]) & 0x0F
fmt.Fprintf(ioutil.Discard, "geometry_overlay_options=%d\n", geometry_overlay_options)
overlay_operator := (uint8(record_data[1]) & 0xC0) >> 6
fmt.Fprintf(ioutil.Discard, "overlay_operator=%d\n", overlay_operator)
overlay_vertices_count := (uint8(record_data[1]) & 0x3F) + 1 // Document instructs to add 1. (6.20).
fmt.Fprintf(ioutil.Discard, "overlay_vertices_count=%d\n", overlay_vertices_count)
// Parse all of the dates.
switch record_applicability_options {
case 0: // No times given. UFN.
record_data = record_data[2:]
case 1: // Start time only. WEF.
f.ReportStart = airmetParseDate(record_data[2:], date_time_format)
record_data = record_data[6:]
case 2: // End time only. TIL.
f.ReportEnd = airmetParseDate(record_data[2:], date_time_format)
record_data = record_data[6:]
case 3: // Both start and end times. WEF.
f.ReportStart = airmetParseDate(record_data[2:], date_time_format)
f.ReportEnd = airmetParseDate(record_data[6:], date_time_format)
record_data = record_data[10:]
}
// Now we have the vertices.
switch geometry_overlay_options {
case 3: // Extended Range 3D Polygon (MSL).
points := make([]GeoPoint, 0) // Slice containing all of the points.
fmt.Fprintf(ioutil.Discard, "%d\n", len(record_data))
for i := 0; i < int(overlay_vertices_count); i++ {
lng_raw := (int32(record_data[6*i]) << 11) | (int32(record_data[6*i+1]) << 3) | (int32(record_data[6*i+2]) & 0xE0 >> 5)
lat_raw := ((int32(record_data[6*i+2]) & 0x1F) << 14) | (int32(record_data[6*i+3]) << 6) | ((int32(record_data[6*i+4]) & 0xFC) >> 2)
alt_raw := ((int32(record_data[6*i+4]) & 0x03) << 8) | int32(record_data[6*i+5])
fmt.Fprintf(ioutil.Discard, "lat_raw=%d, lng_raw=%d, alt_raw=%d\n", lat_raw, lng_raw, alt_raw)
lat, lng := airmetLatLng(lat_raw, lng_raw, false)
alt := alt_raw * 100
fmt.Fprintf(ioutil.Discard, "lat=%f,lng=%f,alt=%d\n", lat, lng, alt)
fmt.Fprintf(ioutil.Discard, "coord:%f,%f\n", lat, lng)
var point GeoPoint
point.Lat = lat
point.Lon = lng
point.Alt = alt
points = append(points, point)
f.Points = points
}
case 9: // Extended Range 3D Point (AGL). p.47.
if len(record_data) < 6 {
fmt.Fprintf(ioutil.Discard, "invalid data: Extended Range 3D Point. Should be 6 bytes; % seen.\n", len(record_data))
} else {
lng_raw := (int32(record_data[0]) << 11) | (int32(record_data[1]) << 3) | (int32(record_data[2]) & 0xE0 >> 5)
lat_raw := ((int32(record_data[2]) & 0x1F) << 14) | (int32(record_data[3]) << 6) | ((int32(record_data[4]) & 0xFC) >> 2)
alt_raw := ((int32(record_data[4]) & 0x03) << 8) | int32(record_data[5])
fmt.Fprintf(ioutil.Discard, "lat_raw=%d, lng_raw=%d, alt_raw=%d\n", lat_raw, lng_raw, alt_raw)
lat, lng := airmetLatLng(lat_raw, lng_raw, false)
alt := alt_raw * 100
fmt.Fprintf(ioutil.Discard, "lat=%f,lng=%f,alt=%d\n", lat, lng, alt)
fmt.Fprintf(ioutil.Discard, "coord:%f,%f\n", lat, lng)
var point GeoPoint
point.Lat = lat
point.Lon = lng
point.Alt = alt
f.Points = []GeoPoint{point}
}
case 7, 8: // Extended Range Circular Prism (7 = MSL, 8 = AGL)
if len(record_data) < 14 {
fmt.Fprintf(ioutil.Discard, "invalid data: Extended Range Circular Prism. Should be 14 bytes; % seen.\n", len(record_data))
} else {
lng_bot_raw := (int32(record_data[0]) << 10) | (int32(record_data[1]) << 2) | (int32(record_data[2]) & 0xC0 >> 6)
lat_bot_raw := ((int32(record_data[2]) & 0x3F) << 12) | (int32(record_data[3]) << 4) | ((int32(record_data[4]) & 0xF0) >> 4)
lng_top_raw := ((int32(record_data[4]) & 0x0F) << 14) | (int32(record_data[5]) << 6) | ((int32(record_data[6]) & 0xFC) >> 2)
lat_top_raw := ((int32(record_data[6]) & 0x03) << 16) | (int32(record_data[7]) << 8) | int32(record_data[8])
alt_bot_raw := (int32(record_data[9]) & 0xFE) >> 1
alt_top_raw := ((int32(record_data[9]) & 0x01) << 6) | ((int32(record_data[10]) & 0xFC) >> 2)
r_lng_raw := ((int32(record_data[10]) & 0x03) << 7) | ((int32(record_data[11]) & 0xFE) >> 1)
r_lat_raw := ((int32(record_data[11]) & 0x01) << 8) | int32(record_data[12])
alpha := int32(record_data[13])
lat_bot, lng_bot := airmetLatLng(lat_bot_raw, lng_bot_raw, true)
lat_top, lng_top := airmetLatLng(lat_top_raw, lng_top_raw, true)
alt_bot := alt_bot_raw * 5
alt_top := alt_top_raw * 500
r_lng := float64(r_lng_raw) * float64(0.2)
r_lat := float64(r_lat_raw) * float64(0.2)
fmt.Fprintf(ioutil.Discard, "lat_bot, lng_bot = %f, %f\n", lat_bot, lng_bot)
fmt.Fprintf(ioutil.Discard, "lat_top, lng_top = %f, %f\n", lat_top, lng_top)
if geometry_overlay_options == 8 {
fmt.Fprintf(ioutil.Discard, "alt_bot, alt_top = %d AGL, %d AGL\n", alt_bot, alt_top)
} else {
fmt.Fprintf(ioutil.Discard, "alt_bot, alt_top = %d MSL, %d MSL\n", alt_bot, alt_top)
}
fmt.Fprintf(ioutil.Discard, "r_lng, r_lat = %f, %f\n", r_lng, r_lat)
fmt.Fprintf(ioutil.Discard, "alpha=%d\n", alpha)
}
default:
fmt.Fprintf(ioutil.Discard, "unknown geometry: %d\n", geometry_overlay_options)
}
//case 1: // Unformatted ASCII Text.
default:
fmt.Fprintf(ioutil.Discard, "unknown record format: %d\n", record_format)
}
fmt.Fprintf(ioutil.Discard, "\n\n\n")
}
// decodeInfoFrame extracts the 11-bit product identifier from a raw UAT
// information frame and dispatches to the matching product decoder. Only
// FIS-B frames (Frame_type == 0) are decoded further, and currently only
// product 413 (generic text reports) is handled.
func (f *UATFrame) decodeInfoFrame() {
	// Need at least two bytes to recover the product id.
	if len(f.Raw_data) < 2 {
		return
	}

	f.Product_id = ((uint32(f.Raw_data[0]) & 0x1f) << 6) | (uint32(f.Raw_data[1]) >> 2)

	// Anything other than frame type 0 is not FIS-B; nothing more to do.
	if f.Frame_type != 0 {
		return
	}

	f.decodeTimeFormat()

	if f.Product_id == 413 {
		f.decodeTextFrame()
		return
	}
	/*
		case 8, 11, 13:
			f.decodeAirmet()
	*/
	fmt.Fprintf(ioutil.Discard, "don't know what to do with product id: %d\n", f.Product_id)

	//	logger.Printf("pos=%d,len=%d,t_opt=%d,product_id=%d, time=%d:%d\n", frame_start, frame_len, t_opt, product_id, fisb_hours, fisb_minutes)
}
// DecodeUplink parses the raw 432-byte UAT uplink frame held in u.msg:
// the ground station latitude/longitude from the header, then each
// information frame in the application data, appending decoded frames
// to u.Frames. Sets u.decoded on success.
func (u *UATMsg) DecodeUplink() error {
	//	position_valid := (uint32(frame[5]) & 0x01) != 0
	frame := u.msg

	if len(frame) < UPLINK_FRAME_DATA_BYTES {
		return errors.New(fmt.Sprintf("DecodeUplink: short read (%d).", len(frame)))
	}

	// 24-bit raw lat/lon of the ground station, 360/2^24 degrees per count.
	raw_lat := (uint32(frame[0]) << 15) | (uint32(frame[1]) << 7) | (uint32(frame[2]) >> 1)
	raw_lon := ((uint32(frame[2]) & 0x01) << 23) | (uint32(frame[3]) << 15) | (uint32(frame[4]) << 7) | (uint32(frame[5]) >> 1)

	lat := float64(raw_lat) * 360.0 / 16777216.0
	lon := float64(raw_lon) * 360.0 / 16777216.0

	// Wrap into [-90, 90] / [-180, 180].
	if lat > 90 {
		lat = lat - 180
	}
	if lon > 180 {
		lon = lon - 360
	}
	u.Lat = lat
	u.Lon = lon

	//	utc_coupled := (uint32(frame[6]) & 0x80) != 0
	app_data_valid := (uint32(frame[6]) & 0x20) != 0
	//	slot_id := uint32(frame[6]) & 0x1f
	//	tisb_site_id := uint32(frame[7]) >> 4

	//	logger.Printf("position_valid=%t, %.04f, %.04f, %t, %t, %d, %d\n", position_valid, lat, lon, utc_coupled, app_data_valid, slot_id, tisb_site_id)

	if !app_data_valid {
		return nil // Not sure when this even happens?
	}

	app_data := frame[8:432]
	num_info_frames := 0
	pos := 0
	total_len := len(app_data)

	for (num_info_frames < UPLINK_MAX_INFO_FRAMES) && (pos+2 <= total_len) {
		data := app_data[pos:]
		frame_length := (uint32(data[0]) << 1) | (uint32(data[1]) >> 7)
		frame_type := uint32(data[1]) & 0x0f
		// Each info frame is a 2-byte header followed by frame_length
		// payload bytes. The previous check omitted the header bytes, so
		// a malformed length could slice past the end below and panic.
		if pos+2+int(frame_length) > total_len {
			break // Overrun?
		}
		if frame_length == 0 { // Empty frame. Quit here.
			break
		}

		pos = pos + 2
		data = data[2 : frame_length+2]

		thisFrame := new(UATFrame)
		thisFrame.Raw_data = data
		thisFrame.frame_length = frame_length
		thisFrame.Frame_type = frame_type

		thisFrame.decodeInfoFrame()

		// Save the decoded frame.
		u.Frames = append(u.Frames, thisFrame)
		// Was never incremented before, which defeated the
		// UPLINK_MAX_INFO_FRAMES cap on the loop above.
		num_info_frames++

		pos = pos + int(frame_length)
	}

	u.decoded = true
	return nil
}
/*
	GetTextReports aggregates the text reports across all information
	frames in the message, lazily decoding the uplink first if that has
	not already been done. Empty report strings are skipped.
*/
func (u *UATMsg) GetTextReports() ([]string, error) {
	reports := make([]string, 0)

	// Decode on first use.
	if !u.decoded {
		if err := u.DecodeUplink(); err != nil {
			return reports, err
		}
	}

	for _, frame := range u.Frames {
		for _, report := range frame.Text_data {
			if len(report) > 0 {
				reports = append(reports, report)
			}
		}
	}

	return reports, nil
}
/*
	New parses a single message line in the "dump978" output format:

		+<hex payload>;rs=<Reed-Solomon error count>;ss=<signal strength>;

	Only "long" uplink ('+') messages are accepted. The optional rs/ss
	fields are stored when present (both default to -1). The hex payload
	is decoded into ret.msg but not interpreted until DecodeUplink().
*/
func New(buf string) (*UATMsg, error) {
	ret := new(UATMsg)

	buf = strings.Trim(buf, "\r\n") // Remove newlines.
	x := strings.Split(buf, ";")    // We want to discard everything before the first ';'.
	if len(x) < 2 {
		return ret, errors.New(fmt.Sprintf("New UATMsg: Invalid format (%s).", buf))
	}

	/*
		Parse _;rs=?;ss=? - if available.
			RS_Err			int
			SignalStrength	int
	*/
	ret.SignalStrength = -1
	ret.RS_Err = -1
	for _, f := range x[1:] {
		x2 := strings.Split(f, "=")
		if len(x2) != 2 {
			continue
		}
		i, err := strconv.Atoi(x2[1])
		if err != nil {
			continue
		}
		if x2[0] == "ss" {
			ret.SignalStrength = i
		} else if x2[0] == "rs" {
			ret.RS_Err = i
		}
	}

	s := x[0]

	// Only want "long" uplink messages.
	if (len(s)-1)%2 != 0 || (len(s)-1)/2 != UPLINK_FRAME_DATA_BYTES {
		return ret, errors.New(fmt.Sprintf("New UATMsg: short read (%d).", len(s)))
	}

	if s[0] != '+' { // Only want + ("Uplink") messages currently. - (Downlink) or messages that start with other are discarded.
		return ret, errors.New("New UATMsg: expecting uplink frame.")
	}

	s = s[1:] // Remove the preceding '+' or '-' character.

	// Convert the hex string into a byte array.
	// The Decode error was previously ignored, which silently left a
	// partially-decoded (garbage) frame behind on malformed input.
	frame := make([]byte, UPLINK_FRAME_DATA_BYTES)
	if _, err := hex.Decode(frame, []byte(s)); err != nil {
		return ret, errors.New(fmt.Sprintf("New UATMsg: invalid hex (%s).", err.Error()))
	}
	ret.msg = frame

	return ret, nil
}
| ssokol/stratux | uatparse/uatparse.go | GO | bsd-3-clause | 21,443 |
/*
* Copyright 2011 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "gm.h"
#include "SkGradientShader.h"
namespace skiagm {
// GM ("golden master") test: draws two overlapping circles under a
// perspective transform with each of the four path fill types (winding,
// even-odd, and their inverses), both anti-aliased and not, at two scales,
// to verify fill-type handling when the CTM has perspective.
class FillTypePerspGM : public GM {
    SkPath fPath;  // Two overlapping circles; overlap distinguishes winding from even-odd.
public:
    FillTypePerspGM() {}

    // Lazily builds the test path on first use.
    void makePath() {
        if (fPath.isEmpty()) {
            const SkScalar radius = SkIntToScalar(45);
            fPath.addCircle(SkIntToScalar(50), SkIntToScalar(50), radius);
            fPath.addCircle(SkIntToScalar(100), SkIntToScalar(100), radius);
        }
    }

protected:
    SkString onShortName() SK_OVERRIDE {
        return SkString("filltypespersp");
    }

    SkISize onISize() SK_OVERRIDE {
        return SkISize::Make(835, 840);
    }

    // Draws fPath once at cell origin (x, y) with the given fill type,
    // scaled about the center of a 150x150 cell that is also the clip.
    void showPath(SkCanvas* canvas, int x, int y, SkPath::FillType ft,
                  SkScalar scale, const SkPaint& paint) {
        const SkRect r = { 0, 0, SkIntToScalar(150), SkIntToScalar(150) };

        canvas->save();
        canvas->translate(SkIntToScalar(x), SkIntToScalar(y));
        canvas->clipRect(r);
        canvas->drawColor(SK_ColorWHITE);
        fPath.setFillType(ft);
        // Scale about the cell center so the circles stay centered.
        canvas->translate(r.centerX(), r.centerY());
        canvas->scale(scale, scale);
        canvas->translate(-r.centerX(), -r.centerY());
        canvas->drawPath(fPath, paint);
        canvas->restore();
    }

    // Renders a 2x2 grid of the four fill types with a radial-gradient
    // paint, using the given path scale and anti-alias setting.
    void showFour(SkCanvas* canvas, SkScalar scale, bool aa) {
        SkPaint paint;
        SkPoint center = SkPoint::Make(SkIntToScalar(100), SkIntToScalar(100));
        SkColor colors[] = {SK_ColorBLUE, SK_ColorRED, SK_ColorGREEN};
        SkScalar pos[] = {0, SK_ScalarHalf, SK_Scalar1};
        SkShader* s = SkGradientShader::CreateRadial(center,
                                                     SkIntToScalar(100),
                                                     colors,
                                                     pos,
                                                     SK_ARRAY_COUNT(colors),
                                                     SkShader::kClamp_TileMode);
        // setShader refs the shader; drop our creation reference.
        paint.setShader(s)->unref();
        paint.setAntiAlias(aa);

        showPath(canvas, 0, 0, SkPath::kWinding_FillType,
                 scale, paint);
        showPath(canvas, 200, 0, SkPath::kEvenOdd_FillType,
                 scale, paint);
        showPath(canvas, 00, 200, SkPath::kInverseWinding_FillType,
                 scale, paint);
        showPath(canvas, 200, 200, SkPath::kInverseEvenOdd_FillType,
                 scale, paint);
    }

    void onDraw(SkCanvas* canvas) SK_OVERRIDE {
        this->makePath();

        // do perspective drawPaint as the background;
        SkPaint bkgnrd;
        SkPoint center = SkPoint::Make(SkIntToScalar(100),
                                       SkIntToScalar(100));
        SkColor colors[] = {SK_ColorBLACK, SK_ColorCYAN,
                            SK_ColorYELLOW, SK_ColorWHITE};
        SkScalar pos[] = {0, SK_ScalarHalf / 2,
                          3 * SK_ScalarHalf / 2, SK_Scalar1};
        SkShader* s = SkGradientShader::CreateRadial(center,
                                                     SkIntToScalar(1000),
                                                     colors,
                                                     pos,
                                                     SK_ARRAY_COUNT(colors),
                                                     SkShader::kClamp_TileMode);
        bkgnrd.setShader(s)->unref();
        canvas->save();
        canvas->translate(SkIntToScalar(100), SkIntToScalar(100));
        SkMatrix mat;
        mat.reset();
        mat.setPerspY(SK_Scalar1 / 1000);
        canvas->concat(mat);
        canvas->drawPaint(bkgnrd);
        canvas->restore();

        // draw the paths in perspective
        SkMatrix persp;
        persp.reset();
        persp.setPerspX(-SK_Scalar1 / 1800);
        persp.setPerspY(SK_Scalar1 / 500);
        canvas->concat(persp);
        canvas->translate(SkIntToScalar(20), SkIntToScalar(20));

        // Four quadrants: non-AA at 1x and 5/4x on top, AA below.
        const SkScalar scale = SkIntToScalar(5)/4;
        showFour(canvas, SK_Scalar1, false);
        canvas->translate(SkIntToScalar(450), 0);
        showFour(canvas, scale, false);

        canvas->translate(SkIntToScalar(-450), SkIntToScalar(450));
        showFour(canvas, SK_Scalar1, true);
        canvas->translate(SkIntToScalar(450), 0);
        showFour(canvas, scale, true);
    }

private:
    typedef GM INHERITED;
};
//////////////////////////////////////////////////////////////////////////////
// Registers this GM with the test harness at static-initialization time.
static GM* MyFactory(void*) { return new FillTypePerspGM; }
static GMRegistry reg(MyFactory);
}
| CTSRD-SOAAP/chromium-42.0.2311.135 | third_party/skia/gm/filltypespersp.cpp | C++ | bsd-3-clause | 4,675 |
/*
* Copyright (C) 2004, 2005, 2006, 2007 Nikolas Zimmermann <zimmermann@kde.org>
* Copyright (C) 2004, 2005 Rob Buis <buis@kde.org>
* Copyright (C) 2005 Eric Seidel <eric@webkit.org>
* Copyright (C) 2013 Google Inc. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public License
* along with this library; see the file COPYING.LIB. If not, write to
* the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301, USA.
*/
#include "config.h"
#include "platform/graphics/filters/FEDiffuseLighting.h"
#include "platform/graphics/filters/LightSource.h"
#include "platform/text/TextStream.h"
namespace blink {
// Diffuse lighting uses only the surface scale and diffuse constant; the
// specular constant and exponent slots of the shared FELighting base are
// passed as zero.
FEDiffuseLighting::FEDiffuseLighting(Filter* filter, const Color& lightingColor, float surfaceScale,
    float diffuseConstant, PassRefPtr<LightSource> lightSource)
    : FELighting(filter, DiffuseLighting, lightingColor, surfaceScale, diffuseConstant, 0, 0, lightSource)
{
}
// Factory for the diffuse lighting filter effect (<feDiffuseLighting>).
PassRefPtrWillBeRawPtr<FEDiffuseLighting> FEDiffuseLighting::create(Filter* filter, const Color& lightingColor,
    float surfaceScale, float diffuseConstant, PassRefPtr<LightSource> lightSource)
{
    return adoptRefWillBeNoop(new FEDiffuseLighting(filter, lightingColor, surfaceScale, diffuseConstant, lightSource));
}
// Out-of-line (empty) destructor.
FEDiffuseLighting::~FEDiffuseLighting()
{
}
// Returns the current lighting color.
Color FEDiffuseLighting::lightingColor() const
{
    return m_lightingColor;
}
// Updates the lighting color. Returns true only when the stored value
// actually changed, so callers can skip invalidation on a no-op set.
bool FEDiffuseLighting::setLightingColor(const Color& lightingColor)
{
    if (m_lightingColor != lightingColor) {
        m_lightingColor = lightingColor;
        return true;
    }
    return false;
}
// Returns the current surface scale factor.
float FEDiffuseLighting::surfaceScale() const
{
    return m_surfaceScale;
}
// Stores a new surface scale. Returns true when the value changed, false
// for a no-op set. The exact float comparison is intentional: it detects
// "same value written again", not numeric closeness.
bool FEDiffuseLighting::setSurfaceScale(float surfaceScale)
{
    if (m_surfaceScale != surfaceScale) {
        m_surfaceScale = surfaceScale;
        return true;
    }
    return false;
}
// Returns the current diffuse reflection constant (kd).
float FEDiffuseLighting::diffuseConstant() const
{
    return m_diffuseConstant;
}
// Stores a new diffuse constant, clamping negative inputs to zero before
// comparing. Returns true only when the stored value actually changed.
bool FEDiffuseLighting::setDiffuseConstant(float diffuseConstant)
{
    const float clamped = std::max(diffuseConstant, 0.0f);
    if (m_diffuseConstant == clamped)
        return false;
    m_diffuseConstant = clamped;
    return true;
}
// Returns the light source (may be null); ownership stays with this effect.
const LightSource* FEDiffuseLighting::lightSource() const
{
    return m_lightSource.get();
}
// Replaces the light source. Unlike the scalar setters, this does not
// report whether the value changed.
void FEDiffuseLighting::setLightSource(PassRefPtr<LightSource> lightSource)
{
    m_lightSource = lightSource;
}
// Writes a one-line debug description of this effect, then recursively
// dumps the single input effect one indent level deeper. The exact output
// format is order-sensitive (compared against expectations by tests).
TextStream& FEDiffuseLighting::externalRepresentation(TextStream& ts, int indent) const
{
    writeIndent(ts, indent);
    ts << "[feDiffuseLighting";
    FilterEffect::externalRepresentation(ts);
    ts << " surfaceScale=\"" << m_surfaceScale << "\" " << "diffuseConstant=\"" << m_diffuseConstant << "\"]\n";
    inputEffect(0)->externalRepresentation(ts, indent + 1);
    return ts;
}
} // namespace blink
| Workday/OpenFrame | third_party/WebKit/Source/platform/graphics/filters/FEDiffuseLighting.cpp | C++ | bsd-3-clause | 3,336 |
<?php
// Version: 2.0; ManageMembers
// Language strings for the membergroup-management and member-approval
// admin screens. String values are user-visible and must not be altered
// except for translation in a localized copy of this file.
// NOTE(review): $context is never referenced below - confirm before removing.
global $context;
// Membergroup list / index strings.
$txt['groups'] = 'Groups';
$txt['viewing_groups'] = 'Viewing Membergroups';
$txt['membergroups_title'] = 'Manage Membergroups';
$txt['membergroups_description'] = 'Membergroups are groups of members that have similar permission settings, appearance, or access rights. Some membergroups are based on the amount of posts a user has made. You can assign someone to a membergroup by selecting their profile and changing their account settings.';
$txt['membergroups_modify'] = 'Modify';
$txt['membergroups_add_group'] = 'Add group';
$txt['membergroups_regular'] = 'Regular groups';
$txt['membergroups_post'] = 'Post count based groups';
$txt['membergroups_group_name'] = 'Membergroup name';
$txt['membergroups_new_board'] = 'Visible Boards';
$txt['membergroups_new_board_desc'] = 'Boards the membergroup can see';
$txt['membergroups_new_board_post_groups'] = '<em>Note: normally, post groups don\'t need access because the group the member is in will give them access.</em>';
$txt['membergroups_new_as_inherit'] = 'inherit from';
$txt['membergroups_new_as_type'] = 'by type';
$txt['membergroups_new_as_copy'] = 'based off of';
$txt['membergroups_new_copy_none'] = '(none)';
$txt['membergroups_can_edit_later'] = 'You can edit them later.';
// Strings for editing a single membergroup.
$txt['membergroups_edit_group'] = 'Edit Membergroup';
$txt['membergroups_edit_name'] = 'Group name';
$txt['membergroups_edit_inherit_permissions'] = 'Inherit Permissions';
$txt['membergroups_edit_inherit_permissions_desc'] = 'Select "No" to enable group to have own permission set.';
$txt['membergroups_edit_inherit_permissions_no'] = 'No - Use Unique Permissions';
$txt['membergroups_edit_inherit_permissions_from'] = 'Inherit From';
$txt['membergroups_edit_hidden'] = 'Visibility';
$txt['membergroups_edit_hidden_no'] = 'Visible';
$txt['membergroups_edit_hidden_boardindex'] = 'Visible - Except in Group Key';
$txt['membergroups_edit_hidden_all'] = 'Invisible';
// Do not use numeric entities in the below string.
$txt['membergroups_edit_hidden_warning'] = 'Are you sure you want to disallow assignment of this group as a users primary group?\\n\\nDoing so will restrict assignment to additional groups only, and will update all current "primary" members to have it as an additional group only.';
$txt['membergroups_edit_desc'] = 'Group description';
$txt['membergroups_edit_group_type'] = 'Group Type';
$txt['membergroups_edit_select_group_type'] = 'Select Group Type';
$txt['membergroups_group_type_private'] = 'Private <span class="smalltext">(Membership must be assigned)</span>';
$txt['membergroups_group_type_protected'] = 'Protected <span class="smalltext">(Only administrators can manage and assign)</span>';
$txt['membergroups_group_type_request'] = 'Requestable <span class="smalltext">(User may request membership)</span>';
$txt['membergroups_group_type_free'] = 'Free <span class="smalltext">(User may leave and join group at will)</span>';
$txt['membergroups_group_type_post'] = 'Post Based <span class="smalltext">(Membership based on post count)</span>';
$txt['membergroups_min_posts'] = 'Required posts';
$txt['membergroups_online_color'] = 'Color in online list';
$txt['membergroups_star_count'] = 'Number of star images';
$txt['membergroups_star_image'] = 'Star image filename';
$txt['membergroups_star_image_note'] = 'you can use $language for the language of the user';
$txt['membergroups_max_messages'] = 'Max personal messages';
$txt['membergroups_max_messages_note'] = '0 = unlimited';
$txt['membergroups_edit_save'] = 'Save';
$txt['membergroups_delete'] = 'Delete';
$txt['membergroups_confirm_delete'] = 'Are you sure you want to delete this group?!';
// Strings for viewing and managing a group's membership.
$txt['membergroups_members_title'] = 'Viewing Group';
$txt['membergroups_members_group_members'] = 'Group Members';
$txt['membergroups_members_no_members'] = 'This group is currently empty';
$txt['membergroups_members_add_title'] = 'Add a member to this group';
$txt['membergroups_members_add_desc'] = 'List of Members to Add';
$txt['membergroups_members_add'] = 'Add Members';
$txt['membergroups_members_remove'] = 'Remove from Group';
$txt['membergroups_members_last_active'] = 'Last Active';
$txt['membergroups_members_additional_only'] = 'Add as additional group only.';
$txt['membergroups_members_group_moderators'] = 'Group Moderators';
$txt['membergroups_members_description'] = 'Description';
// Use javascript escaping in the below.
$txt['membergroups_members_deadmin_confirm'] = 'Are you sure you wish to remove yourself from the Administration group?';
$txt['membergroups_postgroups'] = 'Post groups';
$txt['membergroups_settings'] = 'Membergroup Settings';
$txt['groups_manage_membergroups'] = 'Groups allowed to change membergroups';
$txt['membergroups_select_permission_type'] = 'Select permission profile';
$txt['membergroups_images_url'] = '{theme URL}/images/';
$txt['membergroups_select_visible_boards'] = 'Show boards';
$txt['membergroups_members_top'] = 'Members';
$txt['membergroups_name'] = 'Name';
$txt['membergroups_stars'] = 'Stars';
// Strings for approving / activating pending member accounts.
$txt['admin_browse_approve'] = 'Members whose accounts are awaiting approval';
$txt['admin_browse_approve_desc'] = 'From here you can manage all members who are waiting to have their accounts approved.';
$txt['admin_browse_activate'] = 'Members whose accounts are awaiting activation';
$txt['admin_browse_activate_desc'] = 'This screen lists all the members who have still not activated their accounts at your forum.';
$txt['admin_browse_awaiting_approval'] = 'Awaiting Approval (%1$d)';
$txt['admin_browse_awaiting_activate'] = 'Awaiting Activation (%1$d)';
$txt['admin_browse_username'] = 'Username';
$txt['admin_browse_email'] = 'Email Address';
$txt['admin_browse_ip'] = 'IP Address';
$txt['admin_browse_registered'] = 'Registered';
$txt['admin_browse_id'] = 'ID';
$txt['admin_browse_with_selected'] = 'With Selected';
$txt['admin_browse_no_members_approval'] = 'No members currently await approval.';
$txt['admin_browse_no_members_activate'] = 'No members currently have not activated their accounts.';
// Don't use entities in the below strings, except the main ones. (lt, gt, quot.)
$txt['admin_browse_warn'] = 'all selected members?';
$txt['admin_browse_outstanding_warn'] = 'all affected members?';
$txt['admin_browse_w_approve'] = 'Approve';
$txt['admin_browse_w_activate'] = 'Activate';
$txt['admin_browse_w_delete'] = 'Delete';
$txt['admin_browse_w_reject'] = 'Reject';
$txt['admin_browse_w_remind'] = 'Remind';
$txt['admin_browse_w_approve_deletion'] = 'Approve (Delete Accounts)';
$txt['admin_browse_w_email'] = 'and send email';
$txt['admin_browse_w_approve_require_activate'] = 'Approve and Require Activation';
$txt['admin_browse_filter_by'] = 'Filter By';
$txt['admin_browse_filter_show'] = 'Displaying';
$txt['admin_browse_filter_type_0'] = 'Unactivated New Accounts';
$txt['admin_browse_filter_type_2'] = 'Unactivated Email Changes';
$txt['admin_browse_filter_type_3'] = 'Unapproved New Accounts';
$txt['admin_browse_filter_type_4'] = 'Unapproved Account Deletions';
$txt['admin_browse_filter_type_5'] = 'Unapproved "Under Age" Accounts';
$txt['admin_browse_outstanding'] = 'Outstanding Members';
$txt['admin_browse_outstanding_days_1'] = 'With all members who registered longer than';
$txt['admin_browse_outstanding_days_2'] = 'days ago';
$txt['admin_browse_outstanding_perform'] = 'Perform the following action';
$txt['admin_browse_outstanding_go'] = 'Perform Action';
$txt['check_for_duplicate'] = 'Check for Duplicates';
$txt['dont_check_for_duplicate'] = 'Don\'t Check for Duplicates';
$txt['duplicates'] = 'Duplicates';
$txt['not_activated'] = 'Not activated';
?> | Karpec/gizd | smf/Themes/default/languages/ManageMembers.english.php | PHP | bsd-3-clause | 7,650 |
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
import {createElementWithClassName} from 'chrome://resources/js/util.m.js';
/**
* Create by |LineChart.LineChart|.
* Create a dummy scrollbar to show the position of the line chart and to scroll
* the line chart, so we can draw the visible part of the line chart only
* instead of drawing the whole chart.
* @const
*/
export class Scrollbar {
  constructor(/** function(): undefined */ callback) {
    /** @const {function(): undefined} - Handle the scrolling event. */
    this.callback_ = callback;

    /** @type {number} - The range the scrollbar can scroll. */
    this.range_ = 0;

    /** @type {number} - The current position of the scrollbar. */
    this.position_ = 0;

    /** @type {number} - The real width of this scrollbar, in pixels. */
    this.width_ = 0;

    /** @type {Element} - The outer div to show the scrollbar. */
    this.outerDiv_ =
        createElementWithClassName('div', 'horizontal-scrollbar-outer');
    this.outerDiv_.addEventListener('scroll', this.onScroll_.bind(this));

    /** @type {Element} - The inner div to make outer div scrollable. */
    this.innerDiv_ =
        createElementWithClassName('div', 'horizontal-scrollbar-inner');
    this.outerDiv_.appendChild(this.innerDiv_);
  }

  /**
   * Scrolling event handler. Invokes |callback_| only when the position
   * actually changed.
   */
  onScroll_() {
    const /** number */ newPosition = this.outerDiv_.scrollLeft;
    if (newPosition === this.position_) {
      return;
    }
    this.position_ = newPosition;
    this.callback_();
  }

  /** @return {Element} */
  getRootDiv() {
    return this.outerDiv_;
  }

  /**
   * Return the height of the scrollbar element.
   * @return {number}
   */
  getHeight() {
    return this.outerDiv_.offsetHeight;
  }

  /** @return {number} */
  getRange() {
    return this.range_;
  }

  /**
   * Position may be a float because |document.scrollLeft| may be a float;
   * round it for callers.
   * @return {number}
   */
  getPosition() {
    return Math.round(this.position_);
  }

  /**
   * Change the size of the outer div and update the scrollbar position.
   * @param {number} width
   */
  resize(width) {
    if (this.width_ === width) {
      return;
    }
    this.width_ = width;
    this.updateOuterDivWidth_();
  }

  updateOuterDivWidth_() {
    this.constructor.setNodeWidth(this.outerDiv_, this.width_);
  }

  /**
   * Set the scrollable range to |range|. Uses the inner div's width to
   * control the scrollable range. If the position goes out of range after
   * the range update, clamp it to the boundary value.
   * @param {number} range
   */
  setRange(range) {
    this.range_ = range;
    this.updateInnerDivWidth_();
    if (range < this.position_) {
      this.position_ = range;
      this.updateScrollbarPosition_();
    }
  }

  updateInnerDivWidth_() {
    const width = this.outerDiv_.clientWidth;
    this.constructor.setNodeWidth(this.innerDiv_, width + this.range_);
  }

  /**
   * @param {Element} node
   * @param {number} width
   */
  static setNodeWidth(node, width) {
    node.style.width = width + 'px';
  }

  /**
   * Set the scrollbar position to |position|. If the new position goes out
   * of range, clamp it to the boundary value.
   * @param {number} position
   */
  setPosition(position) {
    const /** number */ newPosition =
        Math.max(0, Math.min(position, this.range_));
    this.position_ = newPosition;
    this.updateScrollbarPosition_();
  }

  /**
   * Update the scrollbar position via the Javascript scrollbar API. The
   * resulting position may not be exactly the assigned value even when it
   * is in range. See crbug.com/760425.
   */
  updateScrollbarPosition_() {
    if (this.outerDiv_.scrollLeft === this.position_) {
      return;
    }
    this.outerDiv_.scrollLeft = this.position_;
  }

  /**
   * Return true if the scrollbar is at the right edge of the chart.
   * @return {boolean}
   */
  isScrolledToRightEdge() {
    /* |scrollLeft| may become a float even if assigned an integer. If the
     * distance to the right edge is less than 2 pixels, consider the bar
     * scrolled to the right edge.
     */
    const scrollLeftErrorAmount = 2;
    return this.position_ + scrollLeftErrorAmount > this.range_;
  }

  /**
   * Scroll the scrollbar to the right edge.
   */
  scrollToRightEdge() {
    this.setPosition(this.range_);
  }
}
| chromium/chromium | chrome/browser/resources/chromeos/sys_internals/line_chart/scrollbar.js | JavaScript | bsd-3-clause | 4,454 |
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
/****************************************************************************************\
* K-Nearest Neighbors Classifier *
\****************************************************************************************/
// k Nearest Neighbors
// Default constructor: creates an empty, untrained k-NN model.
CvKNearest::CvKNearest()
{
    samples = 0;
    clear();
}

// Destructor: releases all stored training sample blocks.
CvKNearest::~CvKNearest()
{
    clear();
}

// Convenience constructor that trains the model immediately from the given
// data (non-updating train; see train() for parameter semantics).
CvKNearest::CvKNearest( const CvMat* _train_data, const CvMat* _responses,
                        const CvMat* _sample_idx, bool _is_regression, int _max_k )
{
    samples = 0;
    train( _train_data, _responses, _sample_idx, _is_regression, _max_k, false );
}
void CvKNearest::clear()
{
while( samples )
{
CvVectors* next_samples = samples->next;
cvFree( &samples->data.fl );
cvFree( &samples );
samples = next_samples;
}
var_count = 0;
total = 0;
max_k = 0;
}
// Trivial accessors for the trained model's parameters.
int CvKNearest::get_max_k() const { return max_k; }            // maximum k accepted by find_nearest
int CvKNearest::get_var_count() const { return var_count; }    // feature vector dimensionality
bool CvKNearest::is_regression() const { return regression; }  // regression vs. classification mode
int CvKNearest::get_sample_count() const { return total; }     // number of stored training samples
// Trains the model, or incrementally appends samples to an existing base.
//
// _train_data:    one training vector per row (CV_ROW_SAMPLE layout).
// _responses:     per-sample target values; categorical labels are treated as
//                 ordered so labels are not compressed and new classes can be
//                 introduced by later update calls.
// _sample_idx:    optional subset of rows to use (may be NULL).
// _is_regression: selects regression (mean of neighbors) vs. classification.
// _max_k:         largest k that later find_nearest calls may request.
// _update_base:   when true, appends to the existing base (dimensionality must
//                 match) instead of clearing and retraining.
// Returns true on success.
bool CvKNearest::train( const CvMat* _train_data, const CvMat* _responses,
                        const CvMat* _sample_idx, bool _is_regression,
                        int _max_k, bool _update_base )
{
    bool ok = false;
    CvMat* responses = 0;

    CV_FUNCNAME( "CvKNearest::train" );

    __BEGIN__;

    CvVectors* _samples = 0;
    float** _data = 0;
    int _count = 0, _dims = 0, _dims_all = 0, _rsize = 0;

    if( !_update_base )
        clear();

    // Prepare training data and related parameters.
    // Treat categorical responses as ordered - to prevent class label compression and
    // to enable entering new classes in the updates
    CV_CALL( cvPrepareTrainData( "CvKNearest::train", _train_data, CV_ROW_SAMPLE,
        _responses, CV_VAR_ORDERED, 0, _sample_idx, true, (const float***)&_data,
        &_count, &_dims, &_dims_all, &responses, 0, 0 ));

    if( !responses )
        CV_ERROR( CV_StsNoMem, "Could not allocate memory for responses" );

    if( _update_base && _dims != var_count )
        CV_ERROR( CV_StsBadArg, "The newly added data have different dimensionality" );

    if( !_update_base )
    {
        if( _max_k < 1 )
            CV_ERROR( CV_StsOutOfRange, "max_k must be a positive number" );

        regression = _is_regression;
        var_count = _dims;
        max_k = _max_k;
    }

    // One CvVectors block is allocated per train() call; the responses are
    // copied into the extra _rsize bytes directly after the struct, and the
    // block is pushed on the front of the 'samples' linked list.
    _rsize = _count*sizeof(float);
    CV_CALL( _samples = (CvVectors*)cvAlloc( sizeof(*_samples) + _rsize ));
    _samples->next = samples;
    _samples->type = CV_32F;
    _samples->data.fl = _data;
    _samples->count = _count;
    total += _count;

    samples = _samples;
    memcpy( _samples + 1, responses->data.fl, _rsize );

    ok = true;

    __END__;

    // Release the temporary response matrix only if cvPrepareTrainData made a
    // copy (i.e. it does not alias the caller's buffer).
    if( responses && responses->data.ptr != _responses->data.ptr )
        cvReleaseMat(&responses);

    return ok;
}
// Brute-force k-nearest-neighbor search for the query rows [start, end) of
// _samples. For each query it maintains a sorted top-k list of squared L2
// distances (in 'dist'), the matching responses ('neighbor_responses') and,
// optionally, pointers to the neighbor vectors ('neighbors').
//
// Note: float distances are compared through their IEEE-754 bit patterns
// (Cv32suf .i) — valid here because all distances are non-negative, so the
// integer ordering matches the float ordering.
void CvKNearest::find_neighbors_direct( const CvMat* _samples, int k, int start, int end,
                float* neighbor_responses, const float** neighbors, float* dist ) const
{
    int i, j, count = end - start, k1 = 0, k2 = 0, d = var_count;
    CvVectors* s = samples;

    // Walk every stored training block and every vector in it.
    for( ; s != 0; s = s->next )
    {
        int n = s->count;
        for( j = 0; j < n; j++ )
        {
            for( i = 0; i < count; i++ )
            {
                double sum = 0;
                Cv32suf si;
                const float* v = s->data.fl[j];
                const float* u = (float*)(_samples->data.ptr + _samples->step*(start + i));
                Cv32suf* dd = (Cv32suf*)(dist + i*k);
                float* nr;
                const float** nn;
                int t, ii, ii1;

                // Squared Euclidean distance, manually unrolled 4-wide.
                for( t = 0; t <= d - 4; t += 4 )
                {
                    double t0 = u[t] - v[t], t1 = u[t+1] - v[t+1];
                    double t2 = u[t+2] - v[t+2], t3 = u[t+3] - v[t+3];
                    sum += t0*t0 + t1*t1 + t2*t2 + t3*t3;
                }

                for( ; t < d; t++ )
                {
                    double t0 = u[t] - v[t];
                    sum += t0*t0;
                }

                // Find the insertion point in the (descending-from-end) sorted
                // top-k list; bail out if this candidate is not among the best k.
                si.f = (float)sum;
                for( ii = k1-1; ii >= 0; ii-- )
                    if( si.i > dd[ii].i )
                        break;
                if( ii >= k-1 )
                    continue;

                // Shift worse entries up and insert the new neighbor.
                nr = neighbor_responses + i*k;
                nn = neighbors ? neighbors + (start + i)*k : 0;
                for( ii1 = k2 - 1; ii1 > ii; ii1-- )
                {
                    dd[ii1+1].i = dd[ii1].i;
                    nr[ii1+1] = nr[ii1];
                    if( nn ) nn[ii1+1] = nn[ii1];
                }
                dd[ii+1].i = si.i;
                // The responses live directly after the CvVectors header.
                nr[ii+1] = ((float*)(s + 1))[j];
                if( nn )
                    nn[ii+1] = v;
            }
            // k1 = number of valid entries so far, k2 = last shiftable slot.
            k1 = MIN( k1+1, k );
            k2 = MIN( k1, k-1 );
        }
    }
}
// Converts the raw neighbor lists for query rows [start, end) into final
// outputs: the predicted value per query (_results), the neighbor responses
// (_neighbor_responses) and the distances (_dist), all optional.
//
// For regression the prediction is the mean of the k1 nearest responses; for
// classification it is the majority label among them (computed by sorting the
// label bit patterns in sort_buf and scanning for the longest run).
// Returns the prediction for global sample 0, or 0.f if it is not in range.
float CvKNearest::write_results( int k, int k1, int start, int end,
    const float* neighbor_responses, const float* dist,
    CvMat* _results, CvMat* _neighbor_responses,
    CvMat* _dist, Cv32suf* sort_buf ) const
{
    float result = 0.f;
    int i, j, j1, count = end - start;
    double inv_scale = 1./k1;
    // Row step for non-continuous result matrices, in floats.
    int rstep = _results && !CV_IS_MAT_CONT(_results->type) ? _results->step/sizeof(result) : 1;

    for( i = 0; i < count; i++ )
    {
        const Cv32suf* nr = (const Cv32suf*)(neighbor_responses + i*k);
        float* dst;
        float r;

        if( _results || start+i == 0 )
        {
            if( regression )
            {
                // Mean of the k1 neighbor responses.
                double s = 0;
                for( j = 0; j < k1; j++ )
                    s += nr[j].f;

                r = (float)(s*inv_scale);
            }
            else
            {
                // Majority vote: bubble-sort the k1 labels (as int bit
                // patterns), then pick the value with the longest run.
                int prev_start = 0, best_count = 0, cur_count;
                Cv32suf best_val;

                for( j = 0; j < k1; j++ )
                    sort_buf[j].i = nr[j].i;

                for( j = k1-1; j > 0; j-- )
                {
                    bool swap_fl = false;
                    for( j1 = 0; j1 < j; j1++ )
                        if( sort_buf[j1].i > sort_buf[j1+1].i )
                        {
                            int t;
                            CV_SWAP( sort_buf[j1].i, sort_buf[j1+1].i, t );
                            swap_fl = true;
                        }
                    if( !swap_fl )
                        break;
                }

                best_val.i = 0;
                for( j = 1; j <= k1; j++ )
                    if( j == k1 || sort_buf[j].i != sort_buf[j-1].i )
                    {
                        cur_count = j - prev_start;
                        if( best_count < cur_count )
                        {
                            best_count = cur_count;
                            best_val.i = sort_buf[j-1].i;
                        }
                        prev_start = j;
                    }

                r = best_val.f;
            }

            // Remember the prediction for the very first sample; it is the
            // scalar return value of find_nearest.
            if( start+i == 0 )
                result = r;

            if( _results )
                _results->data.fl[(start + i)*rstep] = r;
        }

        if( _neighbor_responses )
        {
            // Copy the k1 valid responses and zero-fill the remainder.
            dst = (float*)(_neighbor_responses->data.ptr +
                (start + i)*_neighbor_responses->step);
            for( j = 0; j < k1; j++ )
                dst[j] = nr[j].f;
            for( ; j < k; j++ )
                dst[j] = 0.f;
        }

        if( _dist )
        {
            // Copy the k1 valid distances and zero-fill the remainder.
            dst = (float*)(_dist->data.ptr + (start + i)*_dist->step);
            for( j = 0; j < k1; j++ )
                dst[j] = dist[j + i*k];
            for( ; j < k; j++ )
                dst[j] = 0.f;
        }
    }

    return result;
}
// Parallel loop body used by find_nearest: each invocation processes one
// query sample (range element) independently — it finds that sample's
// neighbors and writes its slice of the output matrices. The scratch buffer
// holds k neighbor responses, k distances and k sort entries per sample.
struct P1 : cv::ParallelLoopBody {
  P1(const CvKNearest* _pointer, int _buf_sz, int _k, const CvMat* __samples, const float** __neighbors,
     int _k1, CvMat* __results, CvMat* __neighbor_responses, CvMat* __dist, float* _result)
  {
    pointer = _pointer;
    k = _k;
    _samples = __samples;
    _neighbors = __neighbors;
    k1 = _k1;
    _results = __results;
    _neighbor_responses = __neighbor_responses;
    _dist = __dist;
    result = _result;
    buf_sz = _buf_sz;
  }

  const CvKNearest* pointer;
  int k;
  const CvMat* _samples;
  const float** _neighbors;
  int k1;
  CvMat* _results;
  CvMat* _neighbor_responses;
  CvMat* _dist;
  float* result;
  int buf_sz;

  void operator()( const cv::Range& range ) const
  {
    // Per-worker scratch space; laid out as [responses | distances | sort buf].
    cv::AutoBuffer<float> buf(buf_sz);
    for(int i = range.start; i < range.end; i += 1 )
    {
        float* neighbor_responses = &buf[0];
        float* dist = neighbor_responses + 1*k;
        Cv32suf* sort_buf = (Cv32suf*)(dist + 1*k);
        pointer->find_neighbors_direct( _samples, k, i, i + 1,
                    neighbor_responses, _neighbors, dist );

        float r = pointer->write_results( k, k1, i, i + 1, neighbor_responses, dist,
                                 _results, _neighbor_responses, _dist, sort_buf );

        // Only sample 0 defines find_nearest's scalar return value.
        if( i == 0 )
            *result = r;
    }
  }
};
// Classifies/regresses each row of _samples using its k nearest training
// neighbors, optionally returning per-sample results, neighbor pointers,
// neighbor responses and distances. The heavy per-sample work is distributed
// with cv::parallel_for_. Returns the prediction for the first sample.
float CvKNearest::find_nearest( const CvMat* _samples, int k, CvMat* _results,
    const float** _neighbors, CvMat* _neighbor_responses, CvMat* _dist ) const
{
    float result = 0.f;
    const int max_blk_count = 128, max_buf_sz = 1 << 12;

    if( !samples )
        CV_Error( CV_StsError, "The search tree must be constructed first using train method" );

    // Validate sample matrix: 32F with var_count columns.
    if( !CV_IS_MAT(_samples) ||
        CV_MAT_TYPE(_samples->type) != CV_32FC1 ||
        _samples->cols != var_count )
        CV_Error( CV_StsBadArg, "Input samples must be floating-point matrix (<num_samples>x<var_count>)" );

    if( _results && (!CV_IS_MAT(_results) ||
        (_results->cols != 1 && _results->rows != 1) ||
        _results->cols + _results->rows - 1 != _samples->rows) )
        CV_Error( CV_StsBadArg,
        "The results must be 1d vector containing as much elements as the number of samples" );

    // Integer results are only meaningful for classification.
    if( _results && CV_MAT_TYPE(_results->type) != CV_32FC1 &&
        (CV_MAT_TYPE(_results->type) != CV_32SC1 || regression))
        CV_Error( CV_StsUnsupportedFormat,
        "The results must be floating-point or integer (in case of classification) vector" );

    if( k < 1 || k > max_k )
        CV_Error( CV_StsOutOfRange, "k must be within 1..max_k range" );

    if( _neighbor_responses )
    {
        if( !CV_IS_MAT(_neighbor_responses) || CV_MAT_TYPE(_neighbor_responses->type) != CV_32FC1 ||
            _neighbor_responses->rows != _samples->rows || _neighbor_responses->cols != k )
            CV_Error( CV_StsBadArg,
            "The neighbor responses (if present) must be floating-point matrix of <num_samples> x <k> size" );
    }

    if( _dist )
    {
        if( !CV_IS_MAT(_dist) || CV_MAT_TYPE(_dist->type) != CV_32FC1 ||
            _dist->rows != _samples->rows || _dist->cols != k )
            CV_Error( CV_StsBadArg,
            "The distances from the neighbors (if present) must be floating-point matrix of <num_samples> x <k> size" );
    }

    // Size the per-worker scratch buffer: responses + distances per block of
    // samples, plus k extra entries for the sort buffer.
    int count = _samples->rows;
    int count_scale = k*2;
    int blk_count0 = MIN( count, max_blk_count );
    int buf_sz = MIN( blk_count0 * count_scale, max_buf_sz );
    blk_count0 = MAX( buf_sz/count_scale, 1 );
    blk_count0 += blk_count0 % 2;
    blk_count0 = MIN( blk_count0, count );
    buf_sz = blk_count0 * count_scale + k;
    // Effective k is limited by how many training samples actually exist.
    int k1 = get_sample_count();
    k1 = MIN( k1, k );

    cv::parallel_for_(cv::Range(0, count), P1(this, buf_sz, k, _samples, _neighbors, k1,
                                             _results, _neighbor_responses, _dist, &result)
    );

    return result;
}
using namespace cv;
// C++-style (cv::Mat) wrapper: trains immediately from the given matrices.
CvKNearest::CvKNearest( const Mat& _train_data, const Mat& _responses,
                       const Mat& _sample_idx, bool _is_regression, int _max_k )
{
    samples = 0;
    train(_train_data, _responses, _sample_idx, _is_regression, _max_k, false );
}

// C++-style (cv::Mat) wrapper: converts to CvMat headers and forwards to the
// C-style train(). An empty _sample_idx is passed through as NULL.
bool CvKNearest::train( const Mat& _train_data, const Mat& _responses,
                       const Mat& _sample_idx, bool _is_regression,
                       int _max_k, bool _update_base )
{
    CvMat tdata = _train_data, responses = _responses, sidx = _sample_idx;

    return train(&tdata, &responses, sidx.data.ptr ? &sidx : 0, _is_regression, _max_k, _update_base );
}
// C++-style (cv::Mat) wrapper for find_nearest. Output matrices are
// (re)allocated to the required size/type if the caller-supplied ones do not
// already match, then the call is forwarded to the CvMat implementation.
float CvKNearest::find_nearest( const Mat& _samples, int k, Mat* _results,
                                const float** _neighbors, Mat* _neighbor_responses,
                                Mat* _dist ) const
{
    CvMat s = _samples, results, *presults = 0, nresponses, *pnresponses = 0, dist, *pdist = 0;

    if( _results )
    {
        if(!(_results->data && (_results->type() == CV_32F ||
            (_results->type() == CV_32S && regression)) &&
             (_results->cols == 1 || _results->rows == 1) &&
             _results->cols + _results->rows - 1 == _samples.rows) )
            _results->create(_samples.rows, 1, CV_32F);
        presults = &(results = *_results);
    }

    if( _neighbor_responses )
    {
        if(!(_neighbor_responses->data && _neighbor_responses->type() == CV_32F &&
             _neighbor_responses->cols == k && _neighbor_responses->rows == _samples.rows) )
            _neighbor_responses->create(_samples.rows, k, CV_32F);
        pnresponses = &(nresponses = *_neighbor_responses);
    }

    if( _dist )
    {
        if(!(_dist->data && _dist->type() == CV_32F &&
             _dist->cols == k && _dist->rows == _samples.rows) )
            _dist->create(_samples.rows, k, CV_32F);
        pdist = &(dist = *_dist);
    }

    return find_nearest(&s, k, presults, _neighbors, pnresponses, pdist );
}

// Reference-based overload: always fills results, neighbor responses and
// distances; neighbor pointers are not exposed here.
float CvKNearest::find_nearest( const cv::Mat& _samples, int k, CV_OUT cv::Mat& results,
                                CV_OUT cv::Mat& neighborResponses, CV_OUT cv::Mat& dists) const
{
    return find_nearest(_samples, k, &results, 0, &neighborResponses, &dists);
}
/* End of file */
| grace-/opencv-3.0.0-cvpr | opencv/modules/ml/src/knearest.cpp | C++ | bsd-3-clause | 16,081 |
// Copyright (c) Jupyter Development Team.
// Distributed under the terms of the Modified BSD License.
import {
Context,
DocumentRegistry,
TextModelFactory
} from '@jupyterlab/docregistry';
import * as Mock from '@jupyterlab/testutils/lib/mock';
import { UUID } from '@lumino/coreutils';
import { CellRenderer, DataGrid, JSONModel } from '@lumino/datagrid';
import { CSVViewer, GridSearchService } from '../src';
/**
 * Build a document context backed by a mock service manager and a plain
 * text model factory, pointing at a freshly generated `.csv` path.
 */
function createContext(): Context<DocumentRegistry.IModel> {
  return new Context({
    factory: new TextModelFactory(),
    manager: new Mock.ServiceManagerMock(),
    path: `${UUID.uuid4()}.csv`
  });
}
describe('csvviewer/widget', () => {
  // A single context is shared by all CSVViewer tests below.
  const context = createContext();

  describe('CSVViewer', () => {
    describe('#constructor()', () => {
      it('should instantiate a `CSVViewer`', () => {
        const widget = new CSVViewer({ context });
        expect(widget).toBeInstanceOf(CSVViewer);
        widget.dispose();
      });
    });

    describe('#context', () => {
      it('should be the context for the file', () => {
        const widget = new CSVViewer({ context });
        expect(widget.context).toBe(context);
      });
    });

    describe('#dispose()', () => {
      it('should dispose of the resources held by the widget', () => {
        const widget = new CSVViewer({ context });
        expect(widget.isDisposed).toBe(false);
        widget.dispose();
        expect(widget.isDisposed).toBe(true);
      });

      it('should be safe to call multiple times', () => {
        const widget = new CSVViewer({ context });
        expect(widget.isDisposed).toBe(false);
        widget.dispose();
        widget.dispose();
        expect(widget.isDisposed).toBe(true);
      });
    });
  });

  describe('GridSearchService', () => {
    // Two-row, two-column model; column "b" contains the searchable text.
    function createModel(): JSONModel {
      return new JSONModel({
        data: [
          { index: 0, a: 'other', b: 'match 1' },
          { index: 1, a: 'other', b: 'match 2' }
        ],
        schema: {
          primaryKey: ['index'],
          fields: [
            {
              name: 'a'
            },
            { name: 'b' }
          ]
        }
      });
    }

    // Wrap the model in a DataGrid and return a search service over it.
    function createGridSearchService(model: JSONModel): GridSearchService {
      const grid = new DataGrid();
      grid.dataModel = model;
      return new GridSearchService(grid);
    }

    it('searches incrementally and set background color', () => {
      const model = createModel();
      const searchService = createGridSearchService(model);
      const cellRenderer = searchService.cellBackgroundColorRendererFunc({
        matchBackgroundColor: 'anotherMatch',
        currentMatchBackgroundColor: 'currentMatch',
        textColor: '',
        horizontalAlignment: 'right'
      });

      /**
       * Fake rendering a cell and return the background color chosen by the
       * renderer for this coordinate.
       */
      function fakeRenderCell(row: number, column: number) {
        const cellConfig = {
          value: model.data('body', row, column),
          row,
          column
        } as CellRenderer.CellConfig;
        return cellRenderer(cellConfig);
      }

      // searching for "match", cells at (0,1) and (1,1) should match.
      // (0,1) is the current match
      const query = /match/;
      searchService.find(query);
      expect(fakeRenderCell(0, 1)).toBe('currentMatch');
      expect(fakeRenderCell(1, 1)).toBe('anotherMatch');
      expect(fakeRenderCell(0, 0)).toBe('');

      // search again, the current match "moves" to be (1,1)
      searchService.find(query);
      expect(fakeRenderCell(0, 1)).toBe('anotherMatch');
      expect(fakeRenderCell(1, 1)).toBe('currentMatch');
    });
  });
});
| jupyter/jupyterlab | packages/csvviewer/test/widget.spec.ts | TypeScript | bsd-3-clause | 3,717 |
//===-- ProcessRunLock.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef _WIN32
#include "lldb/Host/ProcessRunLock.h"
namespace lldb_private {

// ProcessRunLock (POSIX implementation): a reader/writer lock around the
// "process is running" flag. Readers may hold the lock only while the
// process is stopped; writers flip the running state.

ProcessRunLock::ProcessRunLock() : m_running(false) {
  int err = ::pthread_rwlock_init(&m_rwlock, nullptr);
  (void)err; // Initialization failure is deliberately ignored.
}

ProcessRunLock::~ProcessRunLock() {
  int err = ::pthread_rwlock_destroy(&m_rwlock);
  (void)err; // Destruction failure is deliberately ignored.
}

// Acquires the read lock only if the process is currently stopped; returns
// false (and releases the lock) if it is running.
bool ProcessRunLock::ReadTryLock() {
  ::pthread_rwlock_rdlock(&m_rwlock);
  if (!m_running) {
    return true;
  }
  ::pthread_rwlock_unlock(&m_rwlock);
  return false;
}

// Releases a read lock previously obtained via ReadTryLock.
bool ProcessRunLock::ReadUnlock() {
  return ::pthread_rwlock_unlock(&m_rwlock) == 0;
}

// Marks the process as running. Blocks until any readers release the lock.
bool ProcessRunLock::SetRunning() {
  ::pthread_rwlock_wrlock(&m_rwlock);
  m_running = true;
  ::pthread_rwlock_unlock(&m_rwlock);
  return true;
}

// Attempts to mark the process running without blocking. Returns true only
// if the write lock was obtained and the process was not already running.
bool ProcessRunLock::TrySetRunning() {
  bool r;

  if (::pthread_rwlock_trywrlock(&m_rwlock) == 0) {
    r = !m_running;
    m_running = true;
    ::pthread_rwlock_unlock(&m_rwlock);
    return r;
  }
  return false;
}

// Marks the process as stopped. Blocks until the lock is available.
bool ProcessRunLock::SetStopped() {
  ::pthread_rwlock_wrlock(&m_rwlock);
  m_running = false;
  ::pthread_rwlock_unlock(&m_rwlock);
  return true;
}
}
#endif
| endlessm/chromium-browser | third_party/llvm/lldb/source/Host/common/ProcessRunLock.cpp | C++ | bsd-3-clause | 1,469 |
/**
 * Evaluate a media query, tolerating environments where matchMedia
 * yields nothing.
 * @param {string} query A CSS media query string.
 * @return {boolean} True only when the query evaluated and matched.
 */
function safeMatchMedia(query) {
    var mediaQueryList = window.matchMedia(query);
    return Boolean(mediaQueryList && mediaQueryList.matches);
}
// Feature/environment detection module. Builds a `capabilities` object of
// booleans describing the current browser/device and exposes it both as an
// AMD module and as z.capabilities.
define('capabilities', [], function() {
    var capabilities = {
        'JSON': window.JSON && typeof JSON.parse == 'function',
        'debug': (('' + document.location).indexOf('dbg') >= 0),
        'debug_in_page': (('' + document.location).indexOf('dbginpage') >= 0),
        'console': window.console && (typeof window.console.log == 'function'),
        'replaceState': typeof history.replaceState === 'function',
        'chromeless': window.locationbar && !window.locationbar.visible,
        'localStorage': false,
        'sessionStorage': false,
        'webApps': !!(navigator.mozApps && navigator.mozApps.install),
        'app_runtime': !!(
            navigator.mozApps &&
            typeof navigator.mozApps.html5Implementation === 'undefined'
        ),
        'fileAPI': !!window.FileReader,
        'userAgent': navigator.userAgent,
        'desktop': false,
        'tablet': false,
        'mobile': safeMatchMedia('(max-width: 600px)'),
        'firefoxAndroid': (navigator.userAgent.indexOf('Firefox') != -1 && navigator.userAgent.indexOf('Android') != -1),
        'touch': ('ontouchstart' in window) || window.DocumentTouch && document instanceof DocumentTouch,
        'nativeScroll': (function() {
            return 'WebkitOverflowScrolling' in document.createElement('div').style;
        })(),
        'performance': !!(window.performance || window.msPerformance || window.webkitPerformance || window.mozPerformance),
        'navPay': !!navigator.mozPay,
        'webactivities': !!(window.setMessageHandler || window.mozSetMessageHandler),
        'firefoxOS': null // This is set below.
    };

    // We're probably tablet if we have touch and we're larger than mobile.
    capabilities.tablet = capabilities.touch && safeMatchMedia('(min-width: 601px)');

    // We're probably desktop if we don't have touch and we're larger than some arbitrary dimension.
    capabilities.desktop = !capabilities.touch && safeMatchMedia('(min-width: 673px)');

    // Packaged-app installation are supported only on Firefox OS, so this is how we sniff.
    capabilities.gaia = !!(capabilities.mobile && navigator.mozApps && navigator.mozApps.installPackage);

    capabilities.getDeviceType = function() {
        return this.desktop ? 'desktop' : (this.tablet ? 'tablet' : 'mobile');
    };

    // The three device classes are mutually exclusive; mobile wins over
    // tablet, which wins over desktop.
    if (capabilities.tablet) {
        // If we're on tablet, then we're not on desktop.
        capabilities.desktop = false;
    }

    if (capabilities.mobile) {
        // If we're on mobile, then we're not on desktop nor tablet.
        capabilities.desktop = capabilities.tablet = false;
    }

    // Detect Firefox OS.
    // This will be true if the request is from a Firefox OS phone *or*
    // a desktop B2G build with the correct UA pref, such as this:
    // https://github.com/mozilla/r2d2b2g/blob/master/prosthesis/defaults/preferences/prefs.js
    capabilities.firefoxOS = capabilities.gaia && !capabilities.firefoxAndroid;

    // Accessing localStorage/sessionStorage can throw (e.g. when cookies are
    // disabled), so probe them inside try/catch.
    try {
        if ('localStorage' in window && window.localStorage !== null) {
            capabilities.localStorage = true;
        }
    } catch (e) {
    }

    try {
        if ('sessionStorage' in window && window.sessionStorage !== null) {
            capabilities.sessionStorage = true;
        }
    } catch (e) {
    }

    return capabilities;
});

// Expose the module on the global z namespace for non-AMD consumers.
z.capabilities = require('capabilities');
// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "tools/gn/ninja_target_writer.h"
#include <sstream>
#include "base/files/file_util.h"
#include "base/strings/string_util.h"
#include "tools/gn/err.h"
#include "tools/gn/filesystem_utils.h"
#include "tools/gn/ninja_action_target_writer.h"
#include "tools/gn/ninja_binary_target_writer.h"
#include "tools/gn/ninja_copy_target_writer.h"
#include "tools/gn/ninja_group_target_writer.h"
#include "tools/gn/ninja_utils.h"
#include "tools/gn/output_file.h"
#include "tools/gn/scheduler.h"
#include "tools/gn/string_utils.h"
#include "tools/gn/substitution_writer.h"
#include "tools/gn/target.h"
#include "tools/gn/trace.h"
// Sets up the writer so paths are emitted relative to the build directory
// and escaped for Ninja consumption.
NinjaTargetWriter::NinjaTargetWriter(const Target* target,
                                     std::ostream& out)
    : settings_(target->settings()),
      target_(target),
      out_(out),
      path_output_(settings_->build_settings()->build_dir(),
                   settings_->build_settings()->root_path_utf8(),
                   ESCAPE_NINJA) {
}

NinjaTargetWriter::~NinjaTargetWriter() {
}
// static
void NinjaTargetWriter::RunAndWriteFile(const Target* target) {
const Settings* settings = target->settings();
ScopedTrace trace(TraceItem::TRACE_FILE_WRITE,
target->label().GetUserVisibleName(false));
trace.SetToolchain(settings->toolchain_label());
base::FilePath ninja_file(settings->build_settings()->GetFullPath(
GetNinjaFileForTarget(target)));
if (g_scheduler->verbose_logging())
g_scheduler->Log("Writing", FilePathToUTF8(ninja_file));
base::CreateDirectory(ninja_file.DirName());
// It's rediculously faster to write to a string and then write that to
// disk in one operation than to use an fstream here.
std::stringstream file;
// Call out to the correct sub-type of writer.
if (target->output_type() == Target::COPY_FILES) {
NinjaCopyTargetWriter writer(target, file);
writer.Run();
} else if (target->output_type() == Target::ACTION ||
target->output_type() == Target::ACTION_FOREACH) {
NinjaActionTargetWriter writer(target, file);
writer.Run();
} else if (target->output_type() == Target::GROUP) {
NinjaGroupTargetWriter writer(target, file);
writer.Run();
} else if (target->output_type() == Target::EXECUTABLE ||
target->output_type() == Target::STATIC_LIBRARY ||
target->output_type() == Target::SHARED_LIBRARY ||
target->output_type() == Target::SOURCE_SET) {
NinjaBinaryTargetWriter writer(target, file);
writer.Run();
} else {
CHECK(0);
}
std::string contents = file.str();
base::WriteFile(ninja_file, contents.c_str(),
static_cast<int>(contents.size()));
}
void NinjaTargetWriter::WriteSharedVars(const SubstitutionBits& bits) {
bool written_anything = false;
// Target label.
if (bits.used[SUBSTITUTION_LABEL]) {
out_ << kSubstitutionNinjaNames[SUBSTITUTION_LABEL] << " = "
<< SubstitutionWriter::GetTargetSubstitution(
target_, SUBSTITUTION_LABEL)
<< std::endl;
written_anything = true;
}
// Root gen dir.
if (bits.used[SUBSTITUTION_ROOT_GEN_DIR]) {
out_ << kSubstitutionNinjaNames[SUBSTITUTION_ROOT_GEN_DIR] << " = "
<< SubstitutionWriter::GetTargetSubstitution(
target_, SUBSTITUTION_ROOT_GEN_DIR)
<< std::endl;
written_anything = true;
}
// Root out dir.
if (bits.used[SUBSTITUTION_ROOT_OUT_DIR]) {
out_ << kSubstitutionNinjaNames[SUBSTITUTION_ROOT_OUT_DIR] << " = "
<< SubstitutionWriter::GetTargetSubstitution(
target_, SUBSTITUTION_ROOT_OUT_DIR)
<< std::endl;
written_anything = true;
}
// Target gen dir.
if (bits.used[SUBSTITUTION_TARGET_GEN_DIR]) {
out_ << kSubstitutionNinjaNames[SUBSTITUTION_TARGET_GEN_DIR] << " = "
<< SubstitutionWriter::GetTargetSubstitution(
target_, SUBSTITUTION_TARGET_GEN_DIR)
<< std::endl;
written_anything = true;
}
// Target out dir.
if (bits.used[SUBSTITUTION_TARGET_OUT_DIR]) {
out_ << kSubstitutionNinjaNames[SUBSTITUTION_TARGET_OUT_DIR] << " = "
<< SubstitutionWriter::GetTargetSubstitution(
target_, SUBSTITUTION_TARGET_OUT_DIR)
<< std::endl;
written_anything = true;
}
// Target output name.
if (bits.used[SUBSTITUTION_TARGET_OUTPUT_NAME]) {
out_ << kSubstitutionNinjaNames[SUBSTITUTION_TARGET_OUTPUT_NAME] << " = "
<< SubstitutionWriter::GetTargetSubstitution(
target_, SUBSTITUTION_TARGET_OUTPUT_NAME)
<< std::endl;
written_anything = true;
}
// If we wrote any vars, separate them from the rest of the file that follows
// with a blank line.
if (written_anything)
out_ << std::endl;
}
// Writes a ".inputdeps.stamp" build rule aggregating every implicit/hard
// dependency of the target (script, inputs, hard deps, toolchain deps, plus
// |extra_hard_deps|) and returns the stamp's OutputFile. Returns an empty
// OutputFile when the target has no such dependencies.
OutputFile NinjaTargetWriter::WriteInputDepsStampAndGetDep(
    const std::vector<const Target*>& extra_hard_deps) const {
  CHECK(target_->toolchain())
      << "Toolchain not set on target "
      << target_->label().GetUserVisibleName(true);

  // For an action (where we run a script only once) the sources are the same
  // as the source prereqs.
  bool list_sources_as_input_deps = (target_->output_type() == Target::ACTION);

  // Actions get implicit dependencies on the script itself.
  bool add_script_source_as_dep =
      (target_->output_type() == Target::ACTION) ||
      (target_->output_type() == Target::ACTION_FOREACH);

  if (!add_script_source_as_dep &&
      extra_hard_deps.empty() &&
      target_->inputs().empty() &&
      target_->recursive_hard_deps().empty() &&
      (!list_sources_as_input_deps || target_->sources().empty()) &&
      target_->toolchain()->deps().empty())
    return OutputFile();  // No input/hard deps.

  // One potential optimization is if there are few input dependencies (or
  // potentially few sources that depend on these) it's better to just write
  // all hard deps on each sources line than have this intermediate stamp. We
  // do the stamp file because duplicating all the order-only deps for each
  // source file can really explode the ninja file but this won't be the most
  // optimal thing in all cases.
  OutputFile input_stamp_file(
      RebasePath(GetTargetOutputDir(target_).value(),
                 settings_->build_settings()->build_dir(),
                 settings_->build_settings()->root_path_utf8()));
  input_stamp_file.value().append(target_->label().name());
  input_stamp_file.value().append(".inputdeps.stamp");

  out_ << "build ";
  path_output_.WriteFile(out_, input_stamp_file);
  out_ << ": "
       << GetNinjaRulePrefixForToolchain(settings_)
       << Toolchain::ToolTypeToName(Toolchain::TYPE_STAMP);

  // Script file (if applicable).
  if (add_script_source_as_dep) {
    out_ << " ";
    path_output_.WriteFile(out_, target_->action_values().script());
  }

  // Input files are order-only deps.
  for (const auto& input : target_->inputs()) {
    out_ << " ";
    path_output_.WriteFile(out_, input);
  }
  if (list_sources_as_input_deps) {
    for (const auto& source : target_->sources()) {
      out_ << " ";
      path_output_.WriteFile(out_, source);
    }
  }

  // The different sources of input deps may duplicate some targets, so uniquify
  // them (ordering doesn't matter for this case).
  std::set<const Target*> unique_deps;

  // Hard dependencies that are direct or indirect dependencies.
  const std::set<const Target*>& hard_deps = target_->recursive_hard_deps();
  for (const auto& dep : hard_deps)
    unique_deps.insert(dep);

  // Extra hard dependencies passed in.
  unique_deps.insert(extra_hard_deps.begin(), extra_hard_deps.end());

  // Toolchain dependencies. These must be resolved before doing anything.
  // This just writes all toolchain deps for simplicity. If we find that
  // toolchains often have more than one dependency, we could consider writing
  // a toolchain-specific stamp file and only include the stamp here.
  const LabelTargetVector& toolchain_deps = target_->toolchain()->deps();
  for (const auto& toolchain_dep : toolchain_deps)
    unique_deps.insert(toolchain_dep.ptr);

  for (const auto& dep : unique_deps) {
    DCHECK(!dep->dependency_output_file().value().empty());
    out_ << " ";
    path_output_.WriteFile(out_, dep->dependency_output_file());
  }

  out_ << "\n";
  return input_stamp_file;
}
// Writes the target's final stamp rule covering |files|, with
// |order_only_deps| appended after "||" as Ninja order-only dependencies.
// The target's dependency output must already be a ".stamp" file.
void NinjaTargetWriter::WriteStampForTarget(
    const std::vector<OutputFile>& files,
    const std::vector<OutputFile>& order_only_deps) {
  const OutputFile& stamp_file = target_->dependency_output_file();

  // First validate that the target's dependency is a stamp file. Otherwise,
  // we shouldn't have gotten here!
  CHECK(base::EndsWith(stamp_file.value(), ".stamp", false))
      << "Output should end in \".stamp\" for stamp file output. Instead got: "
      << "\"" << stamp_file.value() << "\"";

  out_ << "build ";
  path_output_.WriteFile(out_, stamp_file);
  out_ << ": "
       << GetNinjaRulePrefixForToolchain(settings_)
       << Toolchain::ToolTypeToName(Toolchain::TYPE_STAMP);
  path_output_.WriteFiles(out_, files);

  if (!order_only_deps.empty()) {
    out_ << " ||";
    path_output_.WriteFiles(out_, order_only_deps);
  }
  out_ << std::endl;
}
| SaschaMester/delicium | tools/gn/ninja_target_writer.cc | C++ | bsd-3-clause | 9,381 |
/*
* Copyright 2013 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "SkTileImageFilter.h"
#include "SkColorSpaceXformer.h"
#include "SkCanvas.h"
#include "SkImage.h"
#include "SkImageFilterPriv.h"
#include "SkMatrix.h"
#include "SkOffsetImageFilter.h"
#include "SkPaint.h"
#include "SkReadBuffer.h"
#include "SkShader.h"
#include "SkSpecialImage.h"
#include "SkSpecialSurface.h"
#include "SkSurface.h"
#include "SkValidationUtils.h"
#include "SkWriteBuffer.h"
// Factory for a tile filter that repeats the content of srcRect across
// dstRect. Returns null for invalid rects. When src and dst have equal
// dimensions no tiling is needed, so the filter degenerates to an offset
// filter cropped to the src/dst intersection (or to the input itself when
// the rects don't intersect).
sk_sp<SkImageFilter> SkTileImageFilter::Make(const SkRect& srcRect, const SkRect& dstRect,
                                             sk_sp<SkImageFilter> input) {
    if (!SkIsValidRect(srcRect) || !SkIsValidRect(dstRect)) {
        return nullptr;
    }
    if (srcRect.width() == dstRect.width() && srcRect.height() == dstRect.height()) {
        SkRect ir = dstRect;
        if (!ir.intersect(srcRect)) {
            return input;
        }
        CropRect cropRect(ir);
        return SkOffsetImageFilter::Make(dstRect.x() - srcRect.x(),
                                         dstRect.y() - srcRect.y(),
                                         std::move(input),
                                         &cropRect);
    }
    return sk_sp<SkImageFilter>(new SkTileImageFilter(srcRect, dstRect, std::move(input)));
}
// Renders the filter: extracts the (device-space) source region from the
// filtered input, then fills the destination region by drawing that region
// as a repeating shader. Returns null if the input or either region is
// empty/out of bounds.
sk_sp<SkSpecialImage> SkTileImageFilter::onFilterImage(SkSpecialImage* source,
                                                       const Context& ctx,
                                                       SkIPoint* offset) const {
    SkIPoint inputOffset = SkIPoint::Make(0, 0);
    sk_sp<SkSpecialImage> input(this->filterInput(0, source, ctx, &inputOffset));
    if (!input) {
        return nullptr;
    }

    // Map the destination rect to device space and clip it.
    SkRect dstRect;
    ctx.ctm().mapRect(&dstRect, fDstRect);
    if (!dstRect.intersect(SkRect::Make(ctx.clipBounds()))) {
        return nullptr;
    }

    const SkIRect dstIRect = dstRect.roundOut();
    if (!fSrcRect.width() || !fSrcRect.height() || !dstIRect.width() || !dstIRect.height()) {
        return nullptr;
    }

    // Map the source rect to device space, relative to the input image.
    SkRect srcRect;
    ctx.ctm().mapRect(&srcRect, fSrcRect);
    SkIRect srcIRect;
    srcRect.roundOut(&srcIRect);
    srcIRect.offset(-inputOffset);
    const SkIRect inputBounds = SkIRect::MakeWH(input->width(), input->height());

    if (!SkIRect::Intersects(srcIRect, inputBounds)) {
        return nullptr;
    }

    // We create an SkImage here b.c. it needs to be a tight fit for the tiling
    sk_sp<SkImage> subset;
    if (inputBounds.contains(srcIRect)) {
        subset = input->asImage(&srcIRect);
    } else {
        // The source region extends past the input; re-draw it into a tight
        // surface so the tile content has the exact required dimensions.
        sk_sp<SkSurface> surf(input->makeTightSurface(ctx.outputProperties(), srcIRect.size()));
        if (!surf) {
            return nullptr;
        }

        SkCanvas* canvas = surf->getCanvas();
        SkASSERT(canvas);

        SkPaint paint;
        paint.setBlendMode(SkBlendMode::kSrc);

        input->draw(canvas,
                    SkIntToScalar(inputOffset.x()), SkIntToScalar(inputOffset.y()),
                    &paint);

        subset = surf->makeImageSnapshot();
    }
    if (!subset) {
        return nullptr;
    }
    SkASSERT(subset->width() == srcIRect.width());
    SkASSERT(subset->height() == srcIRect.height());

    // Tile the subset across the destination with a repeating shader.
    sk_sp<SkSpecialSurface> surf(source->makeSurface(ctx.outputProperties(), dstIRect.size()));
    if (!surf) {
        return nullptr;
    }

    SkCanvas* canvas = surf->getCanvas();
    SkASSERT(canvas);

    SkPaint paint;
    paint.setBlendMode(SkBlendMode::kSrc);
    paint.setShader(subset->makeShader(SkShader::kRepeat_TileMode, SkShader::kRepeat_TileMode));
    canvas->translate(-dstRect.fLeft, -dstRect.fTop);
    canvas->drawRect(dstRect, paint);

    offset->fX = dstIRect.fLeft;
    offset->fY = dstIRect.fTop;
    return surf->makeImageSnapshot();
}
// Rebuilds the filter with its input transformed to the target color space;
// returns this filter unchanged when the input is unaffected.
sk_sp<SkImageFilter> SkTileImageFilter::onMakeColorSpace(SkColorSpaceXformer* xformer) const {
    SkASSERT(1 == this->countInputs());

    auto input = xformer->apply(this->getInput(0));
    if (input.get() != this->getInput(0)) {
        return SkTileImageFilter::Make(fSrcRect, fDstRect, std::move(input));
    }
    return this->refMe();
}

// Node bounds: the source rect when mapping backwards (what input is
// needed), the destination rect when mapping forwards (what is produced),
// both transformed by the CTM.
SkIRect SkTileImageFilter::onFilterNodeBounds(const SkIRect& src, const SkMatrix& ctm,
                                              MapDirection dir, const SkIRect* inputRect) const {
    SkRect rect = kReverse_MapDirection == dir ? fSrcRect : fDstRect;
    ctm.mapRect(&rect);
    return rect.roundOut();
}

SkIRect SkTileImageFilter::onFilterBounds(const SkIRect& src, const SkMatrix&,
                                          MapDirection, const SkIRect* inputRect) const {
    // Don't recurse into inputs.
    return src;
}

// The output always covers exactly the destination rect.
SkRect SkTileImageFilter::computeFastBounds(const SkRect& src) const {
    return fDstRect;
}

// Deserialization: reads src/dst rects (in that order) plus the one input.
sk_sp<SkFlattenable> SkTileImageFilter::CreateProc(SkReadBuffer& buffer) {
    SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 1);
    SkRect src, dst;
    buffer.readRect(&src);
    buffer.readRect(&dst);
    return Make(src, dst, common.getInput(0));
}

// Serialization counterpart of CreateProc; field order must match.
void SkTileImageFilter::flatten(SkWriteBuffer& buffer) const {
    this->INHERITED::flatten(buffer);
    buffer.writeRect(fSrcRect);
    buffer.writeRect(fDstRect);
}
| Hikari-no-Tenshi/android_external_skia | src/effects/imagefilters/SkTileImageFilter.cpp | C++ | bsd-3-clause | 5,247 |
from __future__ import unicode_literals
from django.core.exceptions import PermissionDenied
from django.core.mail import send_mail
from django.core import validators
from django.db import models
from django.db.models.manager import EmptyManager
from django.utils.crypto import get_random_string, salted_hmac
from django.utils import six
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.contrib import auth
from django.contrib.auth.hashers import (
check_password, make_password, is_password_usable)
from django.contrib.auth.signals import user_logged_in
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import python_2_unicode_compatible
def update_last_login(sender, user, **kwargs):
    """
    A signal receiver which updates the last_login date for
    the user logging in.
    """
    user.last_login = timezone.now()
    # Persist only the last_login column so concurrent updates to other
    # fields on the same user row are not clobbered.
    user.save(update_fields=['last_login'])
# Keep last_login current whenever a user authenticates.
user_logged_in.connect(update_last_login)
class PermissionManager(models.Manager):
    """Manager that lets Permission objects be looked up by natural key."""
    def get_by_natural_key(self, codename, app_label, model):
        # Resolve the content type on the same database as this manager so the
        # natural-key lookup also works during multi-database serialization.
        return self.get(
            codename=codename,
            content_type=ContentType.objects.db_manager(self.db).get_by_natural_key(app_label, model),
        )
@python_2_unicode_compatible
class Permission(models.Model):
    """
    The permissions system provides a way to assign permissions to specific
    users and groups of users.
    The permission system is used by the Django admin site, but may also be
    useful in your own code. The Django admin site uses permissions as follows:
        - The "add" permission limits the user's ability to view the "add" form
          and add an object.
        - The "change" permission limits a user's ability to view the change
          list, view the "change" form and change an object.
        - The "delete" permission limits the ability to delete an object.
    Permissions are set globally per type of object, not per specific object
    instance. It is possible to say "Mary may change news stories," but it's
    not currently possible to say "Mary may change news stories, but only the
    ones she created herself" or "Mary may only change news stories that have a
    certain status or publication date."
    Three basic permissions -- add, change and delete -- are automatically
    created for each Django model.
    """
    # Human-readable permission name.
    name = models.CharField(_('name'), max_length=255)
    # The model this permission applies to.
    content_type = models.ForeignKey(ContentType)
    # Machine name used in has_perm() checks ("<app_label>.<codename>").
    codename = models.CharField(_('codename'), max_length=100)
    objects = PermissionManager()
    class Meta:
        verbose_name = _('permission')
        verbose_name_plural = _('permissions')
        # (content_type, codename) uniquely identifies a permission.
        unique_together = (('content_type', 'codename'),)
        ordering = ('content_type__app_label', 'content_type__model',
                    'codename')
    def __str__(self):
        return "%s | %s | %s" % (
            six.text_type(self.content_type.app_label),
            six.text_type(self.content_type),
            six.text_type(self.name))
    def natural_key(self):
        return (self.codename,) + self.content_type.natural_key()
    # Content types must be serialized before permissions that reference them.
    natural_key.dependencies = ['contenttypes.contenttype']
class GroupManager(models.Manager):
    """
    The manager for the auth's Group model.
    """
    def get_by_natural_key(self, name):
        # Group names are unique, so the name alone is the natural key.
        return self.get(name=name)
@python_2_unicode_compatible
class Group(models.Model):
    """
    Groups are a generic way of categorizing users to apply permissions, or
    some other label, to those users. A user can belong to any number of
    groups.
    A user in a group automatically has all the permissions granted to that
    group. For example, if the group Site editors has the permission
    can_edit_home_page, any user in that group will have that permission.
    Beyond permissions, groups are a convenient way to categorize users to
    apply some label, or extended functionality, to them. For example, you
    could create a group 'Special users', and you could write code that would
    do special things to those users -- such as giving them access to a
    members-only portion of your site, or sending them members-only email
    messages.
    """
    # Unique name; doubles as the group's natural key.
    name = models.CharField(_('name'), max_length=80, unique=True)
    permissions = models.ManyToManyField(Permission,
        verbose_name=_('permissions'), blank=True)
    objects = GroupManager()
    class Meta:
        verbose_name = _('group')
        verbose_name_plural = _('groups')
    def __str__(self):
        return self.name
    def natural_key(self):
        return (self.name,)
class BaseUserManager(models.Manager):
    """Manager helpers shared by every user model."""
    @classmethod
    def normalize_email(cls, email):
        """
        Normalize the address by lowercasing the domain part of the email
        address. Values without an '@' separator are returned unchanged.
        """
        email = email or ''
        parts = email.strip().rsplit('@', 1)
        if len(parts) == 2:
            local_part, domain = parts
            email = '@'.join([local_part, domain.lower()])
        return email
    def make_random_password(self, length=10,
                             allowed_chars='abcdefghjkmnpqrstuvwxyz'
                                           'ABCDEFGHJKLMNPQRSTUVWXYZ'
                                           '23456789'):
        """
        Generates a random password with the given length and given
        allowed_chars. The default alphabet deliberately omits characters
        that are easily confused with one another (such as "I", "O", "l",
        "1" and "0").
        """
        return get_random_string(length, allowed_chars)
    def get_by_natural_key(self, username):
        # USERNAME_FIELD names the unique identifying field of the user model.
        lookup = {self.model.USERNAME_FIELD: username}
        return self.get(**lookup)
class UserManager(BaseUserManager):
    """Default manager for the User model; builds users with hashed passwords."""
    def _create_user(self, username, email, password,
                     is_staff, is_superuser, **extra_fields):
        """
        Creates and saves a User with the given username, email and password.
        """
        now = timezone.now()
        if not username:
            raise ValueError('The given username must be set')
        email = self.normalize_email(email)
        user = self.model(username=username, email=email,
                          is_staff=is_staff, is_active=True,
                          is_superuser=is_superuser,
                          date_joined=now, **extra_fields)
        # Hashes the raw password (or marks it unusable when password is None).
        user.set_password(password)
        user.save(using=self._db)
        return user
    def create_user(self, username, email=None, password=None, **extra_fields):
        # Regular user: neither staff nor superuser.
        return self._create_user(username, email, password, False, False,
                                 **extra_fields)
    def create_superuser(self, username, email, password, **extra_fields):
        # Superuser: both staff and superuser flags set.
        return self._create_user(username, email, password, True, True,
                                 **extra_fields)
@python_2_unicode_compatible
class AbstractBaseUser(models.Model):
    """
    Minimal abstract base for user models: password storage/checking,
    session-hash support and the authentication-state API.
    """
    password = models.CharField(_('password'), max_length=128)
    last_login = models.DateTimeField(_('last login'), blank=True, null=True)
    # Class-level default; concrete subclasses usually override this with a
    # real database field.
    is_active = True
    # Extra field names prompted for by createsuperuser (besides the
    # USERNAME_FIELD and password).
    REQUIRED_FIELDS = []
    class Meta:
        abstract = True
    def get_username(self):
        "Return the identifying username for this User"
        return getattr(self, self.USERNAME_FIELD)
    def __str__(self):
        return self.get_username()
    def natural_key(self):
        return (self.get_username(),)
    def is_anonymous(self):
        """
        Always returns False. This is a way of comparing User objects to
        anonymous users.
        """
        return False
    def is_authenticated(self):
        """
        Always return True. This is a way to tell if the user has been
        authenticated in templates.
        """
        return True
    def set_password(self, raw_password):
        self.password = make_password(raw_password)
    def check_password(self, raw_password):
        """
        Returns a boolean of whether the raw_password was correct. Handles
        hashing formats behind the scenes.
        """
        def setter(raw_password):
            # Called on a successful check so an outdated hash can be
            # transparently upgraded to the current algorithm.
            self.set_password(raw_password)
            self.save(update_fields=["password"])
        return check_password(raw_password, self.password, setter)
    def set_unusable_password(self):
        # Sets a value that will never be a valid hash
        self.password = make_password(None)
    def has_usable_password(self):
        return is_password_usable(self.password)
    def get_full_name(self):
        raise NotImplementedError('subclasses of AbstractBaseUser must provide a get_full_name() method')
    def get_short_name(self):
        raise NotImplementedError('subclasses of AbstractBaseUser must provide a get_short_name() method.')
    def get_session_auth_hash(self):
        """
        Returns an HMAC of the password field.
        """
        # Used to invalidate existing sessions when the password changes.
        key_salt = "django.contrib.auth.models.AbstractBaseUser.get_session_auth_hash"
        return salted_hmac(key_salt, self.password).hexdigest()
# A few helper functions for common logic between User and AnonymousUser.
def _user_get_all_permissions(user, obj):
    """Union of the permissions reported by every configured auth backend."""
    perms = set()
    for backend in auth.get_backends():
        getter = getattr(backend, "get_all_permissions", None)
        if getter is not None:
            perms.update(getter(user, obj))
    return perms
def _user_has_perm(user, perm, obj):
    """
    A backend can raise `PermissionDenied` to short-circuit permission checking.
    """
    for backend in auth.get_backends():
        has_perm = getattr(backend, 'has_perm', None)
        if has_perm is None:
            continue
        try:
            granted = has_perm(user, perm, obj)
        except PermissionDenied:
            # Explicit denial from any backend stops the search immediately.
            return False
        if granted:
            return True
    return False
def _user_has_module_perms(user, app_label):
    """
    A backend can raise `PermissionDenied` to short-circuit permission checking.
    """
    for backend in auth.get_backends():
        checker = getattr(backend, 'has_module_perms', None)
        if checker is None:
            continue
        try:
            if checker(user, app_label):
                return True
        except PermissionDenied:
            # Explicit denial from any backend stops the search immediately.
            return False
    return False
class PermissionsMixin(models.Model):
    """
    A mixin class that adds the fields and methods necessary to support
    Django's Group and Permission model using the ModelBackend.
    """
    is_superuser = models.BooleanField(_('superuser status'), default=False,
        help_text=_('Designates that this user has all permissions without '
                    'explicitly assigning them.'))
    groups = models.ManyToManyField(Group, verbose_name=_('groups'),
        blank=True, help_text=_('The groups this user belongs to. A user will '
                                'get all permissions granted to each of '
                                'their groups.'),
        related_name="user_set", related_query_name="user")
    user_permissions = models.ManyToManyField(Permission,
        verbose_name=_('user permissions'), blank=True,
        help_text=_('Specific permissions for this user.'),
        related_name="user_set", related_query_name="user")
    class Meta:
        abstract = True
    def get_group_permissions(self, obj=None):
        """
        Returns a list of permission strings that this user has through their
        groups. This method queries all available auth backends. If an object
        is passed in, only permissions matching this object are returned.
        """
        permissions = set()
        for backend in auth.get_backends():
            if hasattr(backend, "get_group_permissions"):
                permissions.update(backend.get_group_permissions(self, obj))
        return permissions
    def get_all_permissions(self, obj=None):
        # Delegates to the module-level helper shared with AnonymousUser.
        return _user_get_all_permissions(self, obj)
    def has_perm(self, perm, obj=None):
        """
        Returns True if the user has the specified permission. This method
        queries all available auth backends, but returns immediately if any
        backend returns True. Thus, a user who has permission from a single
        auth backend is assumed to have permission in general. If an object is
        provided, permissions for this specific object are checked.
        """
        # Active superusers have all permissions.
        if self.is_active and self.is_superuser:
            return True
        # Otherwise we need to check the backends.
        return _user_has_perm(self, perm, obj)
    def has_perms(self, perm_list, obj=None):
        """
        Returns True if the user has each of the specified permissions. If
        object is passed, it checks if the user has all required perms for this
        object.
        """
        for perm in perm_list:
            if not self.has_perm(perm, obj):
                return False
        return True
    def has_module_perms(self, app_label):
        """
        Returns True if the user has any permissions in the given app label.
        Uses pretty much the same logic as has_perm, above.
        """
        # Active superusers have all permissions.
        if self.is_active and self.is_superuser:
            return True
        return _user_has_module_perms(self, app_label)
class AbstractUser(AbstractBaseUser, PermissionsMixin):
    """
    An abstract base class implementing a fully featured User model with
    admin-compliant permissions.
    Username, password and email are required. Other fields are optional.
    """
    username = models.CharField(_('username'), max_length=30, unique=True,
        help_text=_('Required. 30 characters or fewer. Letters, digits and '
                    '@/./+/-/_ only.'),
        validators=[
            validators.RegexValidator(r'^[\w.@+-]+$',
                                      _('Enter a valid username. '
                                        'This value may contain only letters, numbers '
                                        'and @/./+/-/_ characters.'), 'invalid'),
        ],
        error_messages={
            'unique': _("A user with that username already exists."),
        })
    first_name = models.CharField(_('first name'), max_length=30, blank=True)
    last_name = models.CharField(_('last name'), max_length=30, blank=True)
    email = models.EmailField(_('email address'), blank=True)
    is_staff = models.BooleanField(_('staff status'), default=False,
        help_text=_('Designates whether the user can log into this admin '
                    'site.'))
    is_active = models.BooleanField(_('active'), default=True,
        help_text=_('Designates whether this user should be treated as '
                    'active. Unselect this instead of deleting accounts.'))
    date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
    objects = UserManager()
    # Field used as the unique identifier when authenticating.
    USERNAME_FIELD = 'username'
    REQUIRED_FIELDS = ['email']
    class Meta:
        verbose_name = _('user')
        verbose_name_plural = _('users')
        abstract = True
    def get_full_name(self):
        """
        Returns the first_name plus the last_name, with a space in between.
        """
        full_name = '%s %s' % (self.first_name, self.last_name)
        return full_name.strip()
    def get_short_name(self):
        "Returns the short name for the user."
        return self.first_name
    def email_user(self, subject, message, from_email=None, **kwargs):
        """
        Sends an email to this User.
        """
        send_mail(subject, message, from_email, [self.email], **kwargs)
class User(AbstractUser):
    """
    Users within the Django authentication system are represented by this
    model.
    Username, password and email are required. Other fields are optional.
    """
    class Meta(AbstractUser.Meta):
        # Allows projects to substitute their own user model via the
        # AUTH_USER_MODEL setting.
        swappable = 'AUTH_USER_MODEL'
@python_2_unicode_compatible
class AnonymousUser(object):
    """
    Stand-in for an unauthenticated user. Mirrors the User API but has no
    database representation: persistence methods raise NotImplementedError
    and the permission sets come only from the auth backends.
    """
    id = None
    pk = None
    username = ''
    is_staff = False
    is_active = False
    is_superuser = False
    # Empty managers so .groups/.user_permissions behave like (empty) querysets.
    _groups = EmptyManager(Group)
    _user_permissions = EmptyManager(Permission)
    def __init__(self):
        pass
    def __str__(self):
        return 'AnonymousUser'
    def __eq__(self, other):
        # All AnonymousUser instances compare equal to each other.
        return isinstance(other, self.__class__)
    def __ne__(self, other):
        return not self.__eq__(other)
    def __hash__(self):
        return 1  # instances always return the same hash value
    def save(self):
        raise NotImplementedError("Django doesn't provide a DB representation for AnonymousUser.")
    def delete(self):
        raise NotImplementedError("Django doesn't provide a DB representation for AnonymousUser.")
    def set_password(self, raw_password):
        raise NotImplementedError("Django doesn't provide a DB representation for AnonymousUser.")
    def check_password(self, raw_password):
        raise NotImplementedError("Django doesn't provide a DB representation for AnonymousUser.")
    def _get_groups(self):
        return self._groups
    groups = property(_get_groups)
    def _get_user_permissions(self):
        return self._user_permissions
    user_permissions = property(_get_user_permissions)
    def get_group_permissions(self, obj=None):
        return set()
    def get_all_permissions(self, obj=None):
        # Backends may still grant permissions to anonymous users.
        return _user_get_all_permissions(self, obj=obj)
    def has_perm(self, perm, obj=None):
        return _user_has_perm(self, perm, obj=obj)
    def has_perms(self, perm_list, obj=None):
        for perm in perm_list:
            if not self.has_perm(perm, obj):
                return False
        return True
    def has_module_perms(self, module):
        return _user_has_module_perms(self, module)
    def is_anonymous(self):
        return True
    def is_authenticated(self):
        return False
| pwmarcz/django | django/contrib/auth/models.py | Python | bsd-3-clause | 17,843 |
from __future__ import absolute_import, unicode_literals
from django.core.exceptions import PermissionDenied
from django.shortcuts import get_object_or_404
from wagtail.wagtailadmin.forms import PageViewRestrictionForm
from wagtail.wagtailadmin.modal_workflow import render_modal_workflow
from wagtail.wagtailcore.models import Page, PageViewRestriction
def set_privacy(request, page_id):
    """
    Modal-workflow view for viewing/editing the view restriction on a page.
    GET renders the form (or an informational panel when an ancestor already
    restricts this page); POST creates, updates or removes the restriction.
    """
    page = get_object_or_404(Page, id=page_id)
    page_perms = page.permissions_for_user(request.user)
    if not page_perms.can_set_view_restrictions():
        raise PermissionDenied
    # fetch restriction records in depth order so that ancestors appear first
    restrictions = page.get_view_restrictions().order_by('page__depth')
    if restrictions:
        restriction = restrictions[0]
        restriction_exists_on_ancestor = (restriction.page != page)
    else:
        restriction = None
        restriction_exists_on_ancestor = False
    if request.method == 'POST':
        form = PageViewRestrictionForm(request.POST, instance=restriction)
        # Restrictions inherited from an ancestor cannot be edited here, so a
        # POST in that state falls through to the read-only response below.
        if form.is_valid() and not restriction_exists_on_ancestor:
            if form.cleaned_data['restriction_type'] == PageViewRestriction.NONE:
                # remove any existing restriction
                if restriction:
                    restriction.delete()
            else:
                restriction = form.save(commit=False)
                restriction.page = page
                form.save()
            return render_modal_workflow(
                request, None, 'wagtailadmin/page_privacy/set_privacy_done.js', {
                    'is_public': (form.cleaned_data['restriction_type'] == 'none')
                }
            )
    else: # request is a GET
        if not restriction_exists_on_ancestor:
            if restriction:
                form = PageViewRestrictionForm(instance=restriction)
            else:
                # no current view restrictions on this page
                form = PageViewRestrictionForm(initial={
                    'restriction_type': 'none'
                })
    if restriction_exists_on_ancestor:
        # display a message indicating that there is a restriction at ancestor level -
        # do not provide the form for setting up new restrictions
        return render_modal_workflow(
            request, 'wagtailadmin/page_privacy/ancestor_privacy.html', None,
            {
                'page_with_restriction': restriction.page,
            }
        )
    else:
        # no restriction set at ancestor level - can set restrictions here
        # (an invalid POST reaches this point too, re-rendering the bound
        # form with its validation errors)
        return render_modal_workflow(
            request,
            'wagtailadmin/page_privacy/set_privacy.html',
            'wagtailadmin/page_privacy/set_privacy.js', {
                'page': page,
                'form': form,
            }
        )
| chrxr/wagtail | wagtail/wagtailadmin/views/page_privacy.py | Python | bsd-3-clause | 2,828 |
//
// Copyright (c) 2004-2011 Jaroslaw Kowalski <jaak@jkowalski.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * Neither the name of Jaroslaw Kowalski nor the names of its
// contributors may be used to endorse or promote products derived from this
// software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
// THE POSSIBILITY OF SUCH DAMAGE.
//
#if !NET_CF && !SILVERLIGHT
namespace NLog.Targets
{
using System.ComponentModel;
using System.Text;
using System.Text.RegularExpressions;
using NLog.Config;
/// <summary>
/// Highlighting rule for Win32 colorful console.
/// </summary>
[NLogConfigurationItem]
public class ConsoleWordHighlightingRule
{
private Regex compiledRegex;
/// <summary>
/// Initializes a new instance of the <see cref="ConsoleWordHighlightingRule" /> class.
/// </summary>
public ConsoleWordHighlightingRule()
{
this.BackgroundColor = ConsoleOutputColor.NoChange;
this.ForegroundColor = ConsoleOutputColor.NoChange;
}
/// <summary>
/// Initializes a new instance of the <see cref="ConsoleWordHighlightingRule" /> class.
/// </summary>
/// <param name="text">The text to be matched..</param>
/// <param name="foregroundColor">Color of the foreground.</param>
/// <param name="backgroundColor">Color of the background.</param>
public ConsoleWordHighlightingRule(string text, ConsoleOutputColor foregroundColor, ConsoleOutputColor backgroundColor)
{
this.Text = text;
this.ForegroundColor = foregroundColor;
this.BackgroundColor = backgroundColor;
}
/// <summary>
/// Gets or sets the regular expression to be matched. You must specify either <c>text</c> or <c>regex</c>.
/// </summary>
/// <docgen category='Rule Matching Options' order='10' />
public string Regex { get; set; }
/// <summary>
/// Gets or sets the text to be matched. You must specify either <c>text</c> or <c>regex</c>.
/// </summary>
/// <docgen category='Rule Matching Options' order='10' />
public string Text { get; set; }
/// <summary>
/// Gets or sets a value indicating whether to match whole words only.
/// </summary>
/// <docgen category='Rule Matching Options' order='10' />
[DefaultValue(false)]
public bool WholeWords { get; set; }
/// <summary>
/// Gets or sets a value indicating whether to ignore case when comparing texts.
/// </summary>
/// <docgen category='Rule Matching Options' order='10' />
[DefaultValue(false)]
public bool IgnoreCase { get; set; }
/// <summary>
/// Gets the compiled regular expression that matches either Text or Regex property.
/// </summary>
public Regex CompiledRegex
{
get
{
if (this.compiledRegex == null)
{
string regexpression = this.Regex;
if (regexpression == null && this.Text != null)
{
regexpression = System.Text.RegularExpressions.Regex.Escape(this.Text);
if (this.WholeWords)
{
regexpression = "\b" + regexpression + "\b";
}
}
RegexOptions regexOptions = RegexOptions.Compiled;
if (this.IgnoreCase)
{
regexOptions |= RegexOptions.IgnoreCase;
}
this.compiledRegex = new Regex(regexpression, regexOptions);
}
return this.compiledRegex;
}
}
/// <summary>
/// Gets or sets the foreground color.
/// </summary>
/// <docgen category='Formatting Options' order='10' />
[DefaultValue("NoChange")]
public ConsoleOutputColor ForegroundColor { get; set; }
/// <summary>
/// Gets or sets the background color.
/// </summary>
/// <docgen category='Formatting Options' order='10' />
[DefaultValue("NoChange")]
public ConsoleOutputColor BackgroundColor { get; set; }
internal string MatchEvaluator(Match m)
{
StringBuilder result = new StringBuilder();
result.Append('\a');
result.Append((char)((int)this.ForegroundColor + 'A'));
result.Append((char)((int)this.BackgroundColor + 'A'));
result.Append(m.Value);
result.Append('\a');
result.Append('X');
return result.ToString();
}
internal string ReplaceWithEscapeSequences(string message)
{
return this.CompiledRegex.Replace(message, new MatchEvaluator(this.MatchEvaluator));
}
}
}
#endif | jkowalski/NLog | src/NLog/Targets/ConsoleWordHighlightingRule.cs | C# | bsd-3-clause | 6,309 |
package ibxm;
/* A data array dynamically loaded from an InputStream. */
public class Data {
	private int bufLen;
	private byte[] buffer;
	private java.io.InputStream stream;
	/* Create a Data backed by inputStream. The first 64k is read eagerly;
	   further data is pulled in on demand when offsets beyond the current
	   buffer are accessed (see load()). */
	public Data( java.io.InputStream inputStream ) throws java.io.IOException {
		bufLen = 1 << 16;
		buffer = new byte[ bufLen ];
		stream = inputStream;
		readFully( stream, buffer, 0, bufLen );
	}
	/* Create a Data that wraps an in-memory byte array (no stream). */
	public Data( byte[] data ) {
		bufLen = data.length;
		buffer = data;
	}
	/* Signed byte at offset. */
	public byte sByte( int offset ) throws java.io.IOException {
		load( offset, 1 );
		return buffer[ offset ];
	}
	/* Unsigned byte at offset (0..255). */
	public int uByte( int offset ) throws java.io.IOException {
		load( offset, 1 );
		return buffer[ offset ] & 0xFF;
	}
	/* Unsigned 16-bit big-endian value at offset. */
	public int ubeShort( int offset ) throws java.io.IOException {
		load( offset, 2 );
		return ( ( buffer[ offset ] & 0xFF ) << 8 ) | ( buffer[ offset + 1 ] & 0xFF );
	}
	/* Unsigned 16-bit little-endian value at offset. */
	public int uleShort( int offset ) throws java.io.IOException {
		load( offset, 2 );
		return ( buffer[ offset ] & 0xFF ) | ( ( buffer[ offset + 1 ] & 0xFF ) << 8 );
	}
	/* 32-bit little-endian value at offset; the top bit of the high byte is
	   masked off (0x7F) so the result is always non-negative. */
	public int uleInt( int offset ) throws java.io.IOException {
		load( offset, 4 );
		int value = buffer[ offset ] & 0xFF;
		value = value | ( ( buffer[ offset + 1 ] & 0xFF ) << 8 );
		value = value | ( ( buffer[ offset + 2 ] & 0xFF ) << 16 );
		value = value | ( ( buffer[ offset + 3 ] & 0x7F ) << 24 );
		return value;
	}
	/* Latin-1 string of the given length; control characters (< 32) are
	   replaced with spaces. */
	public String strLatin1( int offset, int length ) throws java.io.IOException {
		load( offset, length );
		char[] str = new char[ length ];
		for( int idx = 0; idx < length; idx++ ) {
			int chr = buffer[ offset + idx ] & 0xFF;
			str[ idx ] = chr < 32 ? 32 : ( char ) chr;
		}
		return new String( str );
	}
	/* Code page 850 (DOS) string; falls back to Latin-1 if the Cp850 charset
	   is unavailable. Control characters are replaced with spaces. */
	public String strCp850( int offset, int length ) throws java.io.IOException {
		load( offset, length );
		try {
			char[] str = new String( buffer, offset, length, "Cp850" ).toCharArray();
			for( int idx = 0; idx < str.length; idx++ ) {
				str[ idx ] = str[ idx ] < 32 ? 32 : str[ idx ];
			}
			return new String( str );
		} catch( java.io.UnsupportedEncodingException e ) {
			return strLatin1( offset, length );
		}
	}
	/* Signed 8-bit sample data, expanded to 16-bit. */
	public short[] samS8( int offset, int length ) throws java.io.IOException {
		load( offset, length );
		short[] sampleData = new short[ length ];
		for( int idx = 0; idx < length; idx++ ) {
			sampleData[ idx ] = ( short ) ( buffer[ offset + idx ] << 8 );
		}
		return sampleData;
	}
	/* Delta-coded signed 8-bit sample data, expanded to 16-bit. */
	public short[] samS8D( int offset, int length ) throws java.io.IOException {
		load( offset, length );
		short[] sampleData = new short[ length ];
		int sam = 0;
		for( int idx = 0; idx < length; idx++ ) {
			sam += buffer[ offset + idx ];
			sampleData[ idx ] = ( short ) ( sam << 8 );
		}
		return sampleData;
	}
	/* Unsigned 8-bit sample data, re-centred and expanded to 16-bit. */
	public short[] samU8( int offset, int length ) throws java.io.IOException {
		load( offset, length );
		short[] sampleData = new short[ length ];
		for( int idx = 0; idx < length; idx++ ) {
			sampleData[ idx ] = ( short ) ( ( ( buffer[ offset + idx ] & 0xFF ) - 128 ) << 8 );
		}
		return sampleData;
	}
	/* Signed 16-bit little-endian sample data. */
	public short[] samS16( int offset, int samples ) throws java.io.IOException {
		load( offset, samples * 2 );
		short[] sampleData = new short[ samples ];
		for( int idx = 0; idx < samples; idx++ ) {
			sampleData[ idx ] = ( short ) ( ( buffer[ offset + idx * 2 ] & 0xFF ) | ( buffer[ offset + idx * 2 + 1 ] << 8 ) );
		}
		return sampleData;
	}
	/* Delta-coded signed 16-bit little-endian sample data. */
	public short[] samS16D( int offset, int samples ) throws java.io.IOException {
		load( offset, samples * 2 );
		short[] sampleData = new short[ samples ];
		int sam = 0;
		for( int idx = 0; idx < samples; idx++ ) {
			sam += ( buffer[ offset + idx * 2 ] & 0xFF ) | ( buffer[ offset + idx * 2 + 1 ] << 8 );
			sampleData[ idx ] = ( short ) sam;
		}
		return sampleData;
	}
	/* Unsigned 16-bit little-endian sample data, re-centred to signed. */
	public short[] samU16( int offset, int samples ) throws java.io.IOException {
		load( offset, samples * 2 );
		short[] sampleData = new short[ samples ];
		for( int idx = 0; idx < samples; idx++ ) {
			int sam = ( buffer[ offset + idx * 2 ] & 0xFF ) | ( ( buffer[ offset + idx * 2 + 1 ] & 0xFF ) << 8 );
			sampleData[ idx ] = ( short ) ( sam - 32768 );
		}
		return sampleData;
	}
	/* Ensure bytes [offset, offset+length) are in the buffer, doubling the
	   buffer and reading the newly exposed region from the stream as needed.
	   For the byte[] constructor (stream == null) this only grows the buffer. */
	private void load( int offset, int length ) throws java.io.IOException {
		while( offset + length > bufLen ) {
			int newBufLen = bufLen << 1;
			byte[] newBuf = new byte[ newBufLen ];
			System.arraycopy( buffer, 0, newBuf, 0, bufLen );
			if( stream != null ) {
				readFully( stream, newBuf, bufLen, newBufLen - bufLen );
			}
			bufLen = newBufLen;
			buffer = newBuf;
		}
	}
	/* Read up to length bytes into buffer. InputStream.read() returns -1 at
	   end of stream, which terminates the loop; a short read at EOF is
	   silently tolerated (the remainder of the buffer stays zero-filled) —
	   presumably intentional for truncated module files; verify if reused. */
	private static void readFully( java.io.InputStream inputStream, byte[] buffer, int offset, int length ) throws java.io.IOException {
		int read = 1, end = offset + length;
		while( read > 0 ) {
			read = inputStream.read( buffer, offset, end - offset );
			offset += read;
		}
	}
}
| Arcnor/micromod | ibxm/src/main/java/ibxm/Data.java | Java | bsd-3-clause | 4,767 |
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "components/spellcheck/browser/android/component_jni_registrar.h"
#include "base/android/jni_android.h"
#include "base/android/jni_registrar.h"
#include "components/spellcheck/browser/spellchecker_session_bridge_android.h"
namespace spellcheck {
namespace android {
// Table of JNI registration hooks for the spellcheck component.
static base::android::RegistrationMethod kSpellcheckRegisteredMethods[] = {
    {"SpellCheckerSessionBridge", SpellCheckerSessionBridge::RegisterJNI},
};
// Registers this component's native methods with the JVM. Returns false if
// any registration fails.
bool RegisterSpellcheckJni(JNIEnv* env) {
  return base::android::RegisterNativeMethods(
      env, kSpellcheckRegisteredMethods,
      std::size(kSpellcheckRegisteredMethods));
}
} // namespace android
} // namespace spellcheck
| chromium/chromium | components/spellcheck/browser/android/component_jni_registrar.cc | C++ | bsd-3-clause | 833 |
// Public re-export of the debug tooling entry points from the internal tools module.
export { enableDebugTools, disableDebugTools } from 'angular2/src/tools/tools';
| binariedMe/blogging | node_modules/angular2/tools.d.ts | TypeScript | mit | 80 |
# Presenter for "value" (numeric answer) questions.
class ValueQuestionPresenter < QuestionPresenter
  include ActionView::Helpers::NumberHelper
  # Format the user's numeric answer with thousands delimiters for display,
  # e.g. 1234567 -> "1,234,567".
  def response_label(value)
    number_with_delimiter(value)
  end
end
| stwalsh/smart-answers | app/presenters/value_question_presenter.rb | Ruby | mit | 165 |
#if !NETSTANDARD2_0
using OfficeDevPnP.Core.IdentityModel.WSTrustBindings;
using System;
using System.IdentityModel.Protocols.WSTrust;
using System.IdentityModel.Tokens;
using System.Net;
using System.ServiceModel;
using System.ServiceModel.Security;
namespace OfficeDevPnP.Core.IdentityModel.TokenProviders.ADFS
{
    /// <summary>
    /// ADFS Active authentication based on username + password. Uses the trust/13/usernamemixed ADFS endpoint.
    /// </summary>
    public class UsernameMixed : BaseProvider
    {
        /// <summary>
        /// Performs active authentication against ADFS using the trust/13/usernamemixed ADFS endpoint.
        /// </summary>
        /// <param name="siteUrl">Url of the SharePoint site that's secured via ADFS</param>
        /// <param name="userName">Name of the user (e.g. domain\administrator) </param>
        /// <param name="password">Password of the user</param>
        /// <param name="userNameMixed">Uri to the ADFS usernamemixed endpoint</param>
        /// <param name="relyingPartyIdentifier">Identifier of the ADFS relying party that we're hitting</param>
        /// <param name="logonTokenCacheExpirationWindow">Upper bound, in minutes, for the lifetime of the issued FedAuth cookie</param>
        /// <returns>A cookiecontainer holding the FedAuth cookie, or null when no FedAuth value could be produced</returns>
        public CookieContainer GetFedAuthCookie(string siteUrl, string userName, string password, Uri userNameMixed, string relyingPartyIdentifier, int logonTokenCacheExpirationWindow)
        {
            UsernameMixed adfsTokenProvider = new UsernameMixed();
            var token = adfsTokenProvider.RequestToken(userName, password, userNameMixed, relyingPartyIdentifier);
            // Convert the issued SAML assertion into the FedAuth payload SharePoint expects.
            string fedAuthValue = TransformSamlTokenToFedAuth(token.TokenXml.OuterXml, siteUrl, relyingPartyIdentifier);
            // Construct the cookie expiration date
            TimeSpan lifeTime = SamlTokenlifeTime(token.TokenXml.OuterXml);
            if (lifeTime == TimeSpan.Zero)
            {
                // Default to one hour when the token carries no usable lifetime.
                lifeTime = new TimeSpan(0, 60, 0);
            }
            // The cookie must not outlive the token nor the configured cache window.
            int cookieLifeTime = Math.Min((int)lifeTime.TotalMinutes, logonTokenCacheExpirationWindow);
            DateTime expiresOn = DateTime.Now.AddMinutes(cookieLifeTime);
            CookieContainer cc = null;
            if (!string.IsNullOrEmpty(fedAuthValue))
            {
                cc = new CookieContainer();
                Cookie samlAuth = new Cookie("FedAuth", fedAuthValue);
                samlAuth.Expires = expiresOn;
                samlAuth.Path = "/";
                samlAuth.Secure = true;
                samlAuth.HttpOnly = true;
                Uri samlUri = new Uri(siteUrl);
                // Scope the cookie to the SharePoint site's host.
                samlAuth.Domain = samlUri.Host;
                cc.Add(samlAuth);
            }
            return cc;
        }
        /// <summary>
        /// Requests a bearer token for the relying party from the ADFS usernamemixed endpoint using WS-Trust 1.3.
        /// </summary>
        private GenericXmlSecurityToken RequestToken(string userName, string passWord, Uri userNameMixed, string relyingPartyIdentifier)
        {
            GenericXmlSecurityToken genericToken = null;
            using (var factory = new WSTrustChannelFactory(new UserNameWSTrustBinding(SecurityMode.TransportWithMessageCredential), new EndpointAddress(userNameMixed)))
            {
                factory.TrustVersion = TrustVersion.WSTrust13;
                // Hookup the user and password
                factory.Credentials.UserName.UserName = userName;
                factory.Credentials.UserName.Password = passWord;
                var requestSecurityToken = new RequestSecurityToken
                {
                    RequestType = RequestTypes.Issue,
                    AppliesTo = new EndpointReference(relyingPartyIdentifier),
                    // Bearer key: no proof-of-possession key; the token is simply presented.
                    KeyType = KeyTypes.Bearer
                };
                IWSTrustChannelContract channel = factory.CreateChannel();
                genericToken = channel.Issue(requestSecurityToken) as GenericXmlSecurityToken;
                factory.Close();
            }
            return genericToken;
        }
    }
#endif | OfficeDev/PnP-Sites-Core | Core/OfficeDevPnP.Core/IdentityModel/TokenProviders/ADFS/UsernameMixed.cs | C# | mit | 3,992 |
# -*- coding: utf-8 -*-
"""API Request cache tests."""
#
# (C) Pywikibot team, 2012-2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id$'
#
from pywikibot.site import BaseSite
import scripts.maintenance.cache as cache
from tests import _cache_dir
from tests.aspects import unittest, TestCase
class RequestCacheTests(TestCase):
    """Validate cache entries."""
    # No live network access is needed; the checks run over on-disk cache files.
    net = False
    def _check_cache_entry(self, entry):
        """Assert validity of the cache entry."""
        # Every cached request must remember which site produced it.
        self.assertIsInstance(entry.site, BaseSite)
        self.assertIsInstance(entry.site._loginstatus, int)
        self.assertIsInstance(entry.site._username, list)
        # A site with loginstatus >= 1 must have recorded a user name.
        if entry.site._loginstatus >= 1:
            self.assertIsNotNone(entry.site._username[0])
        # The request parameters must have survived (de)serialization as a dict.
        self.assertIsInstance(entry._params, dict)
        self.assertIsNotNone(entry._params)
        # TODO: more tests on entry._params, and possibly fixes needed
        # to make it closely replicate the original object.
    def test_cache(self):
        """Test the apicache by doing _check_cache_entry over each entry."""
        cache.process_entries(_cache_dir, self._check_cache_entry)
if __name__ == '__main__':
    # Run the suite when this module is executed directly.
    unittest.main()
| valhallasw/pywikibot-core | tests/cache_tests.py | Python | mit | 1,258 |
var expect = require('expect.js');
var path = require('path');
var fs = require('../extfs');
// Test suite for the extfs module (an extension over Node's fs).
describe('extfs', function () {
  var rootPath = path.join(__dirname, '../');

  it('should return all directories', function (done) {
    fs.getDirs(rootPath, function (err, dirs) {
      expect(dirs).to.be.an(Array);
      expect(dirs.length).to.be.greaterThan(0);
      done();
    });
  });

  it('should return all directories sync', function () {
    var dirs = fs.getDirsSync(rootPath);
    expect(dirs).to.be.an(Array);
    expect(dirs.length > 0).to.be.ok();
  });

  it('should check if a file is empty', function (done) {
    var notEmptyFile = path.join(__dirname, '../README.md');
    // A path that does not exist is reported as empty.
    var emptyFile = './AN EMPTY FILE';
    fs.isEmpty(notEmptyFile, function (empty) {
      expect(empty).to.be(false);
      fs.isEmpty(emptyFile, function (empty) {
        expect(empty).to.be(true);
        done();
      });
    });
  });

  it('should check if a file is empty sync', function () {
    var notEmptyFile = path.join(__dirname, '../README.md');
    var emptyFile = './AN EMPTY FILE';
    var empty = fs.isEmptySync(notEmptyFile);
    expect(empty).to.be(false);
    empty = fs.isEmptySync(emptyFile);
    expect(empty).to.be(true);
  });

  it('should check if a directory is empty', function (done) {
    var notEmptyDir = __dirname;
    var emptyDir = './AN EMPTY DIR';
    fs.isEmpty(notEmptyDir, function (empty) {
      expect(empty).to.be(false);
      fs.isEmpty(emptyDir, function (empty) {
        expect(empty).to.be(true);
        done();
      });
    });
  });

  it('should check if a directory is empty sync', function () {
    var notEmptyDir = __dirname;
    var emptyDir = './AN EMPTY DIR';
    expect(fs.isEmptySync(notEmptyDir)).to.be(false);
    expect(fs.isEmptySync(emptyDir)).to.be(true);
  });

  describe('remove directories', function () {
    var tmpPath = path.join(rootPath, 'tmp');
    var folders = [ 'folder1', 'folder2', 'folder3' ];
    var files = [ '1.txt', '2.txt', '3.txt' ];

    folders = folders.map(function (folder) {
      return path.join(tmpPath, folder);
    });

    /**
     * Create 3 folders with 3 files each
     */
    beforeEach(function () {
      if (!fs.existsSync(tmpPath)) {
        fs.mkdirSync(tmpPath, '0755');
      }
      folders.forEach(function (folder) {
        if (!fs.existsSync(folder)) {
          fs.mkdirSync(folder, '0755');
        }
        files.forEach(function (file) {
          // BUGFIX: fs.writeFile is asynchronous and was called without a
          // callback, so the fixture files were not guaranteed to exist by the
          // time the tests ran (and recent Node versions throw when no
          // callback is supplied). Write synchronously so beforeEach returns
          // with all fixtures on disk.
          fs.writeFileSync(path.join(folder, file), 'file content');
        });
      });
    });

    it('should remove a non empty directory', function (done) {
      fs.remove(tmpPath, function (err) {
        expect(err).to.be(null);
        expect(fs.existsSync(tmpPath)).to.be(false);
        done();
      });
    });

    it('should remove a non empty directory synchronously', function () {
      fs.removeSync(tmpPath);
      expect(fs.existsSync(tmpPath)).to.be(false);
    });

    it('should remove an array of directories', function (done) {
      fs.remove(folders, function (err) {
        expect(err).to.be(null);
        expect(fs.existsSync(folders[0])).to.be(false);
        expect(fs.existsSync(folders[1])).to.be(false);
        expect(fs.existsSync(folders[2])).to.be(false);
        // The parent directory itself must survive.
        expect(fs.existsSync(tmpPath)).to.be(true);
        done();
      });
    });

    it('should remove an array of directories synchronously', function () {
      fs.removeSync(folders);
      expect(fs.existsSync(folders[0])).to.be(false);
      expect(fs.existsSync(folders[1])).to.be(false);
      expect(fs.existsSync(folders[2])).to.be(false);
      expect(fs.existsSync(tmpPath)).to.be(true);
    });
  });

  it('should extends to fs', function () {
    expect(fs.readdir).to.be.a(Function);
  });
});
require 'set'
require 'tsort'
module Librarian
  # A set of manifests indexed by name, supporting dependency-aware
  # keep/strip operations and topological sorting.
  class ManifestSet

    # Hash wired into TSort so a {name => [dependency names]} mapping can be
    # topologically sorted.
    class GraphHash < Hash
      include TSort
      alias tsort_each_node each_key
      def tsort_each_child(node, &block)
        self[node].each(&block)
      end
    end

    class << self

      # Class-level conveniences: each builds a set, applies the operation and
      # returns the result in the same container shape (Hash or Array) as the
      # input.
      def shallow_strip(manifests, names)
        new(manifests).shallow_strip!(names).send(method_for(manifests))
      end

      def deep_strip(manifests, names)
        new(manifests).deep_strip!(names).send(method_for(manifests))
      end

      def shallow_keep(manifests, names)
        new(manifests).shallow_keep!(names).send(method_for(manifests))
      end

      def deep_keep(manifests, names)
        new(manifests).deep_keep!(names).send(method_for(manifests))
      end

      # Returns the manifests ordered so that every manifest appears after the
      # manifests it depends on.
      def sort(manifests)
        manifests = Hash[manifests.map{|m| [m.name, m]}] if Array === manifests
        manifest_pairs = GraphHash[manifests.map{|k, m| [k, m.dependencies.map{|d| d.name}]}]
        manifest_names = manifest_pairs.tsort
        manifest_names.map{|n| manifests[n]}
      end

      private

      def method_for(manifests)
        case manifests
        when Hash
          :to_hash
        when Array
          :to_a
        end
      end

    end

    def initialize(manifests)
      self.index = Hash === manifests ? manifests.dup : Hash[manifests.map{|m| [m.name, m]}]
    end

    def to_a
      index.values
    end

    def to_hash
      index.dup
    end

    def dup
      self.class.new(index)
    end

    def shallow_strip(names)
      dup.shallow_strip!(names)
    end

    # Removes exactly the named manifests (their dependencies are untouched).
    def shallow_strip!(names)
      assert_strings!(names)

      names.each do |name|
        index.delete(name)
      end
      self
    end

    def deep_strip(names)
      dup.deep_strip!(names)
    end

    # Removes the named manifests and everything they (transitively) depend on.
    def deep_strip!(names)
      names = Array === names ? names.dup : names.to_a
      assert_strings!(names)

      strippables = dependencies_of(names)
      shallow_strip!(strippables)

      self
    end

    def shallow_keep(names)
      dup.shallow_keep!(names)
    end

    # Keeps exactly the named manifests, discarding all others.
    def shallow_keep!(names)
      assert_strings!(names)

      names = Set.new(names) unless Set === names
      index.reject! { |k, v| !names.include?(k) }
      self
    end

    def deep_keep(names)
      # BUGFIX: this previously called the nonexistent method
      # `conservative_strip!`, raising NoMethodError. The intended
      # non-destructive counterpart of deep_keep! is deep_keep! on a copy.
      dup.deep_keep!(names)
    end

    # Keeps the named manifests and everything they (transitively) depend on.
    def deep_keep!(names)
      names = Array === names ? names.dup : names.to_a
      assert_strings!(names)

      keepables = dependencies_of(names)
      shallow_keep!(keepables)

      self
    end

    # True when every manifest's dependencies are present and satisfied.
    def consistent?
      index.values.all? do |manifest|
        in_compliance_with?(manifest.dependencies)
      end
    end

    def in_compliance_with?(dependencies)
      dependencies.all? do |dependency|
        manifest = index[dependency.name]
        manifest && manifest.satisfies?(dependency)
      end
    end

    private

    attr_accessor :index

    def assert_strings!(names)
      non_strings = names.reject{|name| String === name}
      non_strings.empty? or raise TypeError, "names must all be strings"
    end

    # Straightforward breadth-first graph traversal algorithm.
    def dependencies_of(names)
      names = Array === names ? names.dup : names.to_a
      assert_strings!(names)

      deps = Set.new
      until names.empty?
        name = names.shift
        next if deps.include?(name)

        deps << name
        names.concat index[name].dependencies.map(&:name)
      end

      deps.to_a
    end

  end
end
| phinze/librarian-puppet | vendor/librarian/lib/librarian/manifest_set.rb | Ruby | mit | 3,434 |
<?php
/**
* Magento
*
* NOTICE OF LICENSE
*
* This source file is subject to the Open Software License (OSL 3.0)
* that is bundled with this package in the file LICENSE.txt.
* It is also available through the world-wide-web at this URL:
* http://opensource.org/licenses/osl-3.0.php
* If you did not receive a copy of the license and are unable to
* obtain it through the world-wide-web, please send an email
* to license@magento.com so we can send you a copy immediately.
*
* DISCLAIMER
*
* Do not edit or add to this file if you wish to upgrade Magento to newer
* versions in the future. If you wish to customize Magento for your
* needs please refer to http://www.magento.com for more information.
*
* @category Mage
* @package Mage_CatalogSearch
* @copyright Copyright (c) 2006-2016 X.commerce, Inc. and affiliates (http://www.magento.com)
* @license http://opensource.org/licenses/osl-3.0.php Open Software License (OSL 3.0)
*/
/**
 * Session model for the CatalogSearch module.
 *
 * Thin wrapper over the core session abstraction; its only customization is
 * initializing the session with the 'catalogsearch' namespace key.
 */
class Mage_CatalogSearch_Model_Session extends Mage_Core_Model_Session_Abstract
{
    public function __construct()
    {
        // Keep this module's session data under its own 'catalogsearch' key.
        $this->init('catalogsearch');
    }
}
| hansbonini/cloud9-magento | www/app/code/core/Mage/CatalogSearch/Model/Session.php | PHP | mit | 1,136 |
<?php
/**
* Magento
*
* NOTICE OF LICENSE
*
* This source file is subject to the Open Software License (OSL 3.0)
* that is bundled with this package in the file LICENSE.txt.
* It is also available through the world-wide-web at this URL:
* http://opensource.org/licenses/osl-3.0.php
* If you did not receive a copy of the license and are unable to
* obtain it through the world-wide-web, please send an email
* to license@magento.com so we can send you a copy immediately.
*
* DISCLAIMER
*
* Do not edit or add to this file if you wish to upgrade Magento to newer
* versions in the future. If you wish to customize Magento for your
* needs please refer to http://www.magento.com for more information.
*
* @category Mage
* @package Mage_Rule
* @copyright Copyright (c) 2006-2016 X.commerce, Inc. and affiliates (http://www.magento.com)
* @license http://opensource.org/licenses/osl-3.0.php Open Software License (OSL 3.0)
*/
/**
 * Empty block class for the Rule module.
 *
 * NOTE(review): adds no behavior over Mage_Core_Block_Abstract; it appears to
 * exist only so a module-specific block type name can be referenced — confirm
 * against layout usage before removing.
 */
class Mage_Rule_Block_Rule extends Mage_Core_Block_Abstract
{
}
| hansbonini/cloud9-magento | www/app/code/core/Mage/Rule/Block/Rule.php | PHP | mit | 1,025 |
// Register each scenario sub-suite under its own named top-level describe
// block. Requiring inside the callback preserves the original registration
// order and titles exactly.
var scenarioSuites = [
    ["BASIC CRUD SCENARIOS", "./basic"],
    ["VALIDATE CRUD SCENARIOS", "./validation"],
    ["REPORT SCENARIOS", "./report"]
];
scenarioSuites.forEach(function (entry) {
    describe(entry[0], function () {
        require(entry[1]);
    });
});
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for
// license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
// Changes may cause incorrect behavior and will be lost if the code is
// regenerated.
namespace Microsoft.Azure.Management.ResourceManager.Models
{
using System.Linq;
    /// <summary>
    /// Deployment operation information.
    /// </summary>
    /// <remarks>
    /// AutoRest-generated model (see file header); edits here are lost on
    /// regeneration.
    /// </remarks>
    public partial class DeploymentOperation
    {
        /// <summary>
        /// Initializes a new instance of the DeploymentOperation class.
        /// </summary>
        public DeploymentOperation() { }

        /// <summary>
        /// Initializes a new instance of the DeploymentOperation class.
        /// </summary>
        /// <param name="id">Full deployment operation ID.</param>
        /// <param name="operationId">Deployment operation ID.</param>
        /// <param name="properties">Deployment properties.</param>
        public DeploymentOperation(string id = default(string), string operationId = default(string), DeploymentOperationProperties properties = default(DeploymentOperationProperties))
        {
            Id = id;
            OperationId = operationId;
            Properties = properties;
        }

        /// <summary>
        /// Gets full deployment operation ID.
        /// </summary>
        /// <remarks>Setter is private; the value is assigned via the constructor
        /// (e.g. during JSON deserialization).</remarks>
        [Newtonsoft.Json.JsonProperty(PropertyName = "id")]
        public string Id { get; private set; }

        /// <summary>
        /// Gets deployment operation ID.
        /// </summary>
        /// <remarks>Setter is private; the value is assigned via the constructor.</remarks>
        [Newtonsoft.Json.JsonProperty(PropertyName = "operationId")]
        public string OperationId { get; private set; }

        /// <summary>
        /// Gets or sets deployment properties.
        /// </summary>
        [Newtonsoft.Json.JsonProperty(PropertyName = "properties")]
        public DeploymentOperationProperties Properties { get; set; }

    }
}
| ScottHolden/azure-sdk-for-net | src/SDKs/Resource/Management.ResourceManager/Generated/Models/DeploymentOperation.cs | C# | mit | 1,977 |
/**
* The MIT License
* Copyright (c) 2014-2016 Ilkka Seppälä
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.iluwatar.factorykit;
/**
 * Bow is a concrete {@link Weapon} created by the factory kit.
 */
public class Bow implements Weapon {
  /** @return the display name of this weapon */
  @Override
  public String toString() {
    return "Bow";
  }
}
| Crossy147/java-design-patterns | factory-kit/src/main/java/com/iluwatar/factorykit/Bow.java | Java | mit | 1,281 |
# Require every Ruby file under lib/core_ext so the core-class extensions
# defined there are loaded at boot (order is whatever Dir[] returns).
Dir[File.join(Rails.root, "lib", "core_ext", "*.rb")].each {|l| require l }
// ==++==
//
// Copyright (c) Microsoft Corporation. All rights reserved.
//
// ==--==
namespace System.Runtime.CompilerServices
{
using System;
using System.Reflection;
    // This Enum matchs the miImpl flags defined in corhdr.h. It is used to specify
    // certain method properties. Each member aliases the corresponding
    // System.Reflection.MethodImplAttributes flag so the numeric values stay in
    // sync with the CLR metadata definition.
    [Flags]
    [Serializable]
    public enum MethodImplOptions
    {
        Unmanaged = MethodImplAttributes.Unmanaged ,
        ForwardRef = MethodImplAttributes.ForwardRef ,
        PreserveSig = MethodImplAttributes.PreserveSig ,
        InternalCall = MethodImplAttributes.InternalCall,
        Synchronized = MethodImplAttributes.Synchronized,
        NoInlining = MethodImplAttributes.NoInlining ,
    }
    // Describes the kind of code body a method has; values alias the matching
    // System.Reflection.MethodImplAttributes members.
    [Serializable]
    public enum MethodCodeType
    {
        IL = System.Reflection.MethodImplAttributes.IL ,
        Native = System.Reflection.MethodImplAttributes.Native ,
        /// <internalonly/>
        OPTIL = System.Reflection.MethodImplAttributes.OPTIL ,
        Runtime = System.Reflection.MethodImplAttributes.Runtime,
    }
    // Custom attribute to specify additional method properties.
    // Applicable to methods and constructors only; not inherited by overrides.
    [Serializable]
    [AttributeUsage( AttributeTargets.Method | AttributeTargets.Constructor, Inherited = false )]
    sealed public class MethodImplAttribute : Attribute
    {
        internal MethodImplOptions m_val;
        public MethodCodeType MethodCodeType;

        // Internal constructor used when converting from raw metadata flags:
        // masks the input down to the flags representable by MethodImplOptions.
        internal MethodImplAttribute( MethodImplAttributes methodImplAttributes )
        {
            MethodImplOptions all = MethodImplOptions.Unmanaged | MethodImplOptions.ForwardRef | MethodImplOptions.PreserveSig |
                MethodImplOptions.InternalCall | MethodImplOptions.Synchronized | MethodImplOptions.NoInlining;
            m_val = ((MethodImplOptions)methodImplAttributes) & all;
        }

        public MethodImplAttribute( MethodImplOptions methodImplOptions )
        {
            m_val = methodImplOptions;
        }

        //// public MethodImplAttribute( short value )
        //// {
        ////     m_val = (MethodImplOptions)value;
        //// }

        public MethodImplAttribute()
        {
        }

        // The flags supplied at construction time (read-only).
        public MethodImplOptions Value
        {
            get
            {
                return m_val;
            }
        }
    }
}
| jelin1/llilum | Zelig/Zelig/RunTime/Framework/mscorlib/System/Runtime/CompilerServices/MethodImplAttribute.cs | C# | mit | 2,378 |
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
using System;
using System.Collections;
using System.Collections.Generic;
using System.Diagnostics;
using System.IO;
using System.Threading;
using Microsoft.Build.Framework;
using Microsoft.Build.BuildEngine.Shared;
using System.Security.AccessControl;
namespace Microsoft.Build.BuildEngine
{
/// <summary>
/// This class hosts a node class in the child process. It uses shared memory to communicate
/// with the local node provider.
/// Wraps a Node.
/// </summary>
public class LocalNode
{
#region Static Constructors
/// <summary>
/// Hook up an unhandled exception handler, in case our error handling paths are leaky
/// </summary>
static LocalNode()
{
AppDomain currentDomain = AppDomain.CurrentDomain;
currentDomain.UnhandledException += new UnhandledExceptionEventHandler(UnhandledExceptionHandler);
}
#endregion
#region Static Methods
/// <summary>
/// Dump any unhandled exceptions to a file so they can be diagnosed
/// </summary>
private static void UnhandledExceptionHandler(object sender, UnhandledExceptionEventArgs e)
{
Exception ex = (Exception)e.ExceptionObject;
DumpExceptionToFile(ex);
}
        /// <summary>
        /// Dump the exception information to a file
        /// </summary>
        /// <param name="ex">Exception to append to the dump file.</param>
        internal static void DumpExceptionToFile(Exception ex)
        {
            // Lock as multiple threads may throw simultaneously
            lock (dumpFileLocker)
            {
                // First call in this process: pick a unique file name and write a header.
                if (dumpFileName == null)
                {
                    Guid guid = Guid.NewGuid();
                    string tempPath = Path.GetTempPath();
                    // For some reason we get Watson buckets because GetTempPath gives us a folder here that doesn't exist.
                    // Either because %TMP% is misdefined, or because they deleted the temp folder during the build.
                    if (!Directory.Exists(tempPath))
                    {
                        // If this throws, no sense catching it, we can't log it now, and we're here
                        // because we're a child node with no console to log to, so die
                        Directory.CreateDirectory(tempPath);
                    }
                    dumpFileName = Path.Combine(tempPath, "MSBuild_" + guid.ToString());
                    using (StreamWriter writer = new StreamWriter(dumpFileName, true /*append*/))
                    {
                        writer.WriteLine("UNHANDLED EXCEPTIONS FROM CHILD NODE:");
                        writer.WriteLine("===================");
                    }
                }
                // Append a timestamped record of this exception.
                using (StreamWriter writer = new StreamWriter(dumpFileName, true /*append*/))
                {
                    writer.WriteLine(DateTime.Now.ToLongTimeString());
                    writer.WriteLine(ex.ToString());
                    writer.WriteLine("===================");
                }
            }
        }
#endregion
#region Constructors
        /// <summary>
        /// Creates an instance of this class.
        /// </summary>
        /// <param name="nodeNumberIn">Index identifying which local node slot this
        /// process occupies; used to derive the names of the shared global events
        /// and shared memory regions.</param>
        internal LocalNode(int nodeNumberIn)
        {
            this.nodeNumber = nodeNumberIn;
            // The callback shares communicationThreadExitEvent so its writer thread
            // shuts down together with this class's reader thread.
            engineCallback = new LocalNodeCallback(communicationThreadExitEvent, this);
        }
#endregion
#region Communication Methods
        /// <summary>
        /// This method causes the reader and writer threads to start and create the shared memory structures
        /// </summary>
        void StartCommunicationThreads()
        {
            // The writer thread should be created before the
            // reader thread because some LocalCallDescriptors
            // assume the shared memory for the writer thread
            // has already been created. The method will both
            // instantiate the shared memory for the writer
            // thread and also start the writer thread itself.
            // We will verifyThrow in the method if the
            // sharedMemory was not created correctly.
            engineCallback.StartWriterThread(nodeNumber);
            // Create the shared memory buffer
            this.sharedMemory =
                new SharedMemory
                (
                    // Generate the name for the shared memory region
                    LocalNodeProviderGlobalNames.NodeInputMemoryName(nodeNumber),
                    SharedMemoryType.ReadOnly,
                    // Reuse an existing shared memory region as it should have already
                    // been created by the parent node side
                    true
                );
            ErrorUtilities.VerifyThrow(this.sharedMemory.IsUsable,
                "Failed to create shared memory for local node input.");
            // Start the thread that will be processing the calls from the parent engine
            ThreadStart threadState = new ThreadStart(this.SharedMemoryReaderThread);
            readerThread = new Thread(threadState);
            readerThread.Name = "MSBuild Child<-Parent Reader";
            readerThread.Start();
        }
        /// <summary>
        /// This method causes the reader and writer threads to exit and dispose of the shared memory structures
        /// </summary>
        void StopCommunicationThreads()
        {
            // Signal both the reader thread and the callback's writer thread to exit.
            communicationThreadExitEvent.Set();
            // Wait for communication threads to exit
            Thread writerThread = engineCallback.GetWriterThread();
            // The threads may not exist if the child has timed out before the parent has told the node
            // to start up its communication threads. This can happen if the node is started with /nodemode:x
            // and no parent is running, or if the parent node has spawned a new process and then crashed
            // before establishing communication with the child node.
            if(writerThread != null)
            {
                writerThread.Join();
            }
            if (readerThread != null)
            {
                readerThread.Join();
            }
            // Make sure the exit event is not set
            communicationThreadExitEvent.Reset();
        }
#endregion
#region Startup Methods
        /// <summary>
        /// Create global events necessary for handshaking with the parent
        /// </summary>
        /// <param name="nodeNumber">Node slot index; used to derive the global event names.</param>
        /// <returns>True if events created successfully and false otherwise</returns>
        private static bool CreateGlobalEvents(int nodeNumber)
        {
            bool createdNew = false;
            if (NativeMethods.IsUserAdministrator())
            {
                EventWaitHandleSecurity mSec = new EventWaitHandleSecurity();
                // Add a rule that grants the access only to admins and systems
                mSec.SetSecurityDescriptorSddlForm(NativeMethods.ADMINONLYSDDL);
                // Create an initiation event to allow the parent side to prove to the child that we have the same level of privilege as it does.
                // this is done by having the parent set this event which means it needs to have administrative permissions to do so.
                globalInitiateActivationEvent = new EventWaitHandle(false, EventResetMode.ManualReset, LocalNodeProviderGlobalNames.NodeInitiateActivationEventName(nodeNumber), out createdNew, mSec);
            }
            else
            {
                // Create an initiation event to allow the parent side to prove to the child that we have the same level of privilege as it does.
                // this is done by having the parent set this event which means it has atleast the same permissions as the child process
                globalInitiateActivationEvent = new EventWaitHandle(false, EventResetMode.ManualReset, LocalNodeProviderGlobalNames.NodeInitiateActivationEventName(nodeNumber), out createdNew);
            }
            // This process must be the creator of the event to prevent squating by a lower privilaged attacker
            if (!createdNew)
            {
                return false;
            }
            // Informs the parent process that the child process has been created.
            globalNodeActive = new EventWaitHandle(false, EventResetMode.ManualReset, LocalNodeProviderGlobalNames.NodeActiveEventName(nodeNumber));
            globalNodeActive.Set();
            // Indicate to the parent process, this node is currently is ready to start to recieve requests
            globalNodeInUse = new EventWaitHandle(false, EventResetMode.ManualReset, LocalNodeProviderGlobalNames.NodeInUseEventName(nodeNumber));
            // Used by the parent process to inform the child process to shutdown due to the child process
            // not recieving the initialization command.
            globalNodeErrorShutdown = new EventWaitHandle(false, EventResetMode.ManualReset, LocalNodeProviderGlobalNames.NodeErrorShutdownEventName(nodeNumber));
            // Inform the parent process the node has started its communication threads.
            globalNodeActivate = new EventWaitHandle(false, EventResetMode.ManualReset, LocalNodeProviderGlobalNames.NodeActivedEventName(nodeNumber));
            return true;
        }
        /// <summary>
        /// This function starts local node when process is launched and shuts it down on time out
        /// Called by msbuild.exe.
        /// Main loop: waits on (0) polite shutdown, (1) error shutdown,
        /// (2) node put in use by a parent, (3) parent requesting activation of the
        /// communication threads; a WaitTimeout after inactivityTimeout also shuts down.
        /// </summary>
        /// <param name="nodeNumber">Node slot index this process should serve.</param>
        public static void StartLocalNodeServer(int nodeNumber)
        {
            // Create global events necessary for handshaking with the parent
            if (!CreateGlobalEvents(nodeNumber))
            {
                return;
            }
            LocalNode localNode = new LocalNode(nodeNumber);
            WaitHandle[] waitHandles = new WaitHandle[4];
            waitHandles[0] = shutdownEvent;
            waitHandles[1] = globalNodeErrorShutdown;
            waitHandles[2] = inUseEvent;
            waitHandles[3] = globalInitiateActivationEvent;
            // This is necessary to make build.exe finish promptly. Dont remove.
            if (!Engine.debugMode)
            {
                // Create null streams for the current input/output/error streams
                Console.SetOut(new StreamWriter(Stream.Null));
                Console.SetError(new StreamWriter(Stream.Null));
                Console.SetIn(new StreamReader(Stream.Null));
            }
            bool continueRunning = true;
            while (continueRunning)
            {
                int eventType = WaitHandle.WaitAny(waitHandles, inactivityTimeout, false);
                if (eventType == 0 || eventType == 1 || eventType == WaitHandle.WaitTimeout)
                {
                    // Shutdown: politely for a normal shutdown or inactivity timeout,
                    // with error level if the parent signalled an error shutdown.
                    continueRunning = false;
                    localNode.ShutdownNode(eventType != 1 ?
                                           Node.NodeShutdownLevel.PoliteShutdown :
                                           Node.NodeShutdownLevel.ErrorShutdown, true, true);
                }
                else if (eventType == 2)
                {
                    // reset the event as we do not want it to go into this state again when we are done with this if statement.
                    inUseEvent.Reset();
                    // The parent knows at this point the child process has been launched
                    globalNodeActivate.Reset();
                    // Set the global inuse event so other parent processes know this node is now initialized
                    globalNodeInUse.Set();
                    // Make a copy of the parents handle to protect ourselves in case the parent dies,
                    // this is to prevent a parent from reserving a node another parent is trying to use.
                    globalNodeReserveHandle =
                        new EventWaitHandle(false, EventResetMode.ManualReset, LocalNodeProviderGlobalNames.NodeReserveEventName(nodeNumber));
                    WaitHandle[] waitHandlesActive = new WaitHandle[3];
                    waitHandlesActive[0] = shutdownEvent;
                    waitHandlesActive[1] = globalNodeErrorShutdown;
                    waitHandlesActive[2] = notInUseEvent;
                    eventType = WaitHandle.WaitTimeout;
                    // Inner loop: while in use, periodically verify the parent is alive.
                    while (eventType == WaitHandle.WaitTimeout && continueRunning == true)
                    {
                        eventType = WaitHandle.WaitAny(waitHandlesActive, parentCheckInterval, false);
                        if (eventType == 0 || /* nice shutdown due to shutdownEvent */
                            eventType == 1 || /* error shutdown due to globalNodeErrorShutdown */
                            eventType == WaitHandle.WaitTimeout && !localNode.IsParentProcessAlive())
                        {
                            continueRunning = false;
                            // If the exit is not triggered by running of shutdown method
                            if (eventType != 0)
                            {
                                localNode.ShutdownNode(Node.NodeShutdownLevel.ErrorShutdown, true, true);
                            }
                        }
                        else if (eventType == 2)
                        {
                            // Trigger a collection before the node goes idle to insure that
                            // the memory is released to the system as soon as possible
                            GC.Collect();
                            // Change the current directory to a safe one so that the directory
                            // last used by the build can be safely deleted. We must have read
                            // access to the safe directory so use SystemDirectory for this purpose.
                            Directory.SetCurrentDirectory(Environment.SystemDirectory);
                            notInUseEvent.Reset();
                            globalNodeInUse.Reset();
                        }
                    }
                    ErrorUtilities.VerifyThrow(localNode.node == null,
                        "Expected either node to be null or continueRunning to be false.");
                    // Stop the communication threads and release the shared memory object so that the next parent can create it
                    localNode.StopCommunicationThreads();
                    // Close the local copy of the reservation handle (this allows another parent to reserve
                    // the node)
                    globalNodeReserveHandle.Close();
                    globalNodeReserveHandle = null;
                }
                else if (eventType == 3)
                {
                    // Parent asked us to spin up the shared-memory communication threads.
                    globalInitiateActivationEvent.Reset();
                    localNode.StartCommunicationThreads();
                    globalNodeActivate.Set();
                }
            }
            // Stop the communication threads and release the shared memory object so that the next parent can create it
            localNode.StopCommunicationThreads();
            globalNodeActive.Close();
            globalNodeInUse.Close();
        }
#endregion
#region Methods
        /// <summary>
        /// This method is run in its own thread, it is responsible for reading messages sent from the parent process
        /// through the shared memory region.
        /// </summary>
        private void SharedMemoryReaderThread()
        {
            // Create an array of event to the node thread responds
            WaitHandle[] waitHandles = new WaitHandle[2];
            waitHandles[0] = communicationThreadExitEvent;
            waitHandles[1] = sharedMemory.ReadFlag;
            bool continueExecution = true;
            try
            {
                while (continueExecution)
                {
                    // Wait for the next work item or an exit command
                    int eventType = WaitHandle.WaitAny(waitHandles);
                    if (eventType == 0)
                    {
                        // Exit node event
                        continueExecution = false;
                    }
                    else
                    {
                        // Read the list of LocalCallDescriptors from sharedMemory,
                        // this will be null if a large object is being read from shared
                        // memory and will continue to be null until the large object has
                        // been completly sent.
                        IList localCallDescriptorList = sharedMemory.Read();
                        if (localCallDescriptorList != null)
                        {
                            foreach (LocalCallDescriptor callDescriptor in localCallDescriptorList)
                            {
                                // Execute the command method which relates to running on a child node
                                callDescriptor.NodeAction(node, this);
                                if ((callDescriptor.IsReply) && (callDescriptor is LocalReplyCallDescriptor))
                                {
                                    // Process the reply from the parent so it can be looked in a hashtable based
                                    // on the call descriptor who requested the reply.
                                    engineCallback.PostReplyFromParent((LocalReplyCallDescriptor) callDescriptor);
                                }
                            }
                        }
                    }
                }
            }
            catch (Exception e)
            {
                // Will rethrow the exception if necessary
                ReportFatalCommunicationError(e);
            }
            // Dispose of the shared memory buffer
            if (sharedMemory != null)
            {
                sharedMemory.Dispose();
                sharedMemory = null;
            }
        }
        /// <summary>
        /// This method will shutdown the node being hosted by the child process and notify the parent process if requested,
        /// </summary>
        /// <param name="shutdownLevel">What kind of shutdown is causing the child node to shutdown</param>
        /// <param name="exitProcess">should the child process exit as part of the shutdown process</param>
        /// <param name="noParentNotification">Indicates if the parent process should be notified the child node is being shutdown</param>
        internal void ShutdownNode(Node.NodeShutdownLevel shutdownLevel, bool exitProcess, bool noParentNotification)
        {
            if (node != null)
            {
                try
                {
                    node.ShutdownNode(shutdownLevel);
                    if (!noParentNotification)
                    {
                        // Write the last event out directly
                        LocalCallDescriptorForShutdownComplete callDescriptor =
                            new LocalCallDescriptorForShutdownComplete(shutdownLevel, node.TotalTaskTime);
                        // Post the message indicating that the shutdown is complete
                        engineCallback.PostMessageToParent(callDescriptor, true);
                    }
                }
                catch (Exception e)
                {
                    // During an error shutdown further failures are expected and ignored;
                    // otherwise surface them through the non-fatal reporting path.
                    if (shutdownLevel != Node.NodeShutdownLevel.ErrorShutdown)
                    {
                        ReportNonFatalCommunicationError(e);
                    }
                }
            }
            // If the shutdownLevel is not a build complete message, then this means there was a politeshutdown or an error shutdown, null the node out
            // as either it is no longer needed due to the node goign idle or there was a error and it is now in a bad state.
            if (shutdownLevel != Node.NodeShutdownLevel.BuildCompleteSuccess &&
                shutdownLevel != Node.NodeShutdownLevel.BuildCompleteFailure)
            {
                node = null;
                notInUseEvent.Set();
            }
            if (exitProcess)
            {
                // Even if we completed a build, if we are goign to exit the process we need to null out the node and set the notInUseEvent, this is
                // accomplished by calling this method again with the ErrorShutdown handle
                if ( shutdownLevel == Node.NodeShutdownLevel.BuildCompleteSuccess || shutdownLevel == Node.NodeShutdownLevel.BuildCompleteFailure )
                {
                    ShutdownNode(Node.NodeShutdownLevel.ErrorShutdown, false, true);
                }
                // Signal all the communication threads to exit
                shutdownEvent.Set();
            }
        }
        /// <summary>
        /// This methods activates the local node
        /// </summary>
        /// <param name="environmentVariables">Environment the build should run under; replaces this process's environment wholesale.</param>
        /// <param name="nodeLoggers">Loggers to attach to the hosted node.</param>
        /// <param name="nodeId">Id assigned to the hosted node by the parent engine.</param>
        /// <param name="parentGlobalProperties">Global properties inherited from the parent build.</param>
        /// <param name="toolsetSearchLocations">Where the hosted engine should look for toolset definitions.</param>
        /// <param name="parentId">Process id of the parent engine, used for liveness checks.</param>
        /// <param name="parentStartupDirectory">Startup directory of the parent process.</param>
        internal void Activate
        (
            Hashtable environmentVariables,
            LoggerDescription[] nodeLoggers,
            int nodeId,
            BuildPropertyGroup parentGlobalProperties,
            ToolsetDefinitionLocations toolsetSearchLocations,
            int parentId,
            string parentStartupDirectory
        )
        {
            ErrorUtilities.VerifyThrow(node == null, "Expected node to be null on activation.");
            this.parentProcessId = parentId;
            engineCallback.Reset();
            inUseEvent.Set();
            // Clear the environment so that we dont have extra variables laying around, this
            // may be a performance hog but needs to be done
            // (GetEnvironmentVariables snapshots first, so mutating while iterating is safe)
            IDictionary variableDictionary = Environment.GetEnvironmentVariables();
            foreach (string variableName in variableDictionary.Keys)
            {
                Environment.SetEnvironmentVariable(variableName, null);
            }
            // Install the environment supplied by the parent.
            foreach(string key in environmentVariables.Keys)
            {
                Environment.SetEnvironmentVariable(key,(string)environmentVariables[key]);
            }
            // Host the msbuild engine and system
            node = new Node(nodeId, nodeLoggers, engineCallback, parentGlobalProperties, toolsetSearchLocations, parentStartupDirectory);
            // Write the initialization complete event out directly
            LocalCallDescriptorForInitializationComplete callDescriptor =
                new LocalCallDescriptorForInitializationComplete(Process.GetCurrentProcess().Id);
            // Post the message indicating that the initialization is complete
            engineCallback.PostMessageToParent(callDescriptor, true);
        }
/// <summary>
/// This method checks is the parent process has not exited
/// </summary>
/// <returns>True if the parent process is still alive</returns>
private bool IsParentProcessAlive()
{
bool isParentAlive = true;
try
{
// Check if the parent is still there
if (Process.GetProcessById(parentProcessId).HasExited)
{
isParentAlive = false;
}
}
catch (ArgumentException)
{
isParentAlive = false;
}
if (!isParentAlive)
{
// No logging's going to reach the parent at this point:
// indicate on the console what's going on
string message = ResourceUtilities.FormatResourceString("ParentProcessUnexpectedlyDied", node.NodeId);
Console.WriteLine(message);
}
return isParentAlive;
}
/// <summary>
/// Any error occuring in the shared memory transport is considered to be fatal
/// </summary>
/// <param name="originalException"></param>
/// <exception cref="Exception">Re-throws exception passed in</exception>
internal void ReportFatalCommunicationError(Exception originalException)
{
try
{
DumpExceptionToFile(originalException);
}
finally
{
if (node != null)
{
node.ReportFatalCommunicationError(originalException, null);
}
}
}
/// <summary>
/// This function is used to report exceptions which don't indicate breakdown
/// of communication with the parent
/// </summary>
/// <param name="originalException"></param>
internal void ReportNonFatalCommunicationError(Exception originalException)
{
if (node != null)
{
try
{
DumpExceptionToFile(originalException);
}
finally
{
node.ReportUnhandledError(originalException);
}
}
else
{
// Since there is no node object report rethrow the exception
ReportFatalCommunicationError(originalException);
}
}
#endregion
#region Properties
        /// <summary>
        /// Name of the file that exception dumps for this node are written to,
        /// or null if it has not been generated yet.
        /// </summary>
        internal static string DumpFileName
        {
            get
            {
                return dumpFileName;
            }
        }
#endregion
#region Member data
        // The hosted child build engine; null until Activate() has run
        private Node node;
        // Shared memory channel used to communicate with the parent engine
        private SharedMemory sharedMemory;
        // Callback object through which the hosted node posts messages back to the parent
        private LocalNodeCallback engineCallback;
        // Process id of the parent engine, used for liveness checks
        private int parentProcessId;
        // Number identifying this node host
        private int nodeNumber;
        // Thread that reads incoming calls from the communication channel
        private Thread readerThread;
        // Guards writes to the exception dump file across threads
        private static object dumpFileLocker = new Object();
        // Public named events
        // If this event is set the node host process is currently running
        private static EventWaitHandle globalNodeActive;
        // If this event is set the node is currently running a build
        private static EventWaitHandle globalNodeInUse;
        // If this event exists the node is reserved for use by a particular parent engine
        // the node keeps a handle to this event during builds to prevent it from being used
        // by another parent engine if the original dies
        private static EventWaitHandle globalNodeReserveHandle;
        // If this event is set the node will immediately exit. The event is used by the
        // parent engine to cause the node to exit if communication is lost.
        private static EventWaitHandle globalNodeErrorShutdown;
        // This event is used to cause the child to create the shared memory structures to start communication
        // with the parent
        private static EventWaitHandle globalInitiateActivationEvent;
        // This event is used to indicate to the parent that shared memory buffers have been created and are ready for
        // use
        private static EventWaitHandle globalNodeActivate;
        // Private local events
        private static ManualResetEvent communicationThreadExitEvent = new ManualResetEvent(false);
        private static ManualResetEvent shutdownEvent = new ManualResetEvent(false);
        private static ManualResetEvent notInUseEvent = new ManualResetEvent(false);
        /// <summary>
        /// Indicates the node is now in use. This means the node has received an activate command with initialization
        /// data from the parent process
        /// </summary>
        private static ManualResetEvent inUseEvent = new ManualResetEvent(false);
        /// <summary>
        /// Randomly generated file name for all exceptions thrown by this node that need to be dumped to a file.
        /// (There may be more than one exception, if they occur on different threads.)
        /// </summary>
        private static string dumpFileName = null;
        // Timeouts and constants
        private const int inactivityTimeout = 60 * 1000; // 60 seconds of inactivity to exit
        private const int parentCheckInterval = 5 * 1000; // Check if the parent process is there every 5 seconds
#endregion
}
}
| nikson/msbuild | src/OrcasEngine/LocalProvider/LocalNode.cs | C# | mit | 28,537 |
/**
* @license
* Copyright 2013 Palantir Technologies, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import * as ts from "typescript";
import * as Lint from "../index";
/**
 * Declaration of the TSLint `no-parameter-properties` rule.
 *
 * Generated .d.ts surface: the implementation lives in the corresponding .js file.
 */
export declare class Rule extends Lint.Rules.AbstractRule {
    /** Rule metadata consumed by the TSLint engine (name, description, options). */
    static metadata: Lint.IRuleMetadata;
    /** Builds the failure message reported for the parameter named `ident`. */
    static FAILURE_STRING_FACTORY(ident: string): string;
    /** Applies the rule to a source file and returns the lint failures found. */
    apply(sourceFile: ts.SourceFile): Lint.RuleFailure[];
}
| AxelSparkster/axelsparkster.github.io | node_modules/tslint/lib/rules/noParameterPropertiesRule.d.ts | TypeScript | mit | 911 |
<?php
/**
 * Single post template.
 *
 * Renders the post title, featured image and content, then page links,
 * post meta, the comments template and previous/next post navigation.
 * Comment-only PHP tags are used here so the rendered markup is unchanged.
 */
?>
<?php get_header(); // load header.php ?>
<?php if ( have_posts() ) while ( have_posts() ) : the_post(); ?>
<article role="main" class="primary-content type-post" id="post-<?php the_ID(); ?>">
	<header>
		<h1><?php the_title(); ?></h1>
	</header>
	<?php the_post_thumbnail('full'); // full-size featured image, if one is set ?>
	<?php the_content(); ?>
	<?php wp_link_pages( array( 'before' => '<div class="page-link">' . __( 'Pages:' ), 'after' => '</div>' ) ); ?>
	<footer class="entry-meta">
		<p>Posted <strong><?php echo human_time_diff(get_the_time('U'), current_time('timestamp')) . ' ago'; ?></strong> on <time datetime="<?php the_time('l, F jS, Y') ?>" pubdate><?php the_time('l, F jS, Y') ?></time> &middot; <a href="<?php the_permalink(); ?>">Permalink</a></p>
	</footer>
	<?php comments_template( '', true ); // load comments.php ?>
	<ul class="navigation">
		<li class="older">
			<?php previous_post_link( '%link', '&larr; %title' ); ?>
		</li>
		<li class="newer">
			<?php next_post_link( '%link', '%title &rarr;' ); ?>
		</li>
	</ul>
<?php endwhile; // end of the loop. ?>
</article>
<?php get_footer(); // load footer.php ?>
<?php get_footer(); ?> | evanhuntley/ftg | single.php | PHP | mit | 1,237 |
'use strict';
describe('Controller: AboutCtrl', function () {
  var AboutCtrl;
  var scope;

  // Load the application module under test.
  beforeEach(module('e01App'));

  // Instantiate the controller with a fresh mock scope before each spec.
  beforeEach(inject(function ($controller, $rootScope) {
    scope = $rootScope.$new();
    AboutCtrl = $controller('AboutCtrl', {
      $scope: scope
      // place here mocked dependencies
    });
  }));

  it('should attach a list of awesomeThings to the scope', function () {
    expect(AboutCtrl.awesomeThings.length).toBe(3);
  });
});
| malaniz/cursoAngular | cl06ClienteB/test/spec/controllers/about.js | JavaScript | mit | 550 |
// Doxygen-generated search index: maps the encoded search term "digitalpin_2eh"
// to the documentation page for DigitalPin.h. Do not edit by hand.
var searchData=
[
  ['digitalpin_2eh',['DigitalPin.h',['../_digital_pin_8h.html',1,'']]]
];
| cnorfleet/Feeder | libraries/SdFatCopy/html/search/files_2.js | JavaScript | mit | 92 |
namespace GraphQL.Types
{
    /// <summary>
    /// Graph type representing a GraphQL union.
    /// At this revision it adds no members beyond <see cref="GraphType"/>;
    /// possible member types are presumably registered elsewhere.
    /// </summary>
    public class UnionGraphType : GraphType
    {
    }
}
package org.knowm.xchange.ripple;
import static org.assertj.core.api.Assertions.assertThat;
import com.fasterxml.jackson.core.JsonParseException;
import com.fasterxml.jackson.databind.JsonMappingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.IOException;
import java.io.InputStream;
import java.math.BigDecimal;
import java.math.RoundingMode;
import java.text.ParseException;
import org.junit.Test;
import org.knowm.xchange.currency.Currency;
import org.knowm.xchange.currency.CurrencyPair;
import org.knowm.xchange.dto.Order.OrderType;
import org.knowm.xchange.dto.account.AccountInfo;
import org.knowm.xchange.dto.account.Balance;
import org.knowm.xchange.dto.account.Wallet;
import org.knowm.xchange.dto.marketdata.OrderBook;
import org.knowm.xchange.dto.trade.LimitOrder;
import org.knowm.xchange.dto.trade.OpenOrders;
import org.knowm.xchange.dto.trade.UserTrade;
import org.knowm.xchange.ripple.dto.account.ITransferFeeSource;
import org.knowm.xchange.ripple.dto.account.RippleAccountBalances;
import org.knowm.xchange.ripple.dto.account.RippleAccountSettings;
import org.knowm.xchange.ripple.dto.marketdata.RippleOrderBook;
import org.knowm.xchange.ripple.dto.trade.IRippleTradeTransaction;
import org.knowm.xchange.ripple.dto.trade.RippleAccountOrders;
import org.knowm.xchange.ripple.dto.trade.RippleLimitOrder;
import org.knowm.xchange.ripple.dto.trade.RippleOrderTransaction;
import org.knowm.xchange.ripple.dto.trade.RipplePaymentTransaction;
import org.knowm.xchange.ripple.dto.trade.RippleUserTrade;
import org.knowm.xchange.ripple.service.params.RippleMarketDataParams;
import org.knowm.xchange.ripple.service.params.RippleTradeHistoryParams;
import org.knowm.xchange.service.trade.params.TradeHistoryParams;
/**
 * Unit tests for {@code RippleAdapters}: verifies conversion of Ripple REST API responses,
 * parsed from JSON fixture files on the test classpath, into the generic XChange DTOs for
 * account balances, order books, open orders and trade history.
 *
 * <p>The test class itself implements {@link ITransferFeeSource} so that adapter methods
 * which need an issuing gateway's transfer fee read it from a per-address fixture file
 * instead of the network.
 */
public class RippleAdaptersTest implements ITransferFeeSource {
  /** Adapts an account balance fixture and checks the resulting wallets and balances. */
  @Test
  public void adaptAccountInfoTest() throws IOException {
    // Read in the JSON from the example resources
    final InputStream is =
        getClass()
            .getResourceAsStream(
                "/org/knowm/xchange/ripple/dto/account/example-account-balances.json");
    // Use Jackson to parse it
    final ObjectMapper mapper = new ObjectMapper();
    final RippleAccountBalances rippleAccount = mapper.readValue(is, RippleAccountBalances.class);
    // Convert to xchange object and check field values
    final AccountInfo account = RippleAdapters.adaptAccountInfo(rippleAccount, "username");
    assertThat(account.getWallets()).hasSize(2);
    assertThat(account.getUsername()).isEqualTo("username");
    assertThat(account.getTradingFee()).isEqualTo(BigDecimal.ZERO);
    final Wallet counterWallet = account.getWallet("rvYAfWj5gh67oV6fW32ZzP3Aw4Eubs59B");
    assertThat(counterWallet.getId()).isEqualTo("rvYAfWj5gh67oV6fW32ZzP3Aw4Eubs59B");
    assertThat(counterWallet.getBalances()).hasSize(2);
    final Balance btcBalance = counterWallet.getBalance(Currency.BTC);
    assertThat(btcBalance.getTotal()).isEqualTo("0.038777349225374");
    assertThat(btcBalance.getCurrency()).isEqualTo(Currency.BTC);
    final Balance usdBalance = counterWallet.getBalance(Currency.USD);
    assertThat(usdBalance.getTotal()).isEqualTo("10");
    assertThat(usdBalance.getCurrency()).isEqualTo(Currency.USD);
    final Wallet mainWallet = account.getWallet("main");
    assertThat(mainWallet.getBalances()).hasSize(1);
    final Balance xrpBalance = mainWallet.getBalance(Currency.XRP);
    assertThat(xrpBalance.getTotal()).isEqualTo("861.401578");
    assertThat(xrpBalance.getCurrency()).isEqualTo(Currency.XRP);
  }
  /** Adapts an order book fixture and checks bid/ask sides, counterparty and prices. */
  @Test
  public void adaptOrderBookTest() throws IOException {
    // Read in the JSON from the example resources
    final InputStream is =
        getClass()
            .getResourceAsStream(
                "/org/knowm/xchange/ripple/dto/marketdata/example-order-book.json");
    final CurrencyPair currencyPair = CurrencyPair.XRP_BTC;
    // Test data uses Bitstamp issued BTC
    final RippleMarketDataParams params = new RippleMarketDataParams();
    params.setCounterCounterparty("rvYAfWj5gh67oV6fW32ZzP3Aw4Eubs59B");
    // Use Jackson to parse it
    final ObjectMapper mapper = new ObjectMapper();
    final RippleOrderBook rippleOrderBook = mapper.readValue(is, RippleOrderBook.class);
    // Convert to xchange object and check field values
    final OrderBook orderBook =
        RippleAdapters.adaptOrderBook(rippleOrderBook, params, currencyPair);
    assertThat(orderBook.getBids()).hasSize(10);
    assertThat(orderBook.getAsks()).hasSize(10);
    final LimitOrder lastBid = orderBook.getBids().get(9);
    assertThat(lastBid).isInstanceOf(RippleLimitOrder.class);
    assertThat(lastBid.getCurrencyPair()).isEqualTo(currencyPair);
    assertThat(((RippleLimitOrder) lastBid).getCounterCounterparty())
        .isEqualTo("rvYAfWj5gh67oV6fW32ZzP3Aw4Eubs59B");
    assertThat(lastBid.getType()).isEqualTo(OrderType.BID);
    assertThat(lastBid.getId()).isEqualTo("1303704");
    assertThat(lastBid.getOriginalAmount()).isEqualTo("66314.537782");
    assertThat(lastBid.getLimitPrice()).isEqualTo("0.00003317721777288062");
    final LimitOrder firstAsk = orderBook.getAsks().get(0);
    assertThat(firstAsk).isInstanceOf(RippleLimitOrder.class);
    assertThat(firstAsk.getCurrencyPair()).isEqualTo(currencyPair);
    assertThat(((RippleLimitOrder) firstAsk).getCounterCounterparty())
        .isEqualTo("rvYAfWj5gh67oV6fW32ZzP3Aw4Eubs59B");
    assertThat(firstAsk.getType()).isEqualTo(OrderType.ASK);
    assertThat(firstAsk.getId()).isEqualTo("1011310");
    assertThat(firstAsk.getOriginalAmount()).isEqualTo("35447.914936");
    assertThat(firstAsk.getLimitPrice()).isEqualTo("0.00003380846624897726");
  }
  /** Adapts an open orders fixture, checking order side, price derivation and counterparty. */
  @Test
  public void adaptOpenOrdersTest() throws JsonParseException, JsonMappingException, IOException {
    final RippleExchange exchange = new RippleExchange();
    final int roundingScale = exchange.getRoundingScale();
    // Read in the JSON from the example resources
    final InputStream is =
        getClass()
            .getResourceAsStream("/org/knowm/xchange/ripple/dto/trade/example-account-orders.json");
    final ObjectMapper mapper = new ObjectMapper();
    final RippleAccountOrders response = mapper.readValue(is, RippleAccountOrders.class);
    // Convert to XChange orders
    final OpenOrders orders = RippleAdapters.adaptOpenOrders(response, roundingScale);
    assertThat(orders.getOpenOrders()).hasSize(12);
    final LimitOrder firstOrder = orders.getOpenOrders().get(0);
    assertThat(firstOrder).isInstanceOf(RippleLimitOrder.class);
    assertThat(firstOrder.getCurrencyPair()).isEqualTo(CurrencyPair.XRP_BTC);
    assertThat(((RippleLimitOrder) firstOrder).getCounterCounterparty())
        .isEqualTo("rvYAfWj5gh67oV6fW32ZzP3Aw4Eubs59B");
    assertThat(firstOrder.getId()).isEqualTo("5");
    assertThat(firstOrder.getLimitPrice()).isEqualTo("0.00003226");
    assertThat(firstOrder.getTimestamp()).isNull();
    assertThat(firstOrder.getOriginalAmount()).isEqualTo("1");
    assertThat(firstOrder.getType()).isEqualTo(OrderType.BID);
    final LimitOrder secondOrder = orders.getOpenOrders().get(1);
    assertThat(secondOrder).isInstanceOf(RippleLimitOrder.class);
    assertThat(secondOrder.getCurrencyPair()).isEqualTo(CurrencyPair.XRP_BTC);
    assertThat(((RippleLimitOrder) secondOrder).getCounterCounterparty())
        .isEqualTo("rvYAfWj5gh67oV6fW32ZzP3Aw4Eubs59B");
    assertThat(secondOrder.getId()).isEqualTo("7");
    // Price = 15159.38551342023 / 123.123456
    assertThat(secondOrder.getLimitPrice())
        .isEqualTo("123.12345677999998635515884154518859509596611713043533");
    assertThat(secondOrder.getTimestamp()).isNull();
    assertThat(secondOrder.getOriginalAmount()).isEqualTo("123.123456");
    assertThat(secondOrder.getType()).isEqualTo(OrderType.ASK);
  }
  /**
   * Stub {@link ITransferFeeSource} implementation: returns the transfer fee rate for an
   * address by reading the matching account-settings fixture file instead of the network.
   */
  @Override
  public BigDecimal getTransferFeeRate(final String address) throws IOException {
    final InputStream is =
        getClass()
            .getResourceAsStream(
                String.format(
                    "/org/knowm/xchange/ripple/dto/account/example-account-settings-%s.json",
                    address));
    final ObjectMapper mapper = new ObjectMapper();
    return mapper.readValue(is, RippleAccountSettings.class).getSettings().getTransferFeeRate();
  }
  /** Trade fixture: buy XRP / sell BTC, preferred counter currency BTC; fee on the counter side. */
  @Test
  public void adaptTrade_BuyXRP_SellBTC()
      throws JsonParseException, JsonMappingException, IOException, ParseException {
    final RippleExchange exchange = new RippleExchange();
    final int roundingScale = exchange.getRoundingScale();
    // Read the trade JSON from the example resources
    final InputStream is =
        getClass()
            .getResourceAsStream(
                "/org/knowm/xchange/ripple/dto/trade/example-trade-buyXRP-sellBTC.json");
    final ObjectMapper mapper = new ObjectMapper();
    final RippleOrderTransaction response = mapper.readValue(is, RippleOrderTransaction.class);
    final RippleTradeHistoryParams params = new RippleTradeHistoryParams();
    params.addPreferredCounterCurrency(Currency.BTC);
    final UserTrade trade = RippleAdapters.adaptTrade(response, params, this, roundingScale);
    assertThat(trade.getCurrencyPair()).isEqualTo(CurrencyPair.XRP_BTC);
    assertThat(trade.getFeeAmount()).isEqualTo("0.012");
    assertThat(trade.getFeeCurrency()).isEqualTo(Currency.XRP);
    assertThat(trade.getId())
        .isEqualTo("0000000000000000000000000000000000000000000000000000000000000000");
    assertThat(trade.getOrderId()).isEqualTo("1010");
    // Price = 0.000029309526038 * 0.998
    assertThat(trade.getPrice())
        .isEqualTo(
            new BigDecimal("0.000029250906985924")
                .setScale(roundingScale, RoundingMode.HALF_UP)
                .stripTrailingZeros());
    assertThat(trade.getTimestamp()).isEqualTo(RippleExchange.ToDate("2000-00-00T00:00:00.000Z"));
    assertThat(trade.getOriginalAmount()).isEqualTo("1");
    assertThat(trade.getType()).isEqualTo(OrderType.BID);
    assertThat(trade).isInstanceOf(RippleUserTrade.class);
    final RippleUserTrade ripple = (RippleUserTrade) trade;
    assertThat(ripple.getBaseCounterparty()).isEmpty();
    assertThat(ripple.getBaseTransferFee()).isZero();
    assertThat(ripple.getBaseTransferFeeCurrency()).isEqualTo(Currency.XRP);
    assertThat(ripple.getBaseTransferFeeCurrency()).isEqualTo(trade.getCurrencyPair().base);
    assertThat(ripple.getCounterCounterparty()).isEqualTo("rMwjYedjc7qqtKYVLiAccJSmCwih4LnE2q");
    // Transfer fee = 0.000029309526038 * 0.002
    assertThat(ripple.getCounterTransferFee()).isEqualTo("0.000000058619052076");
    assertThat(ripple.getCounterTransferFeeCurrency()).isEqualTo(Currency.BTC);
    assertThat(ripple.getCounterTransferFeeCurrency()).isEqualTo(trade.getCurrencyPair().counter);
  }
  /** Same fixture as above but with BTC as the preferred base, flipping pair, side and price. */
  @Test
  public void adaptTrade_SellBTC_BuyXRP()
      throws JsonParseException, JsonMappingException, IOException, ParseException {
    final RippleExchange exchange = new RippleExchange();
    final int roundingScale = exchange.getRoundingScale();
    // Read the trade JSON from the example resources
    final InputStream is =
        getClass()
            .getResourceAsStream(
                "/org/knowm/xchange/ripple/dto/trade/example-trade-buyXRP-sellBTC.json");
    final ObjectMapper mapper = new ObjectMapper();
    final RippleOrderTransaction response = mapper.readValue(is, RippleOrderTransaction.class);
    final RippleTradeHistoryParams params = new RippleTradeHistoryParams();
    params.addPreferredBaseCurrency(Currency.BTC);
    final UserTrade trade = RippleAdapters.adaptTrade(response, params, this, roundingScale);
    assertThat(trade.getCurrencyPair()).isEqualTo(CurrencyPair.BTC_XRP);
    assertThat(trade.getFeeAmount()).isEqualTo("0.012");
    assertThat(trade.getFeeCurrency()).isEqualTo(Currency.XRP);
    assertThat(trade.getId())
        .isEqualTo("0000000000000000000000000000000000000000000000000000000000000000");
    assertThat(trade.getOrderId()).isEqualTo("1010");
    // Price = 1.0 / (0.000029309526038 * 0.998)
    assertThat(trade.getPrice())
        .isEqualTo(
            new BigDecimal("34186.97411609205306550363511634115030681332485583111528")
                .setScale(roundingScale, RoundingMode.HALF_UP));
    assertThat(trade.getTimestamp()).isEqualTo(RippleExchange.ToDate("2000-00-00T00:00:00.000Z"));
    // Quantity = 0.000029309526038 * 0.998
    assertThat(trade.getOriginalAmount()).isEqualTo("0.000029250906985924");
    assertThat(trade.getType()).isEqualTo(OrderType.ASK);
    assertThat(trade).isInstanceOf(RippleUserTrade.class);
    final RippleUserTrade ripple = (RippleUserTrade) trade;
    assertThat(ripple.getBaseCounterparty()).isEqualTo("rMwjYedjc7qqtKYVLiAccJSmCwih4LnE2q");
    // Transfer fee = 0.000029309526038 * 0.002
    assertThat(ripple.getBaseTransferFee()).isEqualTo("0.000000058619052076");
    assertThat(ripple.getBaseTransferFeeCurrency()).isEqualTo(Currency.BTC);
    assertThat(ripple.getBaseTransferFeeCurrency()).isEqualTo(trade.getCurrencyPair().base);
    assertThat(ripple.getCounterCounterparty()).isEmpty();
    assertThat(ripple.getCounterTransferFee()).isZero();
    assertThat(ripple.getCounterTransferFeeCurrency()).isEqualTo(Currency.XRP);
    assertThat(ripple.getCounterTransferFeeCurrency()).isEqualTo(trade.getCurrencyPair().counter);
  }
  /** Sell XRP / buy BTC fixture; also verifies the adapter is idempotent on re-adaptation. */
  @Test
  public void adaptTrade_SellXRP_BuyBTC()
      throws JsonParseException, JsonMappingException, IOException, ParseException {
    final RippleExchange exchange = new RippleExchange();
    final int roundingScale = exchange.getRoundingScale();
    // Read the trade JSON from the example resources
    final InputStream is =
        getClass()
            .getResourceAsStream(
                "/org/knowm/xchange/ripple/dto/trade/example-trade-sellXRP-buyBTC.json");
    final ObjectMapper mapper = new ObjectMapper();
    final IRippleTradeTransaction response = mapper.readValue(is, RippleOrderTransaction.class);
    final RippleTradeHistoryParams params = new RippleTradeHistoryParams();
    params.setCurrencyPair(CurrencyPair.XRP_BTC);
    final UserTrade trade = RippleAdapters.adaptTrade(response, params, this, roundingScale);
    assertThat(trade.getCurrencyPair()).isEqualTo(CurrencyPair.XRP_BTC);
    assertThat(trade.getFeeAmount()).isEqualTo("0.012");
    assertThat(trade.getFeeCurrency()).isEqualTo(Currency.XRP);
    assertThat(trade.getId())
        .isEqualTo("1111111111111111111111111111111111111111111111111111111111111111");
    assertThat(trade.getOrderId()).isEqualTo("1111");
    assertThat(trade.getPrice())
        .isEqualTo(
            new BigDecimal("0.000028572057152")
                .setScale(roundingScale, RoundingMode.HALF_UP)
                .stripTrailingZeros());
    assertThat(trade.getTimestamp()).isEqualTo(RippleExchange.ToDate("2011-11-11T11:11:11.111Z"));
    assertThat(trade.getOriginalAmount()).isEqualTo("1");
    assertThat(trade.getType()).isEqualTo(OrderType.ASK);
    assertThat(trade).isInstanceOf(RippleUserTrade.class);
    final RippleUserTrade ripple = (RippleUserTrade) trade;
    assertThat(ripple.getBaseCounterparty()).isEmpty();
    assertThat(ripple.getBaseTransferFee()).isZero();
    assertThat(ripple.getBaseTransferFeeCurrency()).isEqualTo(Currency.XRP);
    assertThat(ripple.getBaseTransferFeeCurrency()).isEqualTo(trade.getCurrencyPair().base);
    assertThat(ripple.getCounterCounterparty()).isEqualTo("rMwjYedjc7qqtKYVLiAccJSmCwih4LnE2q");
    assertThat(ripple.getCounterTransferFee()).isZero();
    assertThat(ripple.getCounterTransferFeeCurrency()).isEqualTo(Currency.BTC);
    assertThat(ripple.getCounterTransferFeeCurrency()).isEqualTo(trade.getCurrencyPair().counter);
    // make sure that if the IRippleTradeTransaction is adapted again it returns the same values
    final UserTrade trade2 = RippleAdapters.adaptTrade(response, params, this, roundingScale);
    assertThat(trade2.getCurrencyPair()).isEqualTo(trade.getCurrencyPair());
    assertThat(trade2.getFeeAmount()).isEqualTo(trade.getFeeAmount());
    assertThat(trade2.getFeeCurrency()).isEqualTo(trade.getFeeCurrency());
    assertThat(trade2.getId()).isEqualTo(trade.getId());
    assertThat(trade2.getOrderId()).isEqualTo(trade.getOrderId());
    assertThat(trade2.getPrice()).isEqualTo(trade.getPrice());
    assertThat(trade2.getTimestamp()).isEqualTo(trade.getTimestamp());
    assertThat(trade2.getOriginalAmount()).isEqualTo(trade.getOriginalAmount());
    assertThat(trade2.getType()).isEqualTo(trade.getType());
  }
  /** Same sell-XRP fixture but with BTC as the preferred base, flipping pair, side and price. */
  @Test
  public void adaptTrade_BuyBTC_SellXRP()
      throws JsonParseException, JsonMappingException, IOException, ParseException {
    final RippleExchange exchange = new RippleExchange();
    final int roundingScale = exchange.getRoundingScale();
    // Read the trade JSON from the example resources
    final InputStream is =
        getClass()
            .getResourceAsStream(
                "/org/knowm/xchange/ripple/dto/trade/example-trade-sellXRP-buyBTC.json");
    final ObjectMapper mapper = new ObjectMapper();
    final RippleOrderTransaction response = mapper.readValue(is, RippleOrderTransaction.class);
    final RippleTradeHistoryParams params = new RippleTradeHistoryParams();
    params.addPreferredBaseCurrency(Currency.BTC);
    final UserTrade trade = RippleAdapters.adaptTrade(response, params, this, roundingScale);
    assertThat(trade.getCurrencyPair()).isEqualTo(CurrencyPair.BTC_XRP);
    assertThat(trade.getFeeAmount()).isEqualTo("0.012");
    assertThat(trade.getFeeCurrency()).isEqualTo(Currency.XRP);
    assertThat(trade.getId())
        .isEqualTo("1111111111111111111111111111111111111111111111111111111111111111");
    assertThat(trade.getOrderId()).isEqualTo("1111");
    // Price = 1.0 / 0.000028572057152
    assertThat(trade.getPrice())
        .isEqualTo(
            new BigDecimal("34999.23000574012011552062010939099496310569328655387396")
                .setScale(roundingScale, RoundingMode.HALF_UP)
                .stripTrailingZeros());
    assertThat(trade.getTimestamp()).isEqualTo(RippleExchange.ToDate("2011-11-11T11:11:11.111Z"));
    assertThat(trade.getOriginalAmount()).isEqualTo("0.000028572057152");
    assertThat(trade.getType()).isEqualTo(OrderType.BID);
    assertThat(trade).isInstanceOf(RippleUserTrade.class);
    final RippleUserTrade ripple = (RippleUserTrade) trade;
    assertThat(ripple.getBaseCounterparty()).isEqualTo("rMwjYedjc7qqtKYVLiAccJSmCwih4LnE2q");
    assertThat(ripple.getBaseTransferFee()).isZero();
    assertThat(ripple.getBaseTransferFeeCurrency()).isEqualTo(Currency.BTC);
    assertThat(ripple.getBaseTransferFeeCurrency()).isEqualTo(trade.getCurrencyPair().base);
    assertThat(ripple.getCounterCounterparty()).isEmpty();
    assertThat(ripple.getCounterTransferFee()).isZero();
    assertThat(ripple.getCounterTransferFeeCurrency()).isEqualTo(Currency.XRP);
    assertThat(ripple.getCounterTransferFeeCurrency()).isEqualTo(trade.getCurrencyPair().counter);
  }
  /** BTC-for-BTC trade fixture (different issuers on each side) with transfer fees on both legs. */
  @Test
  public void adaptTrade_BuyBTC_SellBTC()
      throws JsonParseException, JsonMappingException, IOException, ParseException {
    final RippleExchange exchange = new RippleExchange();
    final int roundingScale = exchange.getRoundingScale();
    // Read the trade JSON from the example resources
    final InputStream is =
        getClass()
            .getResourceAsStream(
                "/org/knowm/xchange/ripple/dto/trade/example-trade-buyBTC-sellBTC.json");
    final ObjectMapper mapper = new ObjectMapper();
    final RippleOrderTransaction response = mapper.readValue(is, RippleOrderTransaction.class);
    final TradeHistoryParams params = new TradeHistoryParams() {};
    final UserTrade trade = RippleAdapters.adaptTrade(response, params, this, roundingScale);
    assertThat(trade.getCurrencyPair().base).isEqualTo(Currency.BTC);
    assertThat(trade.getCurrencyPair().counter).isEqualTo(Currency.BTC);
    assertThat(trade.getFeeAmount()).isEqualTo("0.012");
    assertThat(trade.getFeeCurrency()).isEqualTo(Currency.XRP);
    assertThat(trade.getId())
        .isEqualTo("2222222222222222222222222222222222222222222222222222222222222222");
    assertThat(trade.getOrderId()).isEqualTo("2222");
    // Price = 0.501 * 0.998 / 0.50150835545121407952
    assertThat(trade.getPrice())
        .isEqualTo(
            new BigDecimal("0.99698837430165008596385145696065600512973847422746")
                .setScale(roundingScale, RoundingMode.HALF_UP));
    assertThat(trade.getTimestamp()).isEqualTo(RippleExchange.ToDate("2022-22-22T22:22:22.222Z"));
    assertThat(trade.getOriginalAmount()).isEqualTo("0.50150835545121407952");
    assertThat(trade.getType()).isEqualTo(OrderType.BID);
    assertThat(trade).isInstanceOf(RippleUserTrade.class);
    final RippleUserTrade ripple = (RippleUserTrade) trade;
    assertThat(ripple.getBaseCounterparty()).isEqualTo("rMwjYedjc7qqtKYVLiAccJSmCwih4LnE2q");
    assertThat(ripple.getBaseTransferFee()).isZero();
    assertThat(ripple.getBaseTransferFeeCurrency()).isEqualTo(Currency.BTC);
    assertThat(ripple.getBaseTransferFeeCurrency()).isEqualTo(trade.getCurrencyPair().base);
    assertThat(ripple.getCounterCounterparty()).isEqualTo("rvYAfWj5gh67oV6fW32ZzP3Aw4Eubs59B");
    // Transfer fee = 0.501 * 0.002
    assertThat(ripple.getCounterTransferFee()).isEqualTo("0.001002");
    assertThat(ripple.getCounterTransferFeeCurrency()).isEqualTo(Currency.BTC);
    assertThat(ripple.getCounterTransferFeeCurrency()).isEqualTo(trade.getCurrencyPair().counter);
  }
  /** Payment (pass-through) fixture adapted as a trade; fee is deducted from the XRP amount. */
  @Test
  public void adaptTrade_PaymentPassthrough()
      throws JsonParseException, JsonMappingException, IOException, ParseException {
    final RippleExchange exchange = new RippleExchange();
    final int roundingScale = exchange.getRoundingScale();
    // Read the trade JSON from the example resources
    final InputStream is =
        getClass()
            .getResourceAsStream(
                "/org/knowm/xchange/ripple/dto/trade/example-payment-passthrough.json");
    final ObjectMapper mapper = new ObjectMapper();
    final RipplePaymentTransaction response = mapper.readValue(is, RipplePaymentTransaction.class);
    final TradeHistoryParams params = new TradeHistoryParams() {};
    final UserTrade trade = RippleAdapters.adaptTrade(response, params, this, roundingScale);
    assertThat(trade.getCurrencyPair().base).isEqualTo(Currency.XRP);
    assertThat(trade.getCurrencyPair().counter).isEqualTo(Currency.BTC);
    assertThat(trade.getFeeAmount()).isEqualTo("0.012");
    assertThat(trade.getFeeCurrency()).isEqualTo(Currency.XRP);
    assertThat(trade.getId())
        .isEqualTo("GHRE072948B95345396B2D9A364363GDE521HRT67QQRGGRTHYTRUP0RRB631107");
    assertThat(trade.getOrderId()).isEqualTo("9338");
    // Price = 0.009941478580724 / (349.559725 - 0.012)
    assertThat(trade.getPrice())
        .isEqualTo(
            new BigDecimal("0.00002844097635229638527900589254299967193321026478")
                .setScale(roundingScale, RoundingMode.HALF_UP));
    assertThat(trade.getTimestamp()).isEqualTo(RippleExchange.ToDate("2015-08-07T03:58:10.000Z"));
    assertThat(trade.getOriginalAmount()).isEqualTo("349.547725");
    assertThat(trade.getType()).isEqualTo(OrderType.ASK);
    assertThat(trade).isInstanceOf(RippleUserTrade.class);
    final RippleUserTrade ripple = (RippleUserTrade) trade;
    assertThat(ripple.getBaseCounterparty()).isEqualTo("");
    assertThat(ripple.getBaseTransferFee()).isZero();
    assertThat(ripple.getBaseTransferFeeCurrency()).isEqualTo(Currency.XRP);
    assertThat(ripple.getBaseTransferFeeCurrency()).isEqualTo(trade.getCurrencyPair().base);
    assertThat(ripple.getCounterCounterparty()).isEqualTo("rMwjYedjc7qqtKYVLiAccJSmCwih4LnE2q");
    // Transfer fee = 0.501 * 0.002
    assertThat(ripple.getCounterTransferFee()).isEqualTo("0");
    assertThat(ripple.getCounterTransferFeeCurrency()).isEqualTo(Currency.BTC);
    assertThat(ripple.getCounterTransferFeeCurrency()).isEqualTo(trade.getCurrencyPair().counter);
  }
}
| timmolter/XChange | xchange-ripple/src/test/java/org/knowm/xchange/ripple/RippleAdaptersTest.java | Java | mit | 24,065 |
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
namespace ScintillaNET
{
    /// <summary>
    /// Lexer property types.
    /// Values map directly to the native Scintilla SC_TYPE_* constants.
    /// </summary>
    public enum PropertyType
    {
        /// <summary>
        /// A Boolean property. This is the default.
        /// </summary>
        Boolean = NativeMethods.SC_TYPE_BOOLEAN,

        /// <summary>
        /// An integer property.
        /// </summary>
        Integer = NativeMethods.SC_TYPE_INTEGER,

        /// <summary>
        /// A string property.
        /// </summary>
        String = NativeMethods.SC_TYPE_STRING
    }
}
| cqwang/ScintillaNET | src/ScintillaNET/PropertyType.cs | C# | mit | 623 |
from __future__ import print_function
import sys
def func():
    """Print the interpreter version, extra argv, and a greeting; return 0.

    Used by pre-commit's test suite to confirm which Python ran the hook.
    """
    # "major.minor" of the interpreter, e.g. "3.8"
    version = sys.version_info
    print('%d.%d' % (version[0], version[1]))
    # Echo any arguments the hook was invoked with, then a fixed greeting.
    print(repr(sys.argv[1:]))
    print('Hello World')
    return 0
| Teino1978-Corp/pre-commit | testing/resources/python3_hooks_repo/python3_hook/main.py | Python | mit | 183 |
/**
* @license
* Copyright Google LLC All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
// Side-effect import: pulls in the JIT compiler before the bundle is inspected.
import '@angular/compiler';
import * as fs from 'fs';
import * as path from 'path';

// Package whose build produces the minified debug bundle under test.
const BUNDLE_PACKAGE = 'angular/packages/core/test/bundling/hello_world_r2';

describe('treeshaking with uglify', () => {
  let content: string;
  // Resolved at module load so a missing bundle fails fast.
  const bundlePath = require.resolve(path.join(BUNDLE_PACKAGE, 'bundle.debug.min.js'));

  beforeAll(() => {
    content = fs.readFileSync(bundlePath, {encoding: 'utf-8'});
  });

  it('should drop unused TypeScript helpers', () => {
    expect(content).not.toContain('__asyncGenerator');
  });

  it('should not contain rxjs from commonjs distro', () => {
    expect(content).not.toContain('commonjsGlobal');
    expect(content).not.toContain('createCommonjsModule');
  });
});
| gkalpak/angular | packages/core/test/bundling/hello_world_r2/treeshaking_spec.ts | TypeScript | mit | 923 |
(function e(t,n,r){function s(o,u){if(!n[o]){if(!t[o]){var a=typeof require=="function"&&require;if(!u&&a)return a(o,!0);if(i)return i(o,!0);var f=new Error("Cannot find module '"+o+"'");throw f.code="MODULE_NOT_FOUND",f}var l=n[o]={exports:{}};t[o][0].call(l.exports,function(e){var n=t[o][1][e];return s(n?n:e)},l,l.exports,e,t,n,r)}return n[o].exports}var i=typeof require=="function"&&require;for(var o=0;o<r.length;o++)s(r[o]);return s})({1:[function(require,module,exports){
/*!
* The buffer module from node.js, for the browser.
*
* @author Feross Aboukhadijeh <feross@feross.org> <http://feross.org>
* @license MIT
*/
var base64 = require('base64-js')
var ieee754 = require('ieee754')
var isArray = require('is-array')
exports.Buffer = Buffer
exports.SlowBuffer = Buffer
exports.INSPECT_MAX_BYTES = 50
Buffer.poolSize = 8192 // not used by this implementation
var kMaxLength = 0x3fffffff
/**
* If `Buffer.TYPED_ARRAY_SUPPORT`:
* === true Use Uint8Array implementation (fastest)
* === false Use Object implementation (most compatible, even IE6)
*
* Browsers that support typed arrays are IE 10+, Firefox 4+, Chrome 7+, Safari 5.1+,
* Opera 11.6+, iOS 4.2+.
*
* Note:
*
* - Implementation must support adding new properties to `Uint8Array` instances.
* Firefox 4-29 lacked support, fixed in Firefox 30+.
* See: https://bugzilla.mozilla.org/show_bug.cgi?id=695438.
*
* - Chrome 9-10 is missing the `TypedArray.prototype.subarray` function.
*
* - IE10 has a broken `TypedArray.prototype.subarray` function which returns arrays of
* incorrect length in some situations.
*
* We detect these buggy browsers and set `Buffer.TYPED_ARRAY_SUPPORT` to `false` so they will
* get the Object implementation, which is slower but will work correctly.
*/
// Feature-detect at load time whether this browser's typed arrays are usable
// as the Buffer backing store (see the capability notes above).
Buffer.TYPED_ARRAY_SUPPORT = (function () {
  try {
    var buf = new ArrayBuffer(0)
    var arr = new Uint8Array(buf)
    arr.foo = function () { return 42 }
    return 42 === arr.foo() && // typed array instances can be augmented
        typeof arr.subarray === 'function' && // chrome 9-10 lack `subarray`
        new Uint8Array(1).subarray(1, 1).byteLength === 0 // ie10 has broken `subarray`
  } catch (e) {
    return false
  }
})()
/**
* Class: Buffer
* =============
*
* The Buffer constructor returns instances of `Uint8Array` that are augmented
* with function properties for all the node `Buffer` API functions. We use
* `Uint8Array` so that square bracket notation works as expected -- it returns
* a single octet.
*
* By augmenting the instances, we can avoid modifying the `Uint8Array`
* prototype.
*/
/**
 * Construct a Buffer from a number (byte length), a string (decoded with
 * `encoding`), an array-like of bytes, or another Buffer.  Callable with or
 * without `new`.  `noZero` skips zero-filling in the object-fallback mode.
 */
function Buffer (subject, encoding, noZero) {
  if (!(this instanceof Buffer))
    return new Buffer(subject, encoding, noZero)

  var type = typeof subject

  // Find the length
  var length
  if (type === 'number')
    length = subject > 0 ? subject >>> 0 : 0
  else if (type === 'string') {
    if (encoding === 'base64')
      subject = base64clean(subject)
    length = Buffer.byteLength(subject, encoding)
  } else if (type === 'object' && subject !== null) { // assume object is array-like
    if (subject.type === 'Buffer' && isArray(subject.data))
      subject = subject.data
    length = +subject.length > 0 ? Math.floor(+subject.length) : 0
  } else
    throw new TypeError('must start with number, buffer, array or string')

  // BUG FIX: this guard previously tested `this.length`, which is still
  // `undefined` at this point (Buffer.prototype.length === undefined), so
  // `undefined > kMaxLength` was always false and the maximum-size check
  // could never fire.  Test the computed `length` instead.
  if (length > kMaxLength)
    throw new RangeError('Attempt to allocate Buffer larger than maximum ' +
      'size: 0x' + kMaxLength.toString(16) + ' bytes')

  var buf
  if (Buffer.TYPED_ARRAY_SUPPORT) {
    // Preferred: Return an augmented `Uint8Array` instance for best performance
    buf = Buffer._augment(new Uint8Array(length))
  } else {
    // Fallback: Return THIS instance of Buffer (created by `new`)
    buf = this
    buf.length = length
    buf._isBuffer = true
  }

  var i
  if (Buffer.TYPED_ARRAY_SUPPORT && typeof subject.byteLength === 'number') {
    // Speed optimization -- use set if we're copying from a typed array
    buf._set(subject)
  } else if (isArrayish(subject)) {
    // Treat array-ish objects as a byte array
    if (Buffer.isBuffer(subject)) {
      for (i = 0; i < length; i++)
        buf[i] = subject.readUInt8(i)
    } else {
      // Normalize each element into the 0-255 range (handles negatives).
      for (i = 0; i < length; i++)
        buf[i] = ((subject[i] % 256) + 256) % 256
    }
  } else if (type === 'string') {
    buf.write(subject, 0, encoding)
  } else if (type === 'number' && !Buffer.TYPED_ARRAY_SUPPORT && !noZero) {
    // Object-mode buffers are not zero-initialized automatically.
    for (i = 0; i < length; i++) {
      buf[i] = 0
    }
  }

  return buf
}
// True when `b` was produced by this Buffer implementation.  Duck-typed via
// the `_isBuffer` marker so it also recognizes augmented Uint8Array instances.
Buffer.isBuffer = function (b) {
  return !!(b != null && b._isBuffer)
}
Buffer.compare = function (a, b) {
if (!Buffer.isBuffer(a) || !Buffer.isBuffer(b))
throw new TypeError('Arguments must be Buffers')
var x = a.length
var y = b.length
for (var i = 0, len = Math.min(x, y); i < len && a[i] === b[i]; i++) {}
if (i !== len) {
x = a[i]
y = b[i]
}
if (x < y) return -1
if (y < x) return 1
return 0
}
/**
 * Whether `encoding` names an encoding this implementation supports.
 * The check is case-insensitive and coerces any input to a string.
 * @param {*} encoding
 * @returns {boolean}
 */
Buffer.isEncoding = function (encoding) {
  var name = String(encoding).toLowerCase()
  return name === 'hex' ||
    name === 'utf8' || name === 'utf-8' ||
    name === 'ascii' ||
    name === 'binary' ||
    name === 'base64' ||
    name === 'raw' ||
    name === 'ucs2' || name === 'ucs-2' ||
    name === 'utf16le' || name === 'utf-16le'
}
Buffer.concat = function (list, totalLength) {
if (!isArray(list)) throw new TypeError('Usage: Buffer.concat(list[, length])')
if (list.length === 0) {
return new Buffer(0)
} else if (list.length === 1) {
return list[0]
}
var i
if (totalLength === undefined) {
totalLength = 0
for (i = 0; i < list.length; i++) {
totalLength += list[i].length
}
}
var buf = new Buffer(totalLength)
var pos = 0
for (i = 0; i < list.length; i++) {
var item = list[i]
item.copy(buf, pos)
pos += item.length
}
return buf
}
Buffer.byteLength = function (str, encoding) {
var ret
str = str + ''
switch (encoding || 'utf8') {
case 'ascii':
case 'binary':
case 'raw':
ret = str.length
break
case 'ucs2':
case 'ucs-2':
case 'utf16le':
case 'utf-16le':
ret = str.length * 2
break
case 'hex':
ret = str.length >>> 1
break
case 'utf8':
case 'utf-8':
ret = utf8ToBytes(str).length
break
case 'base64':
ret = base64ToBytes(str).length
break
default:
ret = str.length
}
return ret
}
// pre-set for values that may exist in the future
Buffer.prototype.length = undefined
Buffer.prototype.parent = undefined
// toString(encoding, start=0, end=buffer.length)
Buffer.prototype.toString = function (encoding, start, end) {
var loweredCase = false
start = start >>> 0
end = end === undefined || end === Infinity ? this.length : end >>> 0
if (!encoding) encoding = 'utf8'
if (start < 0) start = 0
if (end > this.length) end = this.length
if (end <= start) return ''
while (true) {
switch (encoding) {
case 'hex':
return hexSlice(this, start, end)
case 'utf8':
case 'utf-8':
return utf8Slice(this, start, end)
case 'ascii':
return asciiSlice(this, start, end)
case 'binary':
return binarySlice(this, start, end)
case 'base64':
return base64Slice(this, start, end)
case 'ucs2':
case 'ucs-2':
case 'utf16le':
case 'utf-16le':
return utf16leSlice(this, start, end)
default:
if (loweredCase)
throw new TypeError('Unknown encoding: ' + encoding)
encoding = (encoding + '').toLowerCase()
loweredCase = true
}
}
}
Buffer.prototype.equals = function (b) {
if(!Buffer.isBuffer(b)) throw new TypeError('Argument must be a Buffer')
return Buffer.compare(this, b) === 0
}
Buffer.prototype.inspect = function () {
var str = ''
var max = exports.INSPECT_MAX_BYTES
if (this.length > 0) {
str = this.toString('hex', 0, max).match(/.{2}/g).join(' ')
if (this.length > max)
str += ' ... '
}
return '<Buffer ' + str + '>'
}
Buffer.prototype.compare = function (b) {
if (!Buffer.isBuffer(b)) throw new TypeError('Argument must be a Buffer')
return Buffer.compare(this, b)
}
// `get` will be removed in Node 0.13+
Buffer.prototype.get = function (offset) {
console.log('.get() is deprecated. Access using array indexes instead.')
return this.readUInt8(offset)
}
// `set` will be removed in Node 0.13+
Buffer.prototype.set = function (v, offset) {
console.log('.set() is deprecated. Access using array indexes instead.')
return this.writeUInt8(v, offset)
}
function hexWrite (buf, string, offset, length) {
offset = Number(offset) || 0
var remaining = buf.length - offset
if (!length) {
length = remaining
} else {
length = Number(length)
if (length > remaining) {
length = remaining
}
}
// must be an even number of digits
var strLen = string.length
if (strLen % 2 !== 0) throw new Error('Invalid hex string')
if (length > strLen / 2) {
length = strLen / 2
}
for (var i = 0; i < length; i++) {
var byte = parseInt(string.substr(i * 2, 2), 16)
if (isNaN(byte)) throw new Error('Invalid hex string')
buf[offset + i] = byte
}
return i
}
// Encoding-specific write helpers: each converts `string` to a byte array and
// blits it into `buf` at `offset`, copying at most `length` bytes.  Each
// returns the number of bytes actually written.
function utf8Write (buf, string, offset, length) {
  var charsWritten = blitBuffer(utf8ToBytes(string), buf, offset, length)
  return charsWritten
}

function asciiWrite (buf, string, offset, length) {
  var charsWritten = blitBuffer(asciiToBytes(string), buf, offset, length)
  return charsWritten
}

// 'binary' is handled identically to 'ascii' (one byte per char code).
function binaryWrite (buf, string, offset, length) {
  return asciiWrite(buf, string, offset, length)
}

function base64Write (buf, string, offset, length) {
  var charsWritten = blitBuffer(base64ToBytes(string), buf, offset, length)
  return charsWritten
}

function utf16leWrite (buf, string, offset, length) {
  var charsWritten = blitBuffer(utf16leToBytes(string), buf, offset, length)
  return charsWritten
}
Buffer.prototype.write = function (string, offset, length, encoding) {
// Support both (string, offset, length, encoding)
// and the legacy (string, encoding, offset, length)
if (isFinite(offset)) {
if (!isFinite(length)) {
encoding = length
length = undefined
}
} else { // legacy
var swap = encoding
encoding = offset
offset = length
length = swap
}
offset = Number(offset) || 0
var remaining = this.length - offset
if (!length) {
length = remaining
} else {
length = Number(length)
if (length > remaining) {
length = remaining
}
}
encoding = String(encoding || 'utf8').toLowerCase()
var ret
switch (encoding) {
case 'hex':
ret = hexWrite(this, string, offset, length)
break
case 'utf8':
case 'utf-8':
ret = utf8Write(this, string, offset, length)
break
case 'ascii':
ret = asciiWrite(this, string, offset, length)
break
case 'binary':
ret = binaryWrite(this, string, offset, length)
break
case 'base64':
ret = base64Write(this, string, offset, length)
break
case 'ucs2':
case 'ucs-2':
case 'utf16le':
case 'utf-16le':
ret = utf16leWrite(this, string, offset, length)
break
default:
throw new TypeError('Unknown encoding: ' + encoding)
}
return ret
}
Buffer.prototype.toJSON = function () {
return {
type: 'Buffer',
data: Array.prototype.slice.call(this._arr || this, 0)
}
}
// Base64-encode the bytes in [start, end) via base64-js; avoids the copy
// when the whole buffer is requested.
function base64Slice (buf, start, end) {
  if (start === 0 && end === buf.length) {
    return base64.fromByteArray(buf)
  } else {
    return base64.fromByteArray(buf.slice(start, end))
  }
}

// Decode the bytes in [start, end) as UTF-8 text.  Multi-byte sequences are
// accumulated as percent-escapes in `tmp` and decoded via decodeUtf8Char,
// which maps invalid sequences to U+FFFD instead of throwing.
function utf8Slice (buf, start, end) {
  var res = ''
  var tmp = ''
  end = Math.min(buf.length, end)

  for (var i = start; i < end; i++) {
    if (buf[i] <= 0x7F) {
      // ASCII byte: flush any pending multi-byte sequence first.
      res += decodeUtf8Char(tmp) + String.fromCharCode(buf[i])
      tmp = ''
    } else {
      tmp += '%' + buf[i].toString(16)
    }
  }

  // Flush a trailing multi-byte sequence, if any.
  return res + decodeUtf8Char(tmp)
}
/**
 * Decode the bytes in [start, end) as one character per byte value.
 * `end` is clamped to the buffer length.
 */
function asciiSlice (buf, start, end) {
  var out = ''
  var last = Math.min(buf.length, end)
  for (var i = start; i < last; i++) {
    out += String.fromCharCode(buf[i])
  }
  return out
}

// 'binary' decoding is identical to 'ascii' decoding here.
function binarySlice (buf, start, end) {
  return asciiSlice(buf, start, end)
}
/**
 * Hex-encode the bytes in [start, end), two lowercase digits per byte.
 * Missing/negative bounds default to the full buffer.
 */
function hexSlice (buf, start, end) {
  var len = buf.length
  if (!start || start < 0) start = 0
  if (!end || end < 0 || end > len) end = len

  var out = ''
  for (var i = start; i < end; i++) {
    var hex = buf[i].toString(16)
    out += hex.length < 2 ? '0' + hex : hex
  }
  return out
}
function utf16leSlice (buf, start, end) {
var bytes = buf.slice(start, end)
var res = ''
for (var i = 0; i < bytes.length; i += 2) {
res += String.fromCharCode(bytes[i] + bytes[i + 1] * 256)
}
return res
}
Buffer.prototype.slice = function (start, end) {
var len = this.length
start = ~~start
end = end === undefined ? len : ~~end
if (start < 0) {
start += len;
if (start < 0)
start = 0
} else if (start > len) {
start = len
}
if (end < 0) {
end += len
if (end < 0)
end = 0
} else if (end > len) {
end = len
}
if (end < start)
end = start
if (Buffer.TYPED_ARRAY_SUPPORT) {
return Buffer._augment(this.subarray(start, end))
} else {
var sliceLen = end - start
var newBuf = new Buffer(sliceLen, undefined, true)
for (var i = 0; i < sliceLen; i++) {
newBuf[i] = this[i + start]
}
return newBuf
}
}
/*
* Need to make sure that buffer isn't trying to write out of bounds.
*/
/**
 * Validate a read: `offset` must be a non-negative integer and the
 * `ext`-byte access starting there must fit within `length`.
 * @throws {RangeError} on an invalid offset or out-of-bounds access
 */
function checkOffset (offset, ext, length) {
  if (offset < 0 || offset % 1 !== 0)
    throw new RangeError('offset is not uint')
  if (offset + ext > length)
    throw new RangeError('Trying to access beyond buffer length')
}
Buffer.prototype.readUInt8 = function (offset, noAssert) {
if (!noAssert)
checkOffset(offset, 1, this.length)
return this[offset]
}
Buffer.prototype.readUInt16LE = function (offset, noAssert) {
if (!noAssert)
checkOffset(offset, 2, this.length)
return this[offset] | (this[offset + 1] << 8)
}
Buffer.prototype.readUInt16BE = function (offset, noAssert) {
if (!noAssert)
checkOffset(offset, 2, this.length)
return (this[offset] << 8) | this[offset + 1]
}
Buffer.prototype.readUInt32LE = function (offset, noAssert) {
if (!noAssert)
checkOffset(offset, 4, this.length)
return ((this[offset]) |
(this[offset + 1] << 8) |
(this[offset + 2] << 16)) +
(this[offset + 3] * 0x1000000)
}
Buffer.prototype.readUInt32BE = function (offset, noAssert) {
if (!noAssert)
checkOffset(offset, 4, this.length)
return (this[offset] * 0x1000000) +
((this[offset + 1] << 16) |
(this[offset + 2] << 8) |
this[offset + 3])
}
Buffer.prototype.readInt8 = function (offset, noAssert) {
if (!noAssert)
checkOffset(offset, 1, this.length)
if (!(this[offset] & 0x80))
return (this[offset])
return ((0xff - this[offset] + 1) * -1)
}
Buffer.prototype.readInt16LE = function (offset, noAssert) {
if (!noAssert)
checkOffset(offset, 2, this.length)
var val = this[offset] | (this[offset + 1] << 8)
return (val & 0x8000) ? val | 0xFFFF0000 : val
}
Buffer.prototype.readInt16BE = function (offset, noAssert) {
if (!noAssert)
checkOffset(offset, 2, this.length)
var val = this[offset + 1] | (this[offset] << 8)
return (val & 0x8000) ? val | 0xFFFF0000 : val
}
Buffer.prototype.readInt32LE = function (offset, noAssert) {
if (!noAssert)
checkOffset(offset, 4, this.length)
return (this[offset]) |
(this[offset + 1] << 8) |
(this[offset + 2] << 16) |
(this[offset + 3] << 24)
}
Buffer.prototype.readInt32BE = function (offset, noAssert) {
if (!noAssert)
checkOffset(offset, 4, this.length)
return (this[offset] << 24) |
(this[offset + 1] << 16) |
(this[offset + 2] << 8) |
(this[offset + 3])
}
Buffer.prototype.readFloatLE = function (offset, noAssert) {
if (!noAssert)
checkOffset(offset, 4, this.length)
return ieee754.read(this, offset, true, 23, 4)
}
Buffer.prototype.readFloatBE = function (offset, noAssert) {
if (!noAssert)
checkOffset(offset, 4, this.length)
return ieee754.read(this, offset, false, 23, 4)
}
Buffer.prototype.readDoubleLE = function (offset, noAssert) {
if (!noAssert)
checkOffset(offset, 8, this.length)
return ieee754.read(this, offset, true, 52, 8)
}
Buffer.prototype.readDoubleBE = function (offset, noAssert) {
if (!noAssert)
checkOffset(offset, 8, this.length)
return ieee754.read(this, offset, false, 52, 8)
}
// Validate an integer write: `buf` must be a Buffer, `value` within
// [min, max], and the `ext`-byte write must stay inside the buffer.
function checkInt (buf, value, offset, ext, max, min) {
  if (!Buffer.isBuffer(buf)) throw new TypeError('buffer must be a Buffer instance')
  if (value > max || value < min) throw new TypeError('value is out of bounds')
  if (offset + ext > buf.length) throw new TypeError('index out of range')
}
Buffer.prototype.writeUInt8 = function (value, offset, noAssert) {
value = +value
offset = offset >>> 0
if (!noAssert)
checkInt(this, value, offset, 1, 0xff, 0)
if (!Buffer.TYPED_ARRAY_SUPPORT) value = Math.floor(value)
this[offset] = value
return offset + 1
}
/**
 * Fallback 16-bit write for the non-typed-array Buffer mode.  Writes up to
 * two bytes of `value` at `offset` in the requested byte order, silently
 * truncating at the end of the buffer.  Negative values wrap two's-complement.
 */
function objectWriteUInt16 (buf, value, offset, littleEndian) {
  if (value < 0) value = 0xffff + value + 1
  var limit = Math.min(buf.length - offset, 2)
  for (var i = 0; i < limit; i++) {
    var shift = (littleEndian ? i : 1 - i) * 8
    buf[offset + i] = (value & (0xff << shift)) >>> shift
  }
}
Buffer.prototype.writeUInt16LE = function (value, offset, noAssert) {
value = +value
offset = offset >>> 0
if (!noAssert)
checkInt(this, value, offset, 2, 0xffff, 0)
if (Buffer.TYPED_ARRAY_SUPPORT) {
this[offset] = value
this[offset + 1] = (value >>> 8)
} else objectWriteUInt16(this, value, offset, true)
return offset + 2
}
Buffer.prototype.writeUInt16BE = function (value, offset, noAssert) {
value = +value
offset = offset >>> 0
if (!noAssert)
checkInt(this, value, offset, 2, 0xffff, 0)
if (Buffer.TYPED_ARRAY_SUPPORT) {
this[offset] = (value >>> 8)
this[offset + 1] = value
} else objectWriteUInt16(this, value, offset, false)
return offset + 2
}
/**
 * Fallback 32-bit write for the non-typed-array Buffer mode.  Writes up to
 * four bytes of `value` at `offset` in the requested byte order, silently
 * truncating at the end of the buffer.  Negative values wrap two's-complement.
 */
function objectWriteUInt32 (buf, value, offset, littleEndian) {
  if (value < 0) value = 0xffffffff + value + 1
  var limit = Math.min(buf.length - offset, 4)
  for (var i = 0; i < limit; i++) {
    buf[offset + i] = (value >>> ((littleEndian ? i : 3 - i) * 8)) & 0xff
  }
}
Buffer.prototype.writeUInt32LE = function (value, offset, noAssert) {
value = +value
offset = offset >>> 0
if (!noAssert)
checkInt(this, value, offset, 4, 0xffffffff, 0)
if (Buffer.TYPED_ARRAY_SUPPORT) {
this[offset + 3] = (value >>> 24)
this[offset + 2] = (value >>> 16)
this[offset + 1] = (value >>> 8)
this[offset] = value
} else objectWriteUInt32(this, value, offset, true)
return offset + 4
}
Buffer.prototype.writeUInt32BE = function (value, offset, noAssert) {
value = +value
offset = offset >>> 0
if (!noAssert)
checkInt(this, value, offset, 4, 0xffffffff, 0)
if (Buffer.TYPED_ARRAY_SUPPORT) {
this[offset] = (value >>> 24)
this[offset + 1] = (value >>> 16)
this[offset + 2] = (value >>> 8)
this[offset + 3] = value
} else objectWriteUInt32(this, value, offset, false)
return offset + 4
}
Buffer.prototype.writeInt8 = function (value, offset, noAssert) {
value = +value
offset = offset >>> 0
if (!noAssert)
checkInt(this, value, offset, 1, 0x7f, -0x80)
if (!Buffer.TYPED_ARRAY_SUPPORT) value = Math.floor(value)
if (value < 0) value = 0xff + value + 1
this[offset] = value
return offset + 1
}
Buffer.prototype.writeInt16LE = function (value, offset, noAssert) {
value = +value
offset = offset >>> 0
if (!noAssert)
checkInt(this, value, offset, 2, 0x7fff, -0x8000)
if (Buffer.TYPED_ARRAY_SUPPORT) {
this[offset] = value
this[offset + 1] = (value >>> 8)
} else objectWriteUInt16(this, value, offset, true)
return offset + 2
}
Buffer.prototype.writeInt16BE = function (value, offset, noAssert) {
value = +value
offset = offset >>> 0
if (!noAssert)
checkInt(this, value, offset, 2, 0x7fff, -0x8000)
if (Buffer.TYPED_ARRAY_SUPPORT) {
this[offset] = (value >>> 8)
this[offset + 1] = value
} else objectWriteUInt16(this, value, offset, false)
return offset + 2
}
Buffer.prototype.writeInt32LE = function (value, offset, noAssert) {
value = +value
offset = offset >>> 0
if (!noAssert)
checkInt(this, value, offset, 4, 0x7fffffff, -0x80000000)
if (Buffer.TYPED_ARRAY_SUPPORT) {
this[offset] = value
this[offset + 1] = (value >>> 8)
this[offset + 2] = (value >>> 16)
this[offset + 3] = (value >>> 24)
} else objectWriteUInt32(this, value, offset, true)
return offset + 4
}
Buffer.prototype.writeInt32BE = function (value, offset, noAssert) {
value = +value
offset = offset >>> 0
if (!noAssert)
checkInt(this, value, offset, 4, 0x7fffffff, -0x80000000)
if (value < 0) value = 0xffffffff + value + 1
if (Buffer.TYPED_ARRAY_SUPPORT) {
this[offset] = (value >>> 24)
this[offset + 1] = (value >>> 16)
this[offset + 2] = (value >>> 8)
this[offset + 3] = value
} else objectWriteUInt32(this, value, offset, false)
return offset + 4
}
/**
 * Bounds check for float/double writes: `value` must lie within the type's
 * representable range and the `ext`-byte write must fit inside `buf`.
 * @throws {TypeError}
 */
function checkIEEE754 (buf, value, offset, ext, max, min) {
  if (value < min || value > max) throw new TypeError('value is out of bounds')
  if (offset + ext > buf.length) throw new TypeError('index out of range')
}
// Shared implementation for writeFloatLE/BE: optional range check, then
// delegate the IEEE-754 encoding (23 mantissa bits, 4 bytes) to ieee754.
// Returns the offset just past the written bytes.
function writeFloat (buf, value, offset, littleEndian, noAssert) {
  if (!noAssert)
    checkIEEE754(buf, value, offset, 4, 3.4028234663852886e+38, -3.4028234663852886e+38)
  ieee754.write(buf, value, offset, littleEndian, 23, 4)
  return offset + 4
}

// Write a 32-bit little-endian float at `offset`; returns offset + 4.
Buffer.prototype.writeFloatLE = function (value, offset, noAssert) {
  return writeFloat(this, value, offset, true, noAssert)
}

// Write a 32-bit big-endian float at `offset`; returns offset + 4.
Buffer.prototype.writeFloatBE = function (value, offset, noAssert) {
  return writeFloat(this, value, offset, false, noAssert)
}
// Shared implementation for writeDoubleLE/BE: optional range check, then
// delegate the IEEE-754 encoding (52 mantissa bits, 8 bytes) to ieee754.
// Returns the offset just past the written bytes.
function writeDouble (buf, value, offset, littleEndian, noAssert) {
  if (!noAssert)
    checkIEEE754(buf, value, offset, 8, 1.7976931348623157E+308, -1.7976931348623157E+308)
  ieee754.write(buf, value, offset, littleEndian, 52, 8)
  return offset + 8
}

// Write a 64-bit little-endian double at `offset`; returns offset + 8.
Buffer.prototype.writeDoubleLE = function (value, offset, noAssert) {
  return writeDouble(this, value, offset, true, noAssert)
}

// Write a 64-bit big-endian double at `offset`; returns offset + 8.
Buffer.prototype.writeDoubleBE = function (value, offset, noAssert) {
  return writeDouble(this, value, offset, false, noAssert)
}
// copy(targetBuffer, targetStart=0, sourceStart=0, sourceEnd=buffer.length)
Buffer.prototype.copy = function (target, target_start, start, end) {
var source = this
if (!start) start = 0
if (!end && end !== 0) end = this.length
if (!target_start) target_start = 0
// Copy 0 bytes; we're done
if (end === start) return
if (target.length === 0 || source.length === 0) return
// Fatal error conditions
if (end < start) throw new TypeError('sourceEnd < sourceStart')
if (target_start < 0 || target_start >= target.length)
throw new TypeError('targetStart out of bounds')
if (start < 0 || start >= source.length) throw new TypeError('sourceStart out of bounds')
if (end < 0 || end > source.length) throw new TypeError('sourceEnd out of bounds')
// Are we oob?
if (end > this.length)
end = this.length
if (target.length - target_start < end - start)
end = target.length - target_start + start
var len = end - start
if (len < 100 || !Buffer.TYPED_ARRAY_SUPPORT) {
for (var i = 0; i < len; i++) {
target[i + target_start] = this[i + start]
}
} else {
target._set(this.subarray(start, start + len), target_start)
}
}
// fill(value, start=0, end=buffer.length)
Buffer.prototype.fill = function (value, start, end) {
if (!value) value = 0
if (!start) start = 0
if (!end) end = this.length
if (end < start) throw new TypeError('end < start')
// Fill 0 bytes; we're done
if (end === start) return
if (this.length === 0) return
if (start < 0 || start >= this.length) throw new TypeError('start out of bounds')
if (end < 0 || end > this.length) throw new TypeError('end out of bounds')
var i
if (typeof value === 'number') {
for (i = start; i < end; i++) {
this[i] = value
}
} else {
var bytes = utf8ToBytes(value.toString())
var len = bytes.length
for (i = start; i < end; i++) {
this[i] = bytes[i % len]
}
}
return this
}
/**
* Creates a new `ArrayBuffer` with the *copied* memory of the buffer instance.
* Added in Node 0.12. Only available in browsers that support ArrayBuffer.
*/
Buffer.prototype.toArrayBuffer = function () {
if (typeof Uint8Array !== 'undefined') {
if (Buffer.TYPED_ARRAY_SUPPORT) {
return (new Buffer(this)).buffer
} else {
var buf = new Uint8Array(this.length)
for (var i = 0, len = buf.length; i < len; i += 1) {
buf[i] = this[i]
}
return buf.buffer
}
} else {
throw new TypeError('Buffer.toArrayBuffer not supported in this browser')
}
}
// HELPER FUNCTIONS
// ================
var BP = Buffer.prototype
/**
* Augment a Uint8Array *instance* (not the Uint8Array class!) with Buffer methods
*/
Buffer._augment = function (arr) {
arr._isBuffer = true
// save reference to original Uint8Array get/set methods before overwriting
arr._get = arr.get
arr._set = arr.set
// deprecated, will be removed in node 0.13+
arr.get = BP.get
arr.set = BP.set
arr.write = BP.write
arr.toString = BP.toString
arr.toLocaleString = BP.toString
arr.toJSON = BP.toJSON
arr.equals = BP.equals
arr.compare = BP.compare
arr.copy = BP.copy
arr.slice = BP.slice
arr.readUInt8 = BP.readUInt8
arr.readUInt16LE = BP.readUInt16LE
arr.readUInt16BE = BP.readUInt16BE
arr.readUInt32LE = BP.readUInt32LE
arr.readUInt32BE = BP.readUInt32BE
arr.readInt8 = BP.readInt8
arr.readInt16LE = BP.readInt16LE
arr.readInt16BE = BP.readInt16BE
arr.readInt32LE = BP.readInt32LE
arr.readInt32BE = BP.readInt32BE
arr.readFloatLE = BP.readFloatLE
arr.readFloatBE = BP.readFloatBE
arr.readDoubleLE = BP.readDoubleLE
arr.readDoubleBE = BP.readDoubleBE
arr.writeUInt8 = BP.writeUInt8
arr.writeUInt16LE = BP.writeUInt16LE
arr.writeUInt16BE = BP.writeUInt16BE
arr.writeUInt32LE = BP.writeUInt32LE
arr.writeUInt32BE = BP.writeUInt32BE
arr.writeInt8 = BP.writeInt8
arr.writeInt16LE = BP.writeInt16LE
arr.writeInt16BE = BP.writeInt16BE
arr.writeInt32LE = BP.writeInt32LE
arr.writeInt32BE = BP.writeInt32BE
arr.writeFloatLE = BP.writeFloatLE
arr.writeFloatBE = BP.writeFloatBE
arr.writeDoubleLE = BP.writeDoubleLE
arr.writeDoubleBE = BP.writeDoubleBE
arr.fill = BP.fill
arr.inspect = BP.inspect
arr.toArrayBuffer = BP.toArrayBuffer
return arr
}
// Matches every character that is NOT valid in standard base64 input.
// BUG FIX: the previous pattern used the character range `A-z`, which also
// matches the punctuation between 'Z' and 'a' ('[', '\\', ']', '^', '_', '`'),
// so those invalid characters leaked through into the decoder.  `A-Za-z`
// matches only letters.
var INVALID_BASE64_RE = /[^+\/0-9A-Za-z]/g

/**
 * Normalize a base64 string the way Node does before decoding:
 * strip characters that are invalid in base64 (e.g. '\n', '\t') and re-add
 * the trailing '=' padding that base64-js requires.
 * @param {string} str - raw base64 input
 * @returns {string} cleaned, padded base64 string
 */
function base64clean (str) {
  // Node strips out invalid characters like \n and \t from the string, base64-js does not
  str = stringtrim(str).replace(INVALID_BASE64_RE, '')
  // Node allows for non-padded base64 strings (missing trailing ===), base64-js does not
  while (str.length % 4 !== 0) {
    str = str + '='
  }
  return str
}
/**
 * Trim surrounding whitespace; falls back to a regex replace for
 * environments that lack String.prototype.trim.
 */
function stringtrim (str) {
  if (typeof str.trim === 'function') return str.trim()
  return str.replace(/^\s+|\s+$/g, '')
}
// True for values that can supply bytes by index: real arrays, Buffers, or
// any object exposing a numeric `length` (e.g. arguments, typed arrays).
// NOTE: returns the raw short-circuit value of the && / || chain, so callers
// must treat the result as truthy/falsy, not as a strict boolean.
function isArrayish (subject) {
  return isArray(subject) || Buffer.isBuffer(subject) ||
      subject && typeof subject === 'object' &&
      typeof subject.length === 'number'
}
/**
 * Hex-encode a single byte value (0-255), zero-padded to two digits.
 */
function toHex (n) {
  var hex = n.toString(16)
  return hex.length < 2 ? '0' + hex : hex
}
/**
 * UTF-8 encode a JS string into an array of byte values.
 *
 * Valid surrogate pairs are combined so astral-plane characters encode as
 * 4 bytes.  BUG FIX: the previous implementation delegated to
 * encodeURIComponent(), which throws a URIError on lone (unpaired)
 * surrogates; following the WHATWG-encoder / Node behavior we now emit
 * U+FFFD (the replacement character, bytes EF BF BD) for them instead.
 *
 * @param {string} str
 * @returns {number[]} UTF-8 bytes
 */
function utf8ToBytes (str) {
  var byteArray = []
  for (var i = 0; i < str.length; i++) {
    var codePoint = str.charCodeAt(i)

    // Combine a valid high+low surrogate pair into a single code point.
    if (codePoint >= 0xD800 && codePoint <= 0xDBFF && i + 1 < str.length) {
      var next = str.charCodeAt(i + 1)
      if (next >= 0xDC00 && next <= 0xDFFF) {
        codePoint = (codePoint - 0xD800) * 0x400 + (next - 0xDC00) + 0x10000
        i++
      }
    }
    // Any surrogate still unpaired at this point is invalid UTF-16.
    if (codePoint >= 0xD800 && codePoint <= 0xDFFF) {
      codePoint = 0xFFFD
    }

    if (codePoint < 0x80) {
      byteArray.push(codePoint)
    } else if (codePoint < 0x800) {
      byteArray.push(
        0xC0 | (codePoint >> 6),
        0x80 | (codePoint & 0x3F))
    } else if (codePoint < 0x10000) {
      byteArray.push(
        0xE0 | (codePoint >> 12),
        0x80 | ((codePoint >> 6) & 0x3F),
        0x80 | (codePoint & 0x3F))
    } else {
      byteArray.push(
        0xF0 | (codePoint >> 18),
        0x80 | ((codePoint >> 12) & 0x3F),
        0x80 | ((codePoint >> 6) & 0x3F),
        0x80 | (codePoint & 0x3F))
    }
  }
  return byteArray
}
/**
 * Convert a string to one byte per character by truncating each char code
 * to its low 8 bits.
 */
function asciiToBytes (str) {
  var bytes = []
  var len = str.length
  for (var i = 0; i < len; i++) {
    // Node's code seems to be doing this and not & 0x7F..
    bytes.push(str.charCodeAt(i) & 0xFF)
  }
  return bytes
}
function utf16leToBytes (str) {
var c, hi, lo
var byteArray = []
for (var i = 0; i < str.length; i++) {
c = str.charCodeAt(i)
hi = c >> 8
lo = c % 256
byteArray.push(lo)
byteArray.push(hi)
}
return byteArray
}
// Decode a (cleaned, padded) base64 string into an array of byte values
// via the bundled base64-js module.
function base64ToBytes (str) {
  return base64.toByteArray(str)
}
/**
 * Copy up to `length` bytes from `src` into `dst` starting at `offset`,
 * stopping early when either array runs out.
 * @returns {number} number of bytes actually copied
 */
function blitBuffer (src, dst, offset, length) {
  var i = 0
  while (i < length) {
    if (i + offset >= dst.length || i >= src.length)
      break
    dst[i + offset] = src[i]
    i++
  }
  return i
}
/**
 * Decode a percent-encoded UTF-8 sequence.  Invalid sequences yield the
 * Unicode replacement character U+FFFD instead of throwing.
 */
function decodeUtf8Char (str) {
  try {
    return decodeURIComponent(str)
  } catch (err) {
    return String.fromCharCode(0xFFFD) // UTF 8 invalid char
  }
}
},{"base64-js":2,"ieee754":3,"is-array":4}],2:[function(require,module,exports){
var lookup = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/';
;(function (exports) {
'use strict';
var Arr = (typeof Uint8Array !== 'undefined')
? Uint8Array
: Array
var PLUS = '+'.charCodeAt(0)
var SLASH = '/'.charCodeAt(0)
var NUMBER = '0'.charCodeAt(0)
var LOWER = 'a'.charCodeAt(0)
var UPPER = 'A'.charCodeAt(0)
function decode (elt) {
var code = elt.charCodeAt(0)
if (code === PLUS)
return 62 // '+'
if (code === SLASH)
return 63 // '/'
if (code < NUMBER)
return -1 //no match
if (code < NUMBER + 10)
return code - NUMBER + 26 + 26
if (code < UPPER + 26)
return code - UPPER
if (code < LOWER + 26)
return code - LOWER + 26
}
function b64ToByteArray (b64) {
var i, j, l, tmp, placeHolders, arr
if (b64.length % 4 > 0) {
throw new Error('Invalid string. Length must be a multiple of 4')
}
// the number of equal signs (place holders)
// if there are two placeholders, than the two characters before it
// represent one byte
// if there is only one, then the three characters before it represent 2 bytes
// this is just a cheap hack to not do indexOf twice
var len = b64.length
placeHolders = '=' === b64.charAt(len - 2) ? 2 : '=' === b64.charAt(len - 1) ? 1 : 0
// base64 is 4/3 + up to two characters of the original data
arr = new Arr(b64.length * 3 / 4 - placeHolders)
// if there are placeholders, only get up to the last complete 4 chars
l = placeHolders > 0 ? b64.length - 4 : b64.length
var L = 0
function push (v) {
arr[L++] = v
}
for (i = 0, j = 0; i < l; i += 4, j += 3) {
tmp = (decode(b64.charAt(i)) << 18) | (decode(b64.charAt(i + 1)) << 12) | (decode(b64.charAt(i + 2)) << 6) | decode(b64.charAt(i + 3))
push((tmp & 0xFF0000) >> 16)
push((tmp & 0xFF00) >> 8)
push(tmp & 0xFF)
}
if (placeHolders === 2) {
tmp = (decode(b64.charAt(i)) << 2) | (decode(b64.charAt(i + 1)) >> 4)
push(tmp & 0xFF)
} else if (placeHolders === 1) {
tmp = (decode(b64.charAt(i)) << 10) | (decode(b64.charAt(i + 1)) << 4) | (decode(b64.charAt(i + 2)) >> 2)
push((tmp >> 8) & 0xFF)
push(tmp & 0xFF)
}
return arr
}
function uint8ToBase64 (uint8) {
var i,
extraBytes = uint8.length % 3, // if we have 1 byte left, pad 2 bytes
output = "",
temp, length
function encode (num) {
return lookup.charAt(num)
}
function tripletToBase64 (num) {
return encode(num >> 18 & 0x3F) + encode(num >> 12 & 0x3F) + encode(num >> 6 & 0x3F) + encode(num & 0x3F)
}
// go through the array every three bytes, we'll deal with trailing stuff later
for (i = 0, length = uint8.length - extraBytes; i < length; i += 3) {
temp = (uint8[i] << 16) + (uint8[i + 1] << 8) + (uint8[i + 2])
output += tripletToBase64(temp)
}
// pad the end with zeros, but make sure to not forget the extra bytes
switch (extraBytes) {
case 1:
temp = uint8[uint8.length - 1]
output += encode(temp >> 2)
output += encode((temp << 4) & 0x3F)
output += '=='
break
case 2:
temp = (uint8[uint8.length - 2] << 8) + (uint8[uint8.length - 1])
output += encode(temp >> 10)
output += encode((temp >> 4) & 0x3F)
output += encode((temp << 2) & 0x3F)
output += '='
break
}
return output
}
exports.toByteArray = b64ToByteArray
exports.fromByteArray = uint8ToBase64
}(typeof exports === 'undefined' ? (this.base64js = {}) : exports))
},{}],3:[function(require,module,exports){
/**
 * Read an IEEE-754 float of `nBytes` bytes with `mLen` mantissa bits from
 * `buffer` at `offset`.  `isLE` selects byte order.  Handles subnormals,
 * signed Infinity and NaN.
 */
exports.read = function(buffer, offset, isLE, mLen, nBytes) {
  var e, m,
      eLen = nBytes * 8 - mLen - 1, // exponent field width in bits
      eMax = (1 << eLen) - 1,       // all-ones exponent (Inf/NaN marker)
      eBias = eMax >> 1,            // exponent bias
      nBits = -7,
      i = isLE ? (nBytes - 1) : 0,  // start at the most significant byte
      d = isLE ? -1 : 1,            // traversal direction
      s = buffer[offset + i];       // first byte carries the sign bit

  i += d;

  // Accumulate the exponent bits, one byte at a time.
  e = s & ((1 << (-nBits)) - 1);
  s >>= (-nBits);
  nBits += eLen;
  for (; nBits > 0; e = e * 256 + buffer[offset + i], i += d, nBits -= 8);

  // Accumulate the mantissa bits.
  m = e & ((1 << (-nBits)) - 1);
  e >>= (-nBits);
  nBits += mLen;
  for (; nBits > 0; m = m * 256 + buffer[offset + i], i += d, nBits -= 8);

  if (e === 0) {
    // Subnormal number (no implicit leading 1).
    e = 1 - eBias;
  } else if (e === eMax) {
    // All-ones exponent: NaN when the mantissa is non-zero, else +/-Infinity.
    return m ? NaN : ((s ? -1 : 1) * Infinity);
  } else {
    // Normal number: restore the implicit leading 1 bit and unbias.
    m = m + Math.pow(2, mLen);
    e = e - eBias;
  }
  return (s ? -1 : 1) * m * Math.pow(2, e - mLen);
};
/**
 * Write `value` into `buffer` at `offset` as an IEEE-754 float of `nBytes`
 * bytes with `mLen` mantissa bits.  `isLE` selects byte order.  Handles
 * rounding, subnormals, -0, signed Infinity and NaN.
 */
exports.write = function(buffer, value, offset, isLE, mLen, nBytes) {
  var e, m, c,
      eLen = nBytes * 8 - mLen - 1, // exponent field width in bits
      eMax = (1 << eLen) - 1,       // all-ones exponent (Inf/NaN marker)
      eBias = eMax >> 1,
      // Rounding compensation term (non-zero only for single precision).
      rt = (mLen === 23 ? Math.pow(2, -24) - Math.pow(2, -77) : 0),
      i = isLE ? 0 : (nBytes - 1),  // start at the least significant byte
      d = isLE ? 1 : -1,            // traversal direction
      // 1/value < 0 distinguishes -0 from +0 so the sign bit survives.
      s = value < 0 || (value === 0 && 1 / value < 0) ? 1 : 0;

  value = Math.abs(value);

  if (isNaN(value) || value === Infinity) {
    // NaN encodes with a non-zero mantissa; Infinity with mantissa zero.
    m = isNaN(value) ? 1 : 0;
    e = eMax;
  } else {
    e = Math.floor(Math.log(value) / Math.LN2);
    // Correct the exponent estimate if log() was off by one.
    if (value * (c = Math.pow(2, -e)) < 1) {
      e--;
      c *= 2;
    }
    // Apply the rounding term (scaled differently for subnormals).
    if (e + eBias >= 1) {
      value += rt / c;
    } else {
      value += rt * Math.pow(2, 1 - eBias);
    }
    // Rounding may have pushed the value up into the next binade.
    if (value * c >= 2) {
      e++;
      c /= 2;
    }

    if (e + eBias >= eMax) {
      // Overflow: encode as Infinity.
      m = 0;
      e = eMax;
    } else if (e + eBias >= 1) {
      // Normal number: drop the implicit leading 1 from the mantissa.
      m = (value * c - 1) * Math.pow(2, mLen);
      e = e + eBias;
    } else {
      // Subnormal number: exponent field is zero.
      m = value * Math.pow(2, eBias - 1) * Math.pow(2, mLen);
      e = 0;
    }
  }

  // Emit the mantissa bytes.
  for (; mLen >= 8; buffer[offset + i] = m & 0xff, i += d, m /= 256, mLen -= 8);

  // Pack the remaining mantissa bits together with the exponent and emit.
  e = (e << mLen) | m;
  eLen += mLen;
  for (; eLen > 0; buffer[offset + i] = e & 0xff, i += d, e /= 256, eLen -= 8);

  // Finally, OR the sign bit into the most significant byte.
  buffer[offset + i - d] |= s * 128;
};
},{}],4:[function(require,module,exports){
/**
* isArray
*/
var isArray = Array.isArray;
/**
* toString
*/
var str = Object.prototype.toString;
/**
* Whether or not the given `val`
* is an array.
*
* example:
*
* isArray([]);
* // > true
* isArray(arguments);
* // > false
* isArray('');
* // > false
*
* @param {mixed} val
* @return {bool}
*/
// Export the native Array.isArray when available, otherwise fall back to an
// Object.prototype.toString tag check.
module.exports = isArray || function (val) {
  return !! val && '[object Array]' == str.call(val);
};
},{}],5:[function(require,module,exports){
ndarray = require( 'ndarray' );
},{"ndarray":6}],6:[function(require,module,exports){
(function (Buffer){
var iota = require("iota-array")
var hasTypedArrays = ((typeof Float64Array) !== "undefined")
var hasBuffer = ((typeof Buffer) !== "undefined")
// Array#sort comparator: orders [absStride, axis] pairs by their first
// element (the absolute stride).
function compare1st(a, b) {
  return a[0] - b[0]
}
/**
 * Compute the memory ordering of this ndarray's axes: the axis indices
 * sorted by increasing absolute stride (fastest-varying axis first).
 * Invoked as a method, so `this.stride` must be the stride array.
 * @returns {number[]}
 */
function order() {
  var stride = this.stride
  var terms = stride.map(function (s, axis) {
    return [Math.abs(s), axis]
  })
  terms.sort(function (a, b) { return a[0] - b[0] })
  return terms.map(function (term) {
    return term[1]
  })
}
function compileConstructor(dtype, dimension) {
var className = ["View", dimension, "d", dtype].join("")
if(dimension < 0) {
className = "View_Nil" + dtype
}
var useGetters = (dtype === "generic")
if(dimension === -1) {
//Special case for trivial arrays
var code =
"function "+className+"(a){this.data=a;};\
var proto="+className+".prototype;\
proto.dtype='"+dtype+"';\
proto.index=function(){return -1};\
proto.size=0;\
proto.dimension=-1;\
proto.shape=proto.stride=proto.order=[];\
proto.lo=proto.hi=proto.transpose=proto.step=\
function(){return new "+className+"(this.data);};\
proto.get=proto.set=function(){};\
proto.pick=function(){return null};\
return function construct_"+className+"(a){return new "+className+"(a);}"
var procedure = new Function(code)
return procedure()
} else if(dimension === 0) {
//Special case for 0d arrays
var code =
"function "+className+"(a,d) {\
this.data = a;\
this.offset = d\
};\
var proto="+className+".prototype;\
proto.dtype='"+dtype+"';\
proto.index=function(){return this.offset};\
proto.dimension=0;\
proto.size=1;\
proto.shape=\
proto.stride=\
proto.order=[];\
proto.lo=\
proto.hi=\
proto.transpose=\
proto.step=function "+className+"_copy() {\
return new "+className+"(this.data,this.offset)\
};\
proto.pick=function "+className+"_pick(){\
return TrivialArray(this.data);\
};\
proto.valueOf=proto.get=function "+className+"_get(){\
return "+(useGetters ? "this.data.get(this.offset)" : "this.data[this.offset]")+
"};\
proto.set=function "+className+"_set(v){\
return "+(useGetters ? "this.data.set(this.offset,v)" : "this.data[this.offset]=v")+"\
};\
return function construct_"+className+"(a,b,c,d){return new "+className+"(a,d)}"
var procedure = new Function("TrivialArray", code)
return procedure(CACHED_CONSTRUCTORS[dtype][0])
}
var code = ["'use strict'"]
//Create constructor for view
var indices = iota(dimension)
var args = indices.map(function(i) { return "i"+i })
var index_str = "this.offset+" + indices.map(function(i) {
return "this.stride[" + i + "]*i" + i
}).join("+")
var shapeArg = indices.map(function(i) {
return "b"+i
}).join(",")
var strideArg = indices.map(function(i) {
return "c"+i
}).join(",")
code.push(
"function "+className+"(a," + shapeArg + "," + strideArg + ",d){this.data=a",
"this.shape=[" + shapeArg + "]",
"this.stride=[" + strideArg + "]",
"this.offset=d|0}",
"var proto="+className+".prototype",
"proto.dtype='"+dtype+"'",
"proto.dimension="+dimension)
//view.size:
code.push("Object.defineProperty(proto,'size',{get:function "+className+"_size(){\
return "+indices.map(function(i) { return "this.shape["+i+"]" }).join("*"),
"}})")
//view.order:
if(dimension === 1) {
code.push("proto.order=[0]")
} else {
code.push("Object.defineProperty(proto,'order',{get:")
if(dimension < 4) {
code.push("function "+className+"_order(){")
if(dimension === 2) {
code.push("return (Math.abs(this.stride[0])>Math.abs(this.stride[1]))?[1,0]:[0,1]}})")
} else if(dimension === 3) {
code.push(
"var s0=Math.abs(this.stride[0]),s1=Math.abs(this.stride[1]),s2=Math.abs(this.stride[2]);\
if(s0>s1){\
if(s1>s2){\
return [2,1,0];\
}else if(s0>s2){\
return [1,2,0];\
}else{\
return [1,0,2];\
}\
}else if(s0>s2){\
return [2,0,1];\
}else if(s2>s1){\
return [0,1,2];\
}else{\
return [0,2,1];\
}}})")
}
} else {
code.push("ORDER})")
}
}
//view.set(i0, ..., v):
code.push(
"proto.set=function "+className+"_set("+args.join(",")+",v){")
if(useGetters) {
code.push("return this.data.set("+index_str+",v)}")
} else {
code.push("return this.data["+index_str+"]=v}")
}
//view.get(i0, ...):
code.push("proto.get=function "+className+"_get("+args.join(",")+"){")
if(useGetters) {
code.push("return this.data.get("+index_str+")}")
} else {
code.push("return this.data["+index_str+"]}")
}
//view.index:
code.push(
"proto.index=function "+className+"_index(", args.join(), "){return "+index_str+"}")
//view.hi():
code.push("proto.hi=function "+className+"_hi("+args.join(",")+"){return new "+className+"(this.data,"+
indices.map(function(i) {
return ["(typeof i",i,"!=='number'||i",i,"<0)?this.shape[", i, "]:i", i,"|0"].join("")
}).join(",")+","+
indices.map(function(i) {
return "this.stride["+i + "]"
}).join(",")+",this.offset)}")
//view.lo():
var a_vars = indices.map(function(i) { return "a"+i+"=this.shape["+i+"]" })
var c_vars = indices.map(function(i) { return "c"+i+"=this.stride["+i+"]" })
code.push("proto.lo=function "+className+"_lo("+args.join(",")+"){var b=this.offset,d=0,"+a_vars.join(",")+","+c_vars.join(","))
for(var i=0; i<dimension; ++i) {
code.push(
"if(typeof i"+i+"==='number'&&i"+i+">=0){\
d=i"+i+"|0;\
b+=c"+i+"*d;\
a"+i+"-=d}")
}
code.push("return new "+className+"(this.data,"+
indices.map(function(i) {
return "a"+i
}).join(",")+","+
indices.map(function(i) {
return "c"+i
}).join(",")+",b)}")
//view.step():
code.push("proto.step=function "+className+"_step("+args.join(",")+"){var "+
indices.map(function(i) {
return "a"+i+"=this.shape["+i+"]"
}).join(",")+","+
indices.map(function(i) {
return "b"+i+"=this.stride["+i+"]"
}).join(",")+",c=this.offset,d=0,ceil=Math.ceil")
for(var i=0; i<dimension; ++i) {
code.push(
"if(typeof i"+i+"==='number'){\
d=i"+i+"|0;\
if(d<0){\
c+=b"+i+"*(a"+i+"-1);\
a"+i+"=ceil(-a"+i+"/d)\
}else{\
a"+i+"=ceil(a"+i+"/d)\
}\
b"+i+"*=d\
}")
}
code.push("return new "+className+"(this.data,"+
indices.map(function(i) {
return "a" + i
}).join(",")+","+
indices.map(function(i) {
return "b" + i
}).join(",")+",c)}")
//view.transpose():
var tShape = new Array(dimension)
var tStride = new Array(dimension)
for(var i=0; i<dimension; ++i) {
tShape[i] = "a[i"+i+"]"
tStride[i] = "b[i"+i+"]"
}
code.push("proto.transpose=function "+className+"_transpose("+args+"){"+
args.map(function(n,idx) { return n + "=(" + n + "===undefined?" + idx + ":" + n + "|0)"}).join(";"),
"var a=this.shape,b=this.stride;return new "+className+"(this.data,"+tShape.join(",")+","+tStride.join(",")+",this.offset)}")
//view.pick():
code.push("proto.pick=function "+className+"_pick("+args+"){var a=[],b=[],c=this.offset")
for(var i=0; i<dimension; ++i) {
code.push("if(typeof i"+i+"==='number'&&i"+i+">=0){c=(c+this.stride["+i+"]*i"+i+")|0}else{a.push(this.shape["+i+"]);b.push(this.stride["+i+"])}")
}
code.push("var ctor=CTOR_LIST[a.length+1];return ctor(this.data,a,b,c)}")
//Add return statement
code.push("return function construct_"+className+"(data,shape,stride,offset){return new "+className+"(data,"+
indices.map(function(i) {
return "shape["+i+"]"
}).join(",")+","+
indices.map(function(i) {
return "stride["+i+"]"
}).join(",")+",offset)}")
//Compile procedure
var procedure = new Function("CTOR_LIST", "ORDER", code.join("\n"))
return procedure(CACHED_CONSTRUCTORS[dtype], order)
}
// Classify a backing-store object as one of the ndarray dtype names.
// Precedence mirrors the original checks: Buffer first, then typed
// arrays (by their Object#toString tag), then plain arrays; everything
// else falls through to the "generic" get/set path.
function arrayDType(data) {
  if(hasBuffer && Buffer.isBuffer(data)) {
    return "buffer"
  }
  if(hasTypedArrays) {
    var tagToDType = {
      "[object Float64Array]": "float64",
      "[object Float32Array]": "float32",
      "[object Int8Array]": "int8",
      "[object Int16Array]": "int16",
      "[object Int32Array]": "int32",
      "[object Uint8Array]": "uint8",
      "[object Uint16Array]": "uint16",
      "[object Uint32Array]": "uint32",
      "[object Uint8ClampedArray]": "uint8_clamped"
    }
    var dtype = tagToDType[Object.prototype.toString.call(data)]
    if(dtype !== undefined) {
      return dtype
    }
  }
  if(Array.isArray(data)) {
    return "array"
  }
  return "generic"
}
var CACHED_CONSTRUCTORS = {
"float32":[],
"float64":[],
"int8":[],
"int16":[],
"int32":[],
"uint8":[],
"uint16":[],
"uint32":[],
"array":[],
"uint8_clamped":[],
"buffer":[],
"generic":[]
}
// Pre-seed every dtype's constructor cache with its dimension -1
// ("trivial", shapeless) view constructor. wrappedNDArrayCtor indexes
// the cache as ctor_list[d+1], so slot 0 must hold the -1 case; without
// running this, the no-argument ndarray() path reads an undefined
// constructor and crashes. The original declared this IIFE but never
// invoked it (`});` instead of `})();`).
;(function() {
for(var id in CACHED_CONSTRUCTORS) {
  CACHED_CONSTRUCTORS[id].push(compileConstructor(id, -1))
}
})();
function wrappedNDArrayCtor(data, shape, stride, offset) {
if(data === undefined) {
var ctor = CACHED_CONSTRUCTORS.array[0]
return ctor([])
} else if(typeof data === "number") {
data = [data]
}
if(shape === undefined) {
shape = [ data.length ]
}
var d = shape.length
if(stride === undefined) {
stride = new Array(d)
for(var i=d-1, sz=1; i>=0; --i) {
stride[i] = sz
sz *= shape[i]
}
}
if(offset === undefined) {
offset = 0
for(var i=0; i<d; ++i) {
if(stride[i] < 0) {
offset -= (shape[i]-1)*stride[i]
}
}
}
var dtype = arrayDType(data)
var ctor_list = CACHED_CONSTRUCTORS[dtype]
while(ctor_list.length <= d+1) {
ctor_list.push(compileConstructor(dtype, ctor_list.length-1))
}
var ctor = ctor_list[d+1]
return ctor(data, shape, stride, offset)
}
module.exports = wrappedNDArrayCtor
}).call(this,require("buffer").Buffer)
},{"buffer":1,"iota-array":7}],7:[function(require,module,exports){
"use strict"
// Build the integer range [0, 1, ..., n-1].
function iota(n) {
  var result = []
  for(var i = 0; i < n; ++i) {
    result.push(i)
  }
  return result
}
module.exports = iota
},{}]},{},[5]);
| NeoVand/networks3d | js/vendor/ndarray.js | JavaScript | mit | 45,269 |
package com.aspose.cells.model;
/**
 * Simple model/DTO holding a single optional {@link Link} property.
 */
public class SideWall {
  // Associated hypermedia link; null until set.
  private Link link = null;

  /**
   * @return the link associated with this object, or {@code null} if none
   *         has been set
   */
  public Link getLink() {
    return link;
  }

  /**
   * @param link the link to associate with this object
   */
  public void setLink(Link link) {
    this.link = link;
  }
}
| aspose-cells/Aspose.Cells-for-Cloud | SDKs/Aspose.Cells-Cloud-SDK-for-Java/src/main/java/com/aspose/cells/model/SideWall.java | Java | mit | 210 |
# Shared base for controllers in the Admin namespace. Admin-wide filters
# and helpers belong here; it currently adds nothing beyond
# ApplicationController.
class Admin::BaseController < ApplicationController
end
| HakubJozak/barbecue | test/dummy/app/controllers/admin/base_controller.rb | Ruby | mit | 56 |
package com.punchthrough.bean.sdk.internal.upload.sketch;
/**
 * States of a sketch upload. The constant names suggest the chronological
 * phases of an upload (idle, remote reset, start command, block transfer,
 * done) -- confirm against the code that drives these transitions.
 */
public enum SketchUploadState {
    INACTIVE, RESETTING_REMOTE, SENDING_START_COMMAND, SENDING_BLOCKS, FINISHED
}
| PunchThrough/bean-sdk-android | sdk/src/main/java/com/punchthrough/bean/sdk/internal/upload/sketch/SketchUploadState.java | Java | mit | 173 |
define('lodash/internal/createWrapper', ['exports', 'lodash/internal/baseSetData', 'lodash/internal/createBindWrapper', 'lodash/internal/createHybridWrapper', 'lodash/internal/createPartialWrapper', 'lodash/internal/getData', 'lodash/internal/mergeData', 'lodash/internal/setData'], function (exports, _lodashInternalBaseSetData, _lodashInternalCreateBindWrapper, _lodashInternalCreateHybridWrapper, _lodashInternalCreatePartialWrapper, _lodashInternalGetData, _lodashInternalMergeData, _lodashInternalSetData) {
'use strict';
/** Used to compose bitmasks for wrapper metadata. */
var BIND_FLAG = 1,
BIND_KEY_FLAG = 2,
PARTIAL_FLAG = 32,
PARTIAL_RIGHT_FLAG = 64;
/** Used as the `TypeError` message for "Functions" methods. */
var FUNC_ERROR_TEXT = 'Expected a function';
/* Native method references for those with the same name as other `lodash` methods. */
var nativeMax = Math.max;
/**
* Creates a function that either curries or invokes `func` with optional
* `this` binding and partially applied arguments.
*
* @private
* @param {Function|string} func The function or method name to reference.
* @param {number} bitmask The bitmask of flags.
* The bitmask may be composed of the following flags:
* 1 - `_.bind`
* 2 - `_.bindKey`
* 4 - `_.curry` or `_.curryRight` of a bound function
* 8 - `_.curry`
* 16 - `_.curryRight`
* 32 - `_.partial`
* 64 - `_.partialRight`
* 128 - `_.rearg`
* 256 - `_.ary`
* @param {*} [thisArg] The `this` binding of `func`.
* @param {Array} [partials] The arguments to be partially applied.
* @param {Array} [holders] The `partials` placeholder indexes.
* @param {Array} [argPos] The argument positions of the new function.
* @param {number} [ary] The arity cap of `func`.
* @param {number} [arity] The arity of `func`.
* @returns {Function} Returns the new wrapped function.
*/
function createWrapper(func, bitmask, thisArg, partials, holders, argPos, ary, arity) {
var isBindKey = bitmask & BIND_KEY_FLAG;
if (!isBindKey && typeof func != 'function') {
throw new TypeError(FUNC_ERROR_TEXT);
}
var length = partials ? partials.length : 0;
if (!length) {
bitmask &= ~(PARTIAL_FLAG | PARTIAL_RIGHT_FLAG);
partials = holders = undefined;
}
length -= holders ? holders.length : 0;
if (bitmask & PARTIAL_RIGHT_FLAG) {
var partialsRight = partials,
holdersRight = holders;
partials = holders = undefined;
}
var data = isBindKey ? undefined : (0, _lodashInternalGetData['default'])(func),
newData = [func, bitmask, thisArg, partials, holders, partialsRight, holdersRight, argPos, ary, arity];
if (data) {
(0, _lodashInternalMergeData['default'])(newData, data);
bitmask = newData[1];
arity = newData[9];
}
newData[9] = arity == null ? isBindKey ? 0 : func.length : nativeMax(arity - length, 0) || 0;
if (bitmask == BIND_FLAG) {
var result = (0, _lodashInternalCreateBindWrapper['default'])(newData[0], newData[2]);
} else if ((bitmask == PARTIAL_FLAG || bitmask == (BIND_FLAG | PARTIAL_FLAG)) && !newData[4].length) {
result = _lodashInternalCreatePartialWrapper['default'].apply(undefined, newData);
} else {
result = _lodashInternalCreateHybridWrapper['default'].apply(undefined, newData);
}
var setter = data ? _lodashInternalBaseSetData['default'] : _lodashInternalSetData['default'];
return setter(result, newData);
}
exports['default'] = createWrapper;
}); | hoka-plus/p-01-web | tmp/babel-output_path-hOv4KMmE.tmp/lodash/internal/createWrapper.js | JavaScript | mit | 3,598 |
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
using Xunit;
using System;
using System.Collections;
using System.Collections.Specialized;
namespace System.Collections.Specialized.Tests
{
    /// <summary>
    /// Tests for the enumerator returned by NameObjectCollectionBase.GetEnumerator:
    /// empty and populated collections, plus enumerator invalidation after the
    /// underlying collection is modified or cleared.
    /// </summary>
    public class GetEnumeratorNameObjectCollectionBaseTests
    {
        // Prefix used in assertion failure messages.
        private String _strErr = "Error!";
        [Fact]
        public void Test01()
        {
            MyNameObjectCollection noc = new MyNameObjectCollection();
            IEnumerator en = null;
            bool res;
            // [] Enumerator for empty collection
            // Get enumerator
            en = noc.GetEnumerator();
            // MoveNext should return false
            res = en.MoveNext();
            if (res)
            {
                Assert.False(true, _strErr + "MoveNext returned true");
            }
            // Attempt to get Current should result in exception
            Assert.Throws<InvalidOperationException>(() => { String curr = (String)en.Current; });
            // [] Enumerator for non-empty collection
            // Add items
            for (int i = 0; i < 10; i++)
            {
                noc.Add("key_" + i.ToString(), new Foo());
            }
            // Get enumerator
            en = noc.GetEnumerator();
            // Attempt to get Current before the first MoveNext should throw
            Assert.Throws<InvalidOperationException>(() => { String curr = (String)en.Current; });
            // Iterate over collection
            for (int i = 0; i < noc.Count; i++)
            {
                // MoveNext should return true
                res = en.MoveNext();
                if (!res)
                {
                    Assert.False(true, string.Format(_strErr + "#{0}, MoveNext returned false", i));
                }
                // Check current
                String curr = (String)en.Current;
                if (noc[curr] == null)
                {
                    Assert.False(true, string.Format(_strErr + "#{0}, Current={1}, key not found in collection", i, curr));
                }
                // Check current again: reading Current twice must be stable
                String current1 = (String)en.Current;
                if (current1 != curr)
                {
                    Assert.False(true, string.Format(_strErr + "#{0}, Value of Current changed! Was {1}, now {2}", i, curr, current1));
                }
            }
            // next MoveNext should bring us outside of the collection, return false
            res = en.MoveNext();
            if (res)
            {
                Assert.False(true, _strErr + "MoveNext returned true");
            }
            // Attempt to get Current should result in exception
            Assert.Throws<InvalidOperationException>(() => { String curr = (String)en.Current; });
            // Reset
            en.Reset();
            // Attempt to get Current should result in exception
            Assert.Throws<InvalidOperationException>(() => { String curr = (String)en.Current; });
            // Modify collection and then try MoveNext, Current, Reset
            // new collection
            noc = new MyNameObjectCollection();
            noc.Add("key1", new Foo());
            noc.Add("key2", new Foo());
            noc.Add("key3", new Foo());
            en = noc.GetEnumerator();
            // MoveNext
            if (!en.MoveNext())
            {
                Assert.False(true, _strErr + "MoveNext returned false");
            }
            // Current
            String current = (String)en.Current;
            // Modify collection: this should invalidate the live enumerator
            noc.RemoveAt(0);
            if (noc.Count != 2)
            {
                Assert.False(true, string.Format(_strErr + "Collection Count wrong. Expected {0}, got {1}", 2, noc.Count));
            }
            // Current should not throw, but no guarantee is made on the return value
            string curr1 = (String)en.Current;
            // MoveNext should throw exception
            Assert.Throws<InvalidOperationException>(() => { en.MoveNext(); });
            // Reset should throw exception
            Assert.Throws<InvalidOperationException>(() => { en.Reset(); });
            // Current should not throw, but no guarantee is made on the return value
            curr1 = (String)en.Current;
            // MoveNext should still throw exception if collection is ReadOnly
            noc.IsReadOnly = true;
            Assert.Throws<InvalidOperationException>(() => { en.MoveNext(); });
            // Clear collection and then try MoveNext, Current, Reset
            // new collection
            noc = new MyNameObjectCollection();
            noc.Add("key1", new Foo());
            noc.Add("key2", new Foo());
            noc.Add("key3", new Foo());
            en = noc.GetEnumerator();
            // MoveNext
            if (!en.MoveNext())
            {
                Assert.False(true, _strErr + "MoveNext returned false");
            }
            // Current
            current = (String)en.Current;
            // Modify collection: Clear should also invalidate the enumerator
            noc.Clear();
            if (noc.Count != 0)
            {
                Assert.False(true, string.Format(_strErr + "Collection Count wrong. Expected {0}, got {1}", 2, noc.Count));
            }
            // After Clear, Current throws as well (unlike the RemoveAt case above)
            Assert.Throws<InvalidOperationException>(() => { String curr = (String)en.Current; });
            // MoveNext should throw exception
            Assert.Throws<InvalidOperationException>(() => { en.MoveNext(); });
            // Reset should throw exception
            Assert.Throws<InvalidOperationException>(() => { en.Reset(); });
        }
    }
}
| mafiya69/corefx | src/System.Collections.Specialized/tests/NameObjectCollectionBase/GetEnumeratorTests.cs | C# | mit | 5,843 |
Clazz.declarePackage ("JU");
Clazz.load (["JU.V3"], "JU.Measure", ["java.lang.Float", "javajs.api.Interface", "JU.Lst", "$.P3", "$.P4", "$.Quat"], function () {
c$ = Clazz.declareType (JU, "Measure");
c$.computeAngle = Clazz.defineMethod (c$, "computeAngle",
function (pointA, pointB, pointC, vectorBA, vectorBC, asDegrees) {
vectorBA.sub2 (pointA, pointB);
vectorBC.sub2 (pointC, pointB);
var angle = vectorBA.angle (vectorBC);
return (asDegrees ? angle / 0.017453292 : angle);
}, "JU.T3,JU.T3,JU.T3,JU.V3,JU.V3,~B");
c$.computeAngleABC = Clazz.defineMethod (c$, "computeAngleABC",
function (pointA, pointB, pointC, asDegrees) {
var vectorBA = new JU.V3 ();
var vectorBC = new JU.V3 ();
return JU.Measure.computeAngle (pointA, pointB, pointC, vectorBA, vectorBC, asDegrees);
}, "JU.T3,JU.T3,JU.T3,~B");
c$.computeTorsion = Clazz.defineMethod (c$, "computeTorsion",
function (p1, p2, p3, p4, asDegrees) {
var ijx = p1.x - p2.x;
var ijy = p1.y - p2.y;
var ijz = p1.z - p2.z;
var kjx = p3.x - p2.x;
var kjy = p3.y - p2.y;
var kjz = p3.z - p2.z;
var klx = p3.x - p4.x;
var kly = p3.y - p4.y;
var klz = p3.z - p4.z;
var ax = ijy * kjz - ijz * kjy;
var ay = ijz * kjx - ijx * kjz;
var az = ijx * kjy - ijy * kjx;
var cx = kjy * klz - kjz * kly;
var cy = kjz * klx - kjx * klz;
var cz = kjx * kly - kjy * klx;
var ai2 = 1 / (ax * ax + ay * ay + az * az);
var ci2 = 1 / (cx * cx + cy * cy + cz * cz);
var ai = Math.sqrt (ai2);
var ci = Math.sqrt (ci2);
var denom = ai * ci;
var cross = ax * cx + ay * cy + az * cz;
var cosang = cross * denom;
if (cosang > 1) {
cosang = 1;
}if (cosang < -1) {
cosang = -1;
}var torsion = Math.acos (cosang);
var dot = ijx * cx + ijy * cy + ijz * cz;
var absDot = Math.abs (dot);
torsion = (dot / absDot > 0) ? torsion : -torsion;
return (asDegrees ? torsion / 0.017453292 : torsion);
}, "JU.T3,JU.T3,JU.T3,JU.T3,~B");
c$.computeHelicalAxis = Clazz.defineMethod (c$, "computeHelicalAxis",
function (a, b, dq) {
var vab = new JU.V3 ();
vab.sub2 (b, a);
var theta = dq.getTheta ();
var n = dq.getNormal ();
var v_dot_n = vab.dot (n);
if (Math.abs (v_dot_n) < 0.0001) v_dot_n = 0;
var va_prime_d = new JU.V3 ();
va_prime_d.cross (vab, n);
if (va_prime_d.dot (va_prime_d) != 0) va_prime_d.normalize ();
var vda = new JU.V3 ();
var vcb = JU.V3.newV (n);
if (v_dot_n == 0) v_dot_n = 1.4E-45;
vcb.scale (v_dot_n);
vda.sub2 (vcb, vab);
vda.scale (0.5);
va_prime_d.scale (theta == 0 ? 0 : (vda.length () / Math.tan (theta / 2 / 180 * 3.141592653589793)));
var r = JU.V3.newV (va_prime_d);
if (theta != 0) r.add (vda);
var pt_a_prime = JU.P3.newP (a);
pt_a_prime.sub (r);
if (v_dot_n != 1.4E-45) n.scale (v_dot_n);
var pt_b_prime = JU.P3.newP (pt_a_prime);
pt_b_prime.add (n);
theta = JU.Measure.computeTorsion (a, pt_a_prime, pt_b_prime, b, true);
if (Float.isNaN (theta) || r.length () < 0.0001) theta = dq.getThetaDirectedV (n);
var residuesPerTurn = Math.abs (theta == 0 ? 0 : 360 / theta);
var pitch = Math.abs (v_dot_n == 1.4E-45 ? 0 : n.length () * (theta == 0 ? 1 : 360 / theta));
return Clazz.newArray (-1, [pt_a_prime, n, r, JU.P3.new3 (theta, pitch, residuesPerTurn), pt_b_prime]);
}, "JU.P3,JU.P3,JU.Quat");
c$.getPlaneThroughPoints = Clazz.defineMethod (c$, "getPlaneThroughPoints",
function (pointA, pointB, pointC, vNorm, vAB, plane) {
var w = JU.Measure.getNormalThroughPoints (pointA, pointB, pointC, vNorm, vAB);
plane.set4 (vNorm.x, vNorm.y, vNorm.z, w);
return plane;
}, "JU.T3,JU.T3,JU.T3,JU.V3,JU.V3,JU.P4");
c$.getPlaneThroughPoint = Clazz.defineMethod (c$, "getPlaneThroughPoint",
function (pt, normal, plane) {
plane.set4 (normal.x, normal.y, normal.z, -normal.dot (pt));
}, "JU.T3,JU.V3,JU.P4");
c$.distanceToPlane = Clazz.defineMethod (c$, "distanceToPlane",
function (plane, pt) {
return (plane == null ? NaN : (plane.dot (pt) + plane.w) / Math.sqrt (plane.dot (plane)));
}, "JU.P4,JU.T3");
c$.directedDistanceToPlane = Clazz.defineMethod (c$, "directedDistanceToPlane",
function (pt, plane, ptref) {
var f = plane.dot (pt) + plane.w;
var f1 = plane.dot (ptref) + plane.w;
return Math.signum (f1) * f / Math.sqrt (plane.dot (plane));
}, "JU.P3,JU.P4,JU.P3");
c$.distanceToPlaneD = Clazz.defineMethod (c$, "distanceToPlaneD",
function (plane, d, pt) {
return (plane == null ? NaN : (plane.dot (pt) + plane.w) / d);
}, "JU.P4,~N,JU.P3");
c$.distanceToPlaneV = Clazz.defineMethod (c$, "distanceToPlaneV",
function (norm, w, pt) {
return (norm == null ? NaN : (norm.dot (pt) + w) / Math.sqrt (norm.dot (norm)));
}, "JU.V3,~N,JU.P3");
c$.calcNormalizedNormal = Clazz.defineMethod (c$, "calcNormalizedNormal",
function (pointA, pointB, pointC, vNormNorm, vAB) {
vAB.sub2 (pointB, pointA);
vNormNorm.sub2 (pointC, pointA);
vNormNorm.cross (vAB, vNormNorm);
vNormNorm.normalize ();
}, "JU.T3,JU.T3,JU.T3,JU.V3,JU.V3");
c$.getDirectedNormalThroughPoints = Clazz.defineMethod (c$, "getDirectedNormalThroughPoints",
function (pointA, pointB, pointC, ptRef, vNorm, vAB) {
var nd = JU.Measure.getNormalThroughPoints (pointA, pointB, pointC, vNorm, vAB);
if (ptRef != null) {
var pt0 = JU.P3.newP (pointA);
pt0.add (vNorm);
var d = pt0.distance (ptRef);
pt0.sub2 (pointA, vNorm);
if (d > pt0.distance (ptRef)) {
vNorm.scale (-1);
nd = -nd;
}}return nd;
}, "JU.T3,JU.T3,JU.T3,JU.T3,JU.V3,JU.V3");
c$.getNormalThroughPoints = Clazz.defineMethod (c$, "getNormalThroughPoints",
function (pointA, pointB, pointC, vNorm, vTemp) {
JU.Measure.calcNormalizedNormal (pointA, pointB, pointC, vNorm, vTemp);
vTemp.setT (pointA);
return -vTemp.dot (vNorm);
}, "JU.T3,JU.T3,JU.T3,JU.V3,JU.V3");
c$.getPlaneProjection = Clazz.defineMethod (c$, "getPlaneProjection",
function (pt, plane, ptProj, vNorm) {
var dist = JU.Measure.distanceToPlane (plane, pt);
vNorm.set (plane.x, plane.y, plane.z);
vNorm.normalize ();
vNorm.scale (-dist);
ptProj.add2 (pt, vNorm);
}, "JU.P3,JU.P4,JU.P3,JU.V3");
c$.getNormalFromCenter = Clazz.defineMethod (c$, "getNormalFromCenter",
function (ptCenter, ptA, ptB, ptC, isOutward, normal, vTemp) {
var d = JU.Measure.getNormalThroughPoints (ptA, ptB, ptC, normal, vTemp);
var isReversed = (JU.Measure.distanceToPlaneV (normal, d, ptCenter) > 0);
if (isReversed == isOutward) normal.scale (-1.0);
return !isReversed;
}, "JU.P3,JU.P3,JU.P3,JU.P3,~B,JU.V3,JU.V3");
c$.getNormalToLine = Clazz.defineMethod (c$, "getNormalToLine",
function (pointA, pointB, vNormNorm) {
vNormNorm.sub2 (pointA, pointB);
vNormNorm.cross (vNormNorm, JU.Measure.axisY);
vNormNorm.normalize ();
if (Float.isNaN (vNormNorm.x)) vNormNorm.set (1, 0, 0);
}, "JU.P3,JU.P3,JU.V3");
c$.getBisectingPlane = Clazz.defineMethod (c$, "getBisectingPlane",
function (pointA, vAB, ptTemp, vTemp, plane) {
ptTemp.scaleAdd2 (0.5, vAB, pointA);
vTemp.setT (vAB);
vTemp.normalize ();
JU.Measure.getPlaneThroughPoint (ptTemp, vTemp, plane);
}, "JU.P3,JU.V3,JU.T3,JU.V3,JU.P4");
c$.projectOntoAxis = Clazz.defineMethod (c$, "projectOntoAxis",
function (point, axisA, axisUnitVector, vectorProjection) {
vectorProjection.sub2 (point, axisA);
var projectedLength = vectorProjection.dot (axisUnitVector);
point.scaleAdd2 (projectedLength, axisUnitVector, axisA);
vectorProjection.sub2 (point, axisA);
}, "JU.P3,JU.P3,JU.V3,JU.V3");
c$.calcBestAxisThroughPoints = Clazz.defineMethod (c$, "calcBestAxisThroughPoints",
function (points, axisA, axisUnitVector, vectorProjection, nTriesMax) {
var nPoints = points.length;
axisA.setT (points[0]);
axisUnitVector.sub2 (points[nPoints - 1], axisA);
axisUnitVector.normalize ();
JU.Measure.calcAveragePointN (points, nPoints, axisA);
var nTries = 0;
while (nTries++ < nTriesMax && JU.Measure.findAxis (points, nPoints, axisA, axisUnitVector, vectorProjection) > 0.001) {
}
var tempA = JU.P3.newP (points[0]);
JU.Measure.projectOntoAxis (tempA, axisA, axisUnitVector, vectorProjection);
axisA.setT (tempA);
}, "~A,JU.P3,JU.V3,JU.V3,~N");
c$.findAxis = Clazz.defineMethod (c$, "findAxis",
function (points, nPoints, axisA, axisUnitVector, vectorProjection) {
var sumXiYi = new JU.V3 ();
var vTemp = new JU.V3 ();
var pt = new JU.P3 ();
var ptProj = new JU.P3 ();
var a = JU.V3.newV (axisUnitVector);
var sum_Xi2 = 0;
for (var i = nPoints; --i >= 0; ) {
pt.setT (points[i]);
ptProj.setT (pt);
JU.Measure.projectOntoAxis (ptProj, axisA, axisUnitVector, vectorProjection);
vTemp.sub2 (pt, ptProj);
vTemp.cross (vectorProjection, vTemp);
sumXiYi.add (vTemp);
sum_Xi2 += vectorProjection.lengthSquared ();
}
var m = JU.V3.newV (sumXiYi);
m.scale (1 / sum_Xi2);
vTemp.cross (m, axisUnitVector);
axisUnitVector.add (vTemp);
axisUnitVector.normalize ();
vTemp.sub2 (axisUnitVector, a);
return vTemp.length ();
}, "~A,~N,JU.P3,JU.V3,JU.V3");
c$.calcAveragePoint = Clazz.defineMethod (c$, "calcAveragePoint",
function (pointA, pointB, pointC) {
pointC.set ((pointA.x + pointB.x) / 2, (pointA.y + pointB.y) / 2, (pointA.z + pointB.z) / 2);
}, "JU.P3,JU.P3,JU.P3");
c$.calcAveragePointN = Clazz.defineMethod (c$, "calcAveragePointN",
function (points, nPoints, averagePoint) {
averagePoint.setT (points[0]);
for (var i = 1; i < nPoints; i++) averagePoint.add (points[i]);
averagePoint.scale (1 / nPoints);
}, "~A,~N,JU.P3");
c$.transformPoints = Clazz.defineMethod (c$, "transformPoints",
function (vPts, m4, center) {
var v = new JU.Lst ();
for (var i = 0; i < vPts.size (); i++) {
var pt = JU.P3.newP (vPts.get (i));
pt.sub (center);
m4.rotTrans (pt);
pt.add (center);
v.addLast (pt);
}
return v;
}, "JU.Lst,JU.M4,JU.P3");
c$.isInTetrahedron = Clazz.defineMethod (c$, "isInTetrahedron",
function (pt, ptA, ptB, ptC, ptD, plane, vTemp, vTemp2, fullyEnclosed) {
var b = (JU.Measure.distanceToPlane (JU.Measure.getPlaneThroughPoints (ptC, ptD, ptA, vTemp, vTemp2, plane), pt) >= 0);
if (b != (JU.Measure.distanceToPlane (JU.Measure.getPlaneThroughPoints (ptA, ptD, ptB, vTemp, vTemp2, plane), pt) >= 0)) return false;
if (b != (JU.Measure.distanceToPlane (JU.Measure.getPlaneThroughPoints (ptB, ptD, ptC, vTemp, vTemp2, plane), pt) >= 0)) return false;
var d = JU.Measure.distanceToPlane (JU.Measure.getPlaneThroughPoints (ptA, ptB, ptC, vTemp, vTemp2, plane), pt);
if (fullyEnclosed) return (b == (d >= 0));
var d1 = JU.Measure.distanceToPlane (plane, ptD);
return d1 * d <= 0 || Math.abs (d1) > Math.abs (d);
}, "JU.P3,JU.P3,JU.P3,JU.P3,JU.P3,JU.P4,JU.V3,JU.V3,~B");
c$.getIntersectionPP = Clazz.defineMethod (c$, "getIntersectionPP",
function (plane1, plane2) {
var a1 = plane1.x;
var b1 = plane1.y;
var c1 = plane1.z;
var d1 = plane1.w;
var a2 = plane2.x;
var b2 = plane2.y;
var c2 = plane2.z;
var d2 = plane2.w;
var norm1 = JU.V3.new3 (a1, b1, c1);
var norm2 = JU.V3.new3 (a2, b2, c2);
var nxn = new JU.V3 ();
nxn.cross (norm1, norm2);
var ax = Math.abs (nxn.x);
var ay = Math.abs (nxn.y);
var az = Math.abs (nxn.z);
var x;
var y;
var z;
var diff;
var type = (ax > ay ? (ax > az ? 1 : 3) : ay > az ? 2 : 3);
switch (type) {
case 1:
x = 0;
diff = (b1 * c2 - b2 * c1);
if (Math.abs (diff) < 0.01) return null;
y = (c1 * d2 - c2 * d1) / diff;
z = (b2 * d1 - d2 * b1) / diff;
break;
case 2:
diff = (a1 * c2 - a2 * c1);
if (Math.abs (diff) < 0.01) return null;
x = (c1 * d2 - c2 * d1) / diff;
y = 0;
z = (a2 * d1 - d2 * a1) / diff;
break;
case 3:
default:
diff = (a1 * b2 - a2 * b1);
if (Math.abs (diff) < 0.01) return null;
x = (b1 * d2 - b2 * d1) / diff;
y = (a2 * d1 - d2 * a1) / diff;
z = 0;
}
var list = new JU.Lst ();
list.addLast (JU.P3.new3 (x, y, z));
nxn.normalize ();
list.addLast (nxn);
return list;
}, "JU.P4,JU.P4");
c$.getIntersection = Clazz.defineMethod (c$, "getIntersection",
function (pt1, v, plane, ptRet, tempNorm, vTemp) {
JU.Measure.getPlaneProjection (pt1, plane, ptRet, tempNorm);
tempNorm.set (plane.x, plane.y, plane.z);
tempNorm.normalize ();
if (v == null) v = JU.V3.newV (tempNorm);
var l_dot_n = v.dot (tempNorm);
if (Math.abs (l_dot_n) < 0.01) return null;
vTemp.sub2 (ptRet, pt1);
ptRet.scaleAdd2 (vTemp.dot (tempNorm) / l_dot_n, v, pt1);
return ptRet;
}, "JU.P3,JU.V3,JU.P4,JU.P3,JU.V3,JU.V3");
c$.calculateQuaternionRotation = Clazz.defineMethod (c$, "calculateQuaternionRotation",
function (centerAndPoints, retStddev) {
retStddev[1] = NaN;
var q = new JU.Quat ();
if (centerAndPoints[0].length == 1 || centerAndPoints[0].length != centerAndPoints[1].length) return q;
var n = centerAndPoints[0].length - 1;
if (n < 2) return q;
var Sxx = 0;
var Sxy = 0;
var Sxz = 0;
var Syx = 0;
var Syy = 0;
var Syz = 0;
var Szx = 0;
var Szy = 0;
var Szz = 0;
var ptA = new JU.P3 ();
var ptB = new JU.P3 ();
for (var i = n + 1; --i >= 1; ) {
var aij = centerAndPoints[0][i];
var bij = centerAndPoints[1][i];
ptA.sub2 (aij, centerAndPoints[0][0]);
ptB.sub2 (bij, centerAndPoints[0][1]);
Sxx += ptA.x * ptB.x;
Sxy += ptA.x * ptB.y;
Sxz += ptA.x * ptB.z;
Syx += ptA.y * ptB.x;
Syy += ptA.y * ptB.y;
Syz += ptA.y * ptB.z;
Szx += ptA.z * ptB.x;
Szy += ptA.z * ptB.y;
Szz += ptA.z * ptB.z;
}
retStddev[0] = JU.Measure.getRmsd (centerAndPoints, q);
var N = Clazz.newDoubleArray (4, 4, 0);
N[0][0] = Sxx + Syy + Szz;
N[0][1] = N[1][0] = Syz - Szy;
N[0][2] = N[2][0] = Szx - Sxz;
N[0][3] = N[3][0] = Sxy - Syx;
N[1][1] = Sxx - Syy - Szz;
N[1][2] = N[2][1] = Sxy + Syx;
N[1][3] = N[3][1] = Szx + Sxz;
N[2][2] = -Sxx + Syy - Szz;
N[2][3] = N[3][2] = Syz + Szy;
N[3][3] = -Sxx - Syy + Szz;
var v = (javajs.api.Interface.getInterface ("JU.Eigen")).setM (N).getEigenvectorsFloatTransposed ()[3];
q = JU.Quat.newP4 (JU.P4.new4 (v[1], v[2], v[3], v[0]));
retStddev[1] = JU.Measure.getRmsd (centerAndPoints, q);
return q;
}, "~A,~A");
c$.getTransformMatrix4 = Clazz.defineMethod (c$, "getTransformMatrix4",
function (ptsA, ptsB, m, centerA) {
var cptsA = JU.Measure.getCenterAndPoints (ptsA);
var cptsB = JU.Measure.getCenterAndPoints (ptsB);
var retStddev = Clazz.newFloatArray (2, 0);
var q = JU.Measure.calculateQuaternionRotation ( Clazz.newArray (-1, [cptsA, cptsB]), retStddev);
var r = q.getMatrix ();
if (centerA == null) r.rotate (cptsA[0]);
else centerA.setT (cptsA[0]);
var t = JU.V3.newVsub (cptsB[0], cptsA[0]);
m.setMV (r, t);
return retStddev[1];
}, "JU.Lst,JU.Lst,JU.M4,JU.P3");
c$.getCenterAndPoints = Clazz.defineMethod (c$, "getCenterAndPoints",
function (vPts) {
var n = vPts.size ();
var pts = new Array (n + 1);
pts[0] = new JU.P3 ();
if (n > 0) {
for (var i = 0; i < n; i++) {
pts[0].add (pts[i + 1] = vPts.get (i));
}
pts[0].scale (1 / n);
}return pts;
}, "JU.Lst");
c$.getRmsd = Clazz.defineMethod (c$, "getRmsd",
function (centerAndPoints, q) {
var sum2 = 0;
var ptsA = centerAndPoints[0];
var ptsB = centerAndPoints[1];
var cA = ptsA[0];
var cB = ptsB[0];
var n = ptsA.length - 1;
var ptAnew = new JU.P3 ();
for (var i = n + 1; --i >= 1; ) {
ptAnew.sub2 (ptsA[i], cA);
q.transform2 (ptAnew, ptAnew).add (cB);
sum2 += ptAnew.distanceSquared (ptsB[i]);
}
return Math.sqrt (sum2 / n);
}, "~A,JU.Quat");
Clazz.defineStatics (c$,
"radiansPerDegree", (0.017453292519943295));
c$.axisY = c$.prototype.axisY = JU.V3.new3 (0, 1, 0);
});
| sandipde/Interactive-Sketchmap-Visualizer | static/jmol/j2s/JU/Measure.js | JavaScript | mit | 15,234 |
/*
-----------------------------------------------------------------------------
This source file is part of OGRE
(Object-oriented Graphics Rendering Engine)
For the latest info, see http://www.ogre3d.org/
Copyright (c) 2000-2011 Torus Knot Software Ltd
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
-----------------------------------------------------------------------------
*/
#include "OgreQuake3Shader.h"
#include "OgreSceneManager.h"
#include "OgreMaterial.h"
#include "OgreTechnique.h"
#include "OgrePass.h"
#include "OgreTextureUnitState.h"
#include "OgreMath.h"
#include "OgreLogManager.h"
#include "OgreTextureManager.h"
#include "OgreRoot.h"
#include "OgreMaterialManager.h"
namespace Ogre {
//-----------------------------------------------------------------------
Quake3Shader::Quake3Shader(const String& name)
{
mName = name;
numPasses = 0;
deformFunc = DEFORM_FUNC_NONE;
farbox = false;
skyDome = false;
flags = 0;
fog = false;
cullMode = MANUAL_CULL_BACK;
}
//-----------------------------------------------------------------------
    // Nothing to release explicitly; members clean up via their own destructors.
    Quake3Shader::~Quake3Shader()
    {
    }
//-----------------------------------------------------------------------
    /** Builds an Ogre Material named "<shaderName>#<lightmapNumber>" from this
        parsed Quake3 shader. A single Ogre pass is used; each Quake3 shader
        pass becomes one TextureUnitState on it. Returns the loaded material. */
    MaterialPtr Quake3Shader::createAsMaterial(int lightmapNumber)
    {
        String matName;
        StringUtil::StrStreamType str;
        String resourceGroup = ResourceGroupManager::getSingleton().getWorldResourceGroupName();
        // Materials are lightmap-specific, so bake the lightmap index into the name.
        str << mName << "#" << lightmapNumber;
        matName = str.str();
        MaterialPtr mat = MaterialManager::getSingleton().create(matName,
            resourceGroup);
        Ogre::Pass* ogrePass = mat->getTechnique(0)->getPass(0);
        LogManager::getSingleton().logMessage("Using Q3 shader " + mName, LML_CRITICAL);
        // Translate each Quake3 pass into a texture unit on the single Ogre pass.
        for (int p = 0; p < numPasses; ++p)
        {
            TextureUnitState* t;
            // Create basic texture
            if (pass[p].textureName == "$lightmap")
            {
                // Special texture name resolved against the BSP's lightmaps.
                StringUtil::StrStreamType str2;
                str2 << "@lightmap" << lightmapNumber;
                t = ogrePass->createTextureUnitState(str2.str());
            }
            // Animated texture support
            else if (pass[p].animNumFrames > 0)
            {
                Real sequenceTime = pass[p].animNumFrames / pass[p].animFps;
                /* Pre-load textures
                We need to know if each one was loaded OK since extensions may change for each
                Quake3 can still include alternate extension filenames e.g. jpg instead of tga
                Pain in the arse - have to check for each frame as letters<n>.tga for example
                is different per frame!
                */
                for (unsigned int alt = 0; alt < pass[p].animNumFrames; ++alt)
                {
                    if (!ResourceGroupManager::getSingleton().resourceExists(
                        resourceGroup, pass[p].frames[alt]))
                    {
                        // Try alternate extension
                        pass[p].frames[alt] = getAlternateName(pass[p].frames[alt]);
                        if (!ResourceGroupManager::getSingleton().resourceExists(
                            resourceGroup, pass[p].frames[alt]))
                        {
                            // stuffed - no texture; skip this frame only
                            continue;
                        }
                    }
                }
                t = ogrePass->createTextureUnitState("");
                t->setAnimatedTextureName(pass[p].frames, pass[p].animNumFrames, sequenceTime);
            }
            else
            {
                // Quake3 can still include alternate extension filenames e.g. jpg instead of tga
                // Pain in the arse - have to check for failure
                if (!ResourceGroupManager::getSingleton().resourceExists(
                    resourceGroup, pass[p].textureName))
                {
                    // Try alternate extension
                    pass[p].textureName = getAlternateName(pass[p].textureName);
                    if (!ResourceGroupManager::getSingleton().resourceExists(
                        resourceGroup, pass[p].textureName))
                    {
                        // stuffed - no texture; skip this whole pass
                        continue;
                    }
                }
                t = ogrePass->createTextureUnitState(pass[p].textureName);
            }
            // Blending
            if (p == 0)
            {
                // First pass defines the material's scene blend.
                // scene blend
                mat->setSceneBlending(pass[p].blendSrc, pass[p].blendDest);
                if (mat->isTransparent())
                    mat->setDepthWriteEnabled(false);
                t->setColourOperation(LBO_REPLACE);
                // Alpha mode
                ogrePass->setAlphaRejectSettings(
                    pass[p].alphaFunc, pass[p].alphaVal);
            }
            else
            {
                if (pass[p].customBlend)
                {
                    // Fallback for now
                    t->setColourOperation(LBO_MODULATE);
                }
                else
                {
                    // simple layer blend
                    t->setColourOperation(pass[p].blend);
                }
                // Alpha mode, prefer 'most alphary' (strictest rejection wins)
                CompareFunction currFunc = ogrePass->getAlphaRejectFunction();
                unsigned char currVal = ogrePass->getAlphaRejectValue();
                if (pass[p].alphaFunc > currFunc ||
                    (pass[p].alphaFunc == currFunc && pass[p].alphaVal < currVal))
                {
                    ogrePass->setAlphaRejectSettings(
                        pass[p].alphaFunc, pass[p].alphaVal);
                }
            }
            // Tex coords: set 0 = base UVs, set 1 = lightmap UVs
            if (pass[p].texGen == TEXGEN_BASE)
            {
                t->setTextureCoordSet(0);
            }
            else if (pass[p].texGen == TEXGEN_LIGHTMAP)
            {
                t->setTextureCoordSet(1);
            }
            else if (pass[p].texGen == TEXGEN_ENVIRONMENT)
            {
                t->setEnvironmentMap(true, TextureUnitState::ENV_PLANAR);
            }
            // Tex mod
            // Scale
            t->setTextureUScale(pass[p].tcModScale[0]);
            t->setTextureVScale(pass[p].tcModScale[1]);
            // Procedural mods
            // Custom - don't use mod if generating environment
            // Because I do env a different way it look horrible
            if (pass[p].texGen != TEXGEN_ENVIRONMENT)
            {
                if (pass[p].tcModRotate)
                {
                    t->setRotateAnimation(pass[p].tcModRotate);
                }
                if (pass[p].tcModScroll[0] || pass[p].tcModScroll[1])
                {
                    if (pass[p].tcModTurbOn)
                    {
                        // Turbulent scroll
                        if (pass[p].tcModScroll[0])
                        {
                            t->setTransformAnimation(TextureUnitState::TT_TRANSLATE_U, WFT_SINE,
                                pass[p].tcModTurb[0], pass[p].tcModTurb[3], pass[p].tcModTurb[2], pass[p].tcModTurb[1]);
                        }
                        if (pass[p].tcModScroll[1])
                        {
                            t->setTransformAnimation(TextureUnitState::TT_TRANSLATE_V, WFT_SINE,
                                pass[p].tcModTurb[0], pass[p].tcModTurb[3], pass[p].tcModTurb[2], pass[p].tcModTurb[1]);
                        }
                    }
                    else
                    {
                        // Constant scroll
                        t->setScrollAnimation(pass[p].tcModScroll[0], pass[p].tcModScroll[1]);
                    }
                }
                if (pass[p].tcModStretchWave != SHADER_FUNC_NONE)
                {
                    // Map the Quake3 waveform enum onto Ogre's WaveformType.
                    WaveformType wft = WFT_SINE;
                    switch(pass[p].tcModStretchWave)
                    {
                    case SHADER_FUNC_SIN:
                        wft = WFT_SINE;
                        break;
                    case SHADER_FUNC_TRIANGLE:
                        wft = WFT_TRIANGLE;
                        break;
                    case SHADER_FUNC_SQUARE:
                        wft = WFT_SQUARE;
                        break;
                    case SHADER_FUNC_SAWTOOTH:
                        wft = WFT_SAWTOOTH;
                        break;
                    case SHADER_FUNC_INVERSESAWTOOTH:
                        wft = WFT_INVERSE_SAWTOOTH;
                        break;
                    default:
                        break;
                    }
                    // Create wave-based stretcher
                    t->setTransformAnimation(TextureUnitState::TT_SCALE_U, wft, pass[p].tcModStretchParams[3],
                        pass[p].tcModStretchParams[0], pass[p].tcModStretchParams[2], pass[p].tcModStretchParams[1]);
                    t->setTransformAnimation(TextureUnitState::TT_SCALE_V, wft, pass[p].tcModStretchParams[3],
                        pass[p].tcModStretchParams[0], pass[p].tcModStretchParams[2], pass[p].tcModStretchParams[1]);
                }
            }
            // Address mode
            t->setTextureAddressingMode(pass[p].addressMode);
            //assert(!t->isBlank());
        }
        // Do farbox (create new material)
        // Set culling mode and lighting to defaults
        mat->setCullingMode(CULL_NONE);
        mat->setManualCullingMode(cullMode);
        mat->setLightingEnabled(false);
        mat->load();
        return mat;
    }
String Quake3Shader::getAlternateName(const String& texName)
{
// Get alternative JPG to TGA and vice versa
size_t pos;
String ext, base;
pos = texName.find_last_of(".");
ext = texName.substr(pos, 4);
StringUtil::toLowerCase(ext);
base = texName.substr(0,pos);
if (ext == ".jpg")
{
return base + ".tga";
}
else
{
return base + ".jpg";
}
}
}
| ruleless/ogre | PlugIns/BSPSceneManager/src/OgreQuake3Shader.cpp | C++ | mit | 11,012 |
import Ember from 'ember';
export default Ember.Object.extend({
  /**
   * Map of id -> stored object. Created per-instance in `init`: declaring
   * `content: {}` directly on the class (as before) put ONE shared hash on
   * the prototype, so every instance of this model silently shared its
   * contents - the classic Ember prototype-sharing bug.
   */
  content: null,

  // Number of entries currently stored; kept in sync by `add`.
  contentLength: 0,

  init: function() {
    this._super.apply(this, arguments);
    this.set('content', {});
  },

  // Store `obj` under a freshly generated id and return that id.
  add: function(obj) {
    var id = this.generateId();
    this.get('content')[id] = obj;
    this.incrementProperty("contentLength");
    return id;
  },

  // Look up an object by id; throws an Error (not a bare string) when missing.
  getObj: function(key) {
    var res = this.get('content')[key];
    if (!res) {
      throw new Error("no obj for key " + key);
    }
    return res;
  },

  // Pseudo-random numeric id as a string. (Math.floor replaces the previous
  // parseInt-on-a-number round trip; collisions remain theoretically possible.)
  generateId: function() {
    return String(Math.floor(Math.random() * 1000000000000.0));
  },

  // All stored ids, wrapped in an Ember array.
  keys: function() {
    return Ember.A(Object.keys(this.get('content')));
  },

  lengthBinding: "contentLength"
});
# Image Patches Differential Optical Flow Rotation/Scale
#
# This example shows off using your OpenMV Cam to measure
# rotation/scale by comparing the current and the previous
# image against each other. Note that only rotation/scale is
# handled - not X and Y translation in this mode.
#
# However, this examples goes beyond doing optical flow on the whole
# image at once. Instead it breaks up the process by working on groups
# of pixels in the image. This gives you a "new" image of results.
#
# NOTE that surfaces need to have some type of "edge" on them for the
# algorithm to work. A featureless surface produces crazy results.
# NOTE: Unless you have a very nice test rig, it is hard to see the usefulness of this example...
BLOCK_W = 16 # pow2
BLOCK_H = 16 # pow2
# To run this demo effectively please mount your OpenMV Cam on a steady
# base and SLOWLY rotate the camera around the lens and move the camera
# forward/backwards to see the numbers change.
# I.e. Z direction changes only.
import sensor, image, time, math
# NOTE!!! You have to use a small power of 2 resolution when using
# find_displacement(). This is because the algorithm is powered by
# something called phase correlation which does the image comparison
# using FFTs. A non-power of 2 resolution requires padding to a power
# of 2 which reduces the usefulness of the algorithm results. Please
# use a resolution like B128X128 or B128X64 (2x faster).
# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
# 128x64, and 128x128. If you want a resolution of 32x32 you can create
# it by doing "img.pool(2, 2)" on a 64x64 image.
sensor.reset() # Reset and initialize the sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
sensor.set_framesize(sensor.B128X128) # Set frame size to 128x128... (or 128x64)...
sensor.skip_frames(time = 2000) # Wait for settings take effect.
clock = time.clock() # Create a clock object to track the FPS.
# Take from the main frame buffer's RAM to allocate a second frame buffer.
# There's a lot more RAM in the frame buffer than in the MicroPython heap.
# However, after doing this you have a lot less RAM for some algorithms...
# So, be aware that it's a lot easier to get out of RAM issues now.
extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
extra_fb.replace(sensor.snapshot())
while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.
    # Compare each BLOCK_W x BLOCK_H tile of the previous frame (extra_fb)
    # against the same tile of the current frame. logpolar=True makes
    # find_displacement() report rotation/scale instead of x/y translation.
    for y in range(0, sensor.height(), BLOCK_H):
        for x in range(0, sensor.width(), BLOCK_W):
            displacement = extra_fb.find_displacement(img, logpolar=True, \
                roi = (x, y, BLOCK_W, BLOCK_H), template_roi = (x, y, BLOCK_W, BLOCK_H))
            # Below 0.1 or so (YMMV) and the results are just noise.
            if(displacement.response() > 0.1):
                rotation_change = displacement.rotation()
                zoom_amount = 1.0 + displacement.scale()
                # Draw a short line from the tile centre whose direction encodes
                # the measured rotation and whose length encodes the zoom amount.
                pixel_x = x + (BLOCK_W//2) + int(math.sin(rotation_change) * zoom_amount * (BLOCK_W//4))
                pixel_y = y + (BLOCK_H//2) + int(math.cos(rotation_change) * zoom_amount * (BLOCK_H//4))
                img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, pixel_x, pixel_y), \
                              color = 255)
            else:
                # Low confidence: draw a zero-length (dark) line at the tile centre.
                img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, x + BLOCK_W//2, y + BLOCK_H//2), \
                              color = 0)
    extra_fb.replace(img) # Current frame becomes the template for the next pass.
    print(clock.fps())
| openmv/openmv | scripts/examples/OpenMV/22-Optical-Flow/image-patches-differential-rotation-scale.py | Python | mit | 3,596 |
import { Observable } from 'rxjs/Observable';
/**
 * @name Keyboard
 * @description
 * Static wrapper around the underlying Cordova keyboard plugin: show, hide
 * and configure the soft keyboard, and observe its show/hide events.
 * @usage
 * ```typescript
 * import { Keyboard } from 'ionic-native';
 *
 * Keyboard.disableScroll(true);
 *
 * const sub = Keyboard.onKeyboardShow().subscribe(() => console.log('keyboard shown'));
 * // later, to stop watching: sub.unsubscribe();
 * ```
 */
export declare class Keyboard {
    /**
     * Hide the keyboard accessory bar with the next, previous and done buttons.
     * @param hide {boolean} true to hide the accessory bar, false to show it.
     */
    static hideKeyboardAccessoryBar(hide: boolean): void;
    /**
     * Force keyboard to be shown.
     */
    static show(): void;
    /**
     * Close the keyboard if open.
     */
    static close(): void;
    /**
     * Prevents the native UIScrollView from moving when an input is focused.
     * @param disable {boolean} true to prevent scrolling while an input is focused.
     */
    static disableScroll(disable: boolean): void;
    /**
     * Creates an observable that notifies you when the keyboard is shown. Unsubscribe to observable to cancel event watch.
     * @returns {Observable<any>}
     */
    static onKeyboardShow(): Observable<any>;
    /**
     * Creates an observable that notifies you when the keyboard is hidden. Unsubscribe to observable to cancel event watch.
     * @returns {Observable<any>}
     */
    static onKeyboardHide(): Observable<any>;
}
| Spect-AR/Spect-AR | node_modules/node_modules/ionic-native/dist/esm/plugins/keyboard.d.ts | TypeScript | mit | 1,153 |
"""
categories: Types,bytes
description: Bytes subscr with step != 1 not implemented
cause: Unknown
workaround: Unknown
"""
print(b'123'[0:3:2])
| cwyark/micropython | tests/cpydiff/types_bytes_subscrstep.py | Python | mit | 145 |
require('ember-runtime/core');
require('ember-runtime/system/core_object');
require('ember-runtime/mixins/mutable_enumerable');
require('ember-runtime/mixins/copyable');
require('ember-runtime/mixins/freezable');
/**
@module ember
@submodule ember-runtime
*/
var get = Ember.get, set = Ember.set, guidFor = Ember.guidFor, isNone = Ember.isNone, fmt = Ember.String.fmt;
/**
An unordered collection of objects.
A Set works a bit like an array except that its items are not ordered. You
can create a set to efficiently test for membership for an object. You can
also iterate through a set just like an array, even accessing objects by
index, however there is no guarantee as to their order.
All Sets are observable via the Enumerable Observer API - which works
on any enumerable object including both Sets and Arrays.
## Creating a Set
You can create a set like you would most objects using
`new Ember.Set()`. Most new sets you create will be empty, but you can
also initialize the set with some content by passing an array or other
enumerable of objects to the constructor.
Finally, you can pass in an existing set and the set will be copied. You
can also create a copy of a set by calling `Ember.Set#copy()`.
```javascript
// creates a new empty set
var foundNames = new Ember.Set();
// creates a set with four names in it.
var names = new Ember.Set(["Charles", "Tom", "Juan", "Alex"]); // :P
// creates a copy of the names set.
var namesCopy = new Ember.Set(names);
// same as above.
var anotherNamesCopy = names.copy();
```
## Adding/Removing Objects
You generally add or remove objects from a set using `add()` or
`remove()`. You can add any type of object including primitives such as
numbers, strings, and booleans.
Unlike arrays, objects can only exist one time in a set. If you call `add()`
on a set with the same object multiple times, the object will only be added
once. Likewise, calling `remove()` with the same object multiple times will
remove the object the first time and have no effect on future calls until
you add the object to the set again.
NOTE: You cannot add/remove `null` or `undefined` to a set. Any attempt to do
so will be ignored.
In addition to add/remove you can also call `push()`/`pop()`. Push behaves
just like `add()` but `pop()`, unlike `remove()` will pick an arbitrary
object, remove it and return it. This is a good way to use a set as a job
queue when you don't care which order the jobs are executed in.
## Testing for an Object
To test for an object's presence in a set you simply call
`Ember.Set#contains()`.
## Observing changes
When using `Ember.Set`, you can observe the `"[]"` property to be
alerted whenever the content changes. You can also add an enumerable
observer to the set to be notified of specific objects that are added and
removed from the set. See `Ember.Enumerable` for more information on
enumerables.
This is often unhelpful. If you are filtering sets of objects, for instance,
it is very inefficient to re-filter all of the items each time the set
changes. It would be better if you could just adjust the filtered set based
on what was changed on the original set. The same issue applies to merging
sets, as well.
## Other Methods
`Ember.Set` primary implements other mixin APIs. For a complete reference
on the methods you will use with `Ember.Set`, please consult these mixins.
The most useful ones will be `Ember.Enumerable` and
`Ember.MutableEnumerable` which implement most of the common iterator
methods you are used to on Array.
Note that you can also use the `Ember.Copyable` and `Ember.Freezable`
APIs on `Ember.Set` as well. Once a set is frozen it can no longer be
modified. The benefit of this is that when you call `frozenCopy()` on it,
Ember will avoid making copies of the set. This allows you to write
code that can know with certainty when the underlying set data will or
will not be modified.
@class Set
@namespace Ember
@extends Ember.CoreObject
@uses Ember.MutableEnumerable
@uses Ember.Copyable
@uses Ember.Freezable
@since Ember 0.9
*/
Ember.Set = Ember.CoreObject.extend(Ember.MutableEnumerable, Ember.Copyable, Ember.Freezable,
  /** @scope Ember.Set.prototype */ {
  // ..........................................................
  // IMPLEMENT ENUMERABLE APIS
  //
  /**
    This property will change as the number of objects in the set changes.
    It is maintained internally by `addObject`/`removeObject`; never set it
    directly.
    @property length
    @type number
    @default 0
  */
  length: 0,
/**
Clears the set. This is useful if you want to reuse an existing set
without having to recreate it.
```javascript
var colors = new Ember.Set(["red", "green", "blue"]);
colors.length; // 3
colors.clear();
colors.length; // 0
```
@method clear
@return {Ember.Set} An empty Set
*/
clear: function() {
if (this.isFrozen) { throw new Error(Ember.FROZEN_ERROR); }
var len = get(this, 'length');
if (len === 0) { return this; }
var guid;
this.enumerableContentWillChange(len, 0);
Ember.propertyWillChange(this, 'firstObject');
Ember.propertyWillChange(this, 'lastObject');
for (var i=0; i < len; i++){
guid = guidFor(this[i]);
delete this[guid];
delete this[i];
}
set(this, 'length', 0);
Ember.propertyDidChange(this, 'firstObject');
Ember.propertyDidChange(this, 'lastObject');
this.enumerableContentDidChange(len, 0);
return this;
},
  /**
    Returns true if the passed object is also an enumerable that contains the
    same objects as the receiver. Note this is a membership comparison only;
    ordering and duplicates in the other enumerable are ignored.
    ```javascript
    var colors = ["red", "green", "blue"],
      same_colors = new Ember.Set(colors);
    same_colors.isEqual(colors); // true
    same_colors.isEqual(["purple", "brown"]); // false
    ```
    @method isEqual
    @param {Ember.Set} obj the other object.
    @return {Boolean}
  */
  isEqual: function(obj) {
    // fail fast
    if (!Ember.Enumerable.detect(obj)) return false;
    var loc = get(this, 'length');
    if (get(obj, 'length') !== loc) return false;
    while(--loc >= 0) {
      if (!obj.contains(this[loc])) return false;
    }
    return true;
  },
  /**
    Adds an object to the set. Only non-`null` objects can be added to a set
    and those can only be added once. If the object is already in the set or
    the passed value is null this method will have no effect.
    This is an alias for `Ember.MutableEnumerable.addObject()`.
    ```javascript
    var colors = new Ember.Set();
    colors.add("blue"); // ["blue"]
    colors.add("blue"); // ["blue"]
    colors.add("red"); // ["blue", "red"]
    colors.add(null); // ["blue", "red"]
    colors.add(undefined); // ["blue", "red"]
    ```
    @method add
    @param {Object} obj The object to add.
    @return {Ember.Set} The set itself.
  */
  add: Ember.aliasMethod('addObject'),
  /**
    Removes the object from the set if it is found. If you pass a `null` value
    or an object that is already not in the set, this method will have no
    effect. This is an alias for `Ember.MutableEnumerable.removeObject()`.
    ```javascript
    var colors = new Ember.Set(["red", "green", "blue"]);
    colors.remove("red"); // ["blue", "green"]
    colors.remove("purple"); // ["blue", "green"]
    colors.remove(null); // ["blue", "green"]
    ```
    @method remove
    @param {Object} obj The object to remove
    @return {Ember.Set} The set itself.
  */
  remove: Ember.aliasMethod('removeObject'),
  /**
    Removes the last element from the set and returns it, or `null` if it's empty.
    ("Last" refers to internal storage order; a set is unordered from the
    caller's point of view, so treat the choice as arbitrary.)
    ```javascript
    var colors = new Ember.Set(["green", "blue"]);
    colors.pop(); // "blue"
    colors.pop(); // "green"
    colors.pop(); // null
    ```
    @method pop
    @return {Object} The removed object from the set or null.
  */
  pop: function() {
    if (get(this, 'isFrozen')) throw new Error(Ember.FROZEN_ERROR);
    var obj = this.length > 0 ? this[this.length-1] : null;
    this.remove(obj);
    return obj;
  },
  /**
    Inserts the given object on to the end of the set. It returns
    the set itself.
    This is an alias for `Ember.MutableEnumerable.addObject()`.
    ```javascript
    var colors = new Ember.Set();
    colors.push("red"); // ["red"]
    colors.push("green"); // ["red", "green"]
    colors.push("blue"); // ["red", "green", "blue"]
    ```
    @method push
    @return {Ember.Set} The set itself.
  */
  push: Ember.aliasMethod('addObject'),
  /**
    Removes the last element from the set and returns it, or `null` if it's empty.
    This is an alias for `Ember.Set.pop()`.
    ```javascript
    var colors = new Ember.Set(["green", "blue"]);
    colors.shift(); // "blue"
    colors.shift(); // "green"
    colors.shift(); // null
    ```
    @method shift
    @return {Object} The removed object from the set or null.
  */
  shift: Ember.aliasMethod('pop'),
  /**
    Inserts the given object on to the end of the set. It returns
    the set itself.
    This is an alias of `Ember.Set.push()`
    ```javascript
    var colors = new Ember.Set();
    colors.unshift("red"); // ["red"]
    colors.unshift("green"); // ["red", "green"]
    colors.unshift("blue"); // ["red", "green", "blue"]
    ```
    @method unshift
    @return {Ember.Set} The set itself.
  */
  unshift: Ember.aliasMethod('push'),
  /**
    Adds each object in the passed enumerable to the set.
    This is an alias of `Ember.MutableEnumerable.addObjects()`
    ```javascript
    var colors = new Ember.Set();
    colors.addEach(["red", "green", "blue"]); // ["red", "green", "blue"]
    ```
    @method addEach
    @param {Ember.Enumerable} objects the objects to add.
    @return {Ember.Set} The set itself.
  */
  addEach: Ember.aliasMethod('addObjects'),
  /**
    Removes each object in the passed enumerable to the set.
    This is an alias of `Ember.MutableEnumerable.removeObjects()`
    ```javascript
    var colors = new Ember.Set(["red", "green", "blue"]);
    colors.removeEach(["red", "blue"]); // ["green"]
    ```
    @method removeEach
    @param {Ember.Enumerable} objects the objects to remove.
    @return {Ember.Set} The set itself.
  */
  removeEach: Ember.aliasMethod('removeObjects'),
  // ..........................................................
  // PRIVATE ENUMERABLE SUPPORT
  //
  // Storage scheme: members live at numeric indices 0..length-1 (array-like),
  // and for each member the set also stores guidFor(member) -> index, which
  // makes contains/addObject/removeObject O(1).
  // Optionally seed the set with the contents of an enumerable.
  init: function(items) {
    this._super();
    if (items) this.addObjects(items);
  },
  // implement Ember.Enumerable
  nextObject: function(idx) {
    return this[idx];
  },
  // more optimized version (direct slot access instead of generic iteration)
  firstObject: Ember.computed(function() {
    return this.length > 0 ? this[0] : undefined;
  }),
  // more optimized version
  lastObject: Ember.computed(function() {
    return this.length > 0 ? this[this.length-1] : undefined;
  }),
  // implements Ember.MutableEnumerable
  addObject: function(obj) {
    if (get(this, 'isFrozen')) throw new Error(Ember.FROZEN_ERROR);
    if (isNone(obj)) return this; // nothing to do
    var guid = guidFor(obj),
        idx = this[guid],
        len = get(this, 'length'),
        added ;
    // Already present: the guid maps to a valid slot holding this exact object.
    if (idx>=0 && idx<len && (this[idx] === obj)) return this; // added
    added = [obj];
    this.enumerableContentWillChange(null, added);
    Ember.propertyWillChange(this, 'lastObject');
    // Append at the end and record the guid -> index mapping.
    len = get(this, 'length');
    this[guid] = len;
    this[len] = obj;
    set(this, 'length', len+1);
    Ember.propertyDidChange(this, 'lastObject');
    this.enumerableContentDidChange(null, added);
    return this;
  },
  // implements Ember.MutableEnumerable
  removeObject: function(obj) {
    if (get(this, 'isFrozen')) throw new Error(Ember.FROZEN_ERROR);
    if (isNone(obj)) return this; // nothing to do
    var guid = guidFor(obj),
        idx = this[guid],
        len = get(this, 'length'),
        isFirst = idx === 0,
        isLast = idx === len-1,
        last, removed;
    if (idx>=0 && idx<len && (this[idx] === obj)) {
      removed = [obj];
      this.enumerableContentWillChange(removed, null);
      if (isFirst) { Ember.propertyWillChange(this, 'firstObject'); }
      if (isLast) { Ember.propertyWillChange(this, 'lastObject'); }
      // swap items - basically move the item to the end so it can be removed
      // in O(1) without shifting every later slot down.
      if (idx < len-1) {
        last = this[len-1];
        this[idx] = last;
        this[guidFor(last)] = idx;
      }
      delete this[guid];
      delete this[len-1];
      set(this, 'length', len-1);
      if (isFirst) { Ember.propertyDidChange(this, 'firstObject'); }
      if (isLast) { Ember.propertyDidChange(this, 'lastObject'); }
      this.enumerableContentDidChange(removed, null);
    }
    return this;
  },
  // optimized version - O(1) via the guid -> index mapping
  contains: function(obj) {
    return this[guidFor(obj)]>=0;
  },
  // Copies both the numeric slots and the guid -> index bookkeeping.
  copy: function() {
    var C = this.constructor, ret = new C(), loc = get(this, 'length');
    set(ret, 'length', loc);
    while(--loc>=0) {
      ret[loc] = this[loc];
      ret[guidFor(this[loc])] = loc;
    }
    return ret;
  },
  // e.g. "Ember.Set<red,green,blue>" - member order is arbitrary.
  toString: function() {
    var len = this.length, idx, array = [];
    for(idx = 0; idx < len; idx++) {
      array[idx] = this[idx];
    }
    return fmt("Ember.Set<%@>", [array.join(',')]);
  }
});
| teddyzeenny/ember.js | packages/ember-runtime/lib/system/set.js | JavaScript | mit | 13,316 |
// --------------------------------------------------------------------------------------------------------------------
// <copyright file="BoundingBoxWireFrameVisual3D.cs" company="Helix Toolkit">
// Copyright (c) 2014 Helix Toolkit contributors
// </copyright>
// <summary>
// A visual element that shows a wireframe for the specified bounding box.
// </summary>
// --------------------------------------------------------------------------------------------------------------------
namespace HelixToolkit.Wpf
{
using System;
using System.Collections.Generic;
using System.Windows;
using System.Windows.Media.Media3D;
/// <summary>
/// A visual element that shows a wireframe for the specified bounding box.
/// </summary>
public class BoundingBoxWireFrameVisual3D : LinesVisual3D
{
/// <summary>
/// Identifies the <see cref="BoundingBox"/> dependency property.
/// </summary>
public static readonly DependencyProperty BoundingBoxProperty = DependencyProperty.Register(
"BoundingBox", typeof(Rect3D), typeof(BoundingBoxWireFrameVisual3D), new UIPropertyMetadata(new Rect3D(), BoxChanged));
/// <summary>
/// Gets or sets the bounding box.
/// </summary>
/// <value> The bounding box. </value>
public Rect3D BoundingBox
{
get
{
return (Rect3D)this.GetValue(BoundingBoxProperty);
}
set
{
this.SetValue(BoundingBoxProperty, value);
}
}
/// <summary>
/// Updates the box.
/// </summary>
protected virtual void OnBoxChanged()
{
if (this.BoundingBox.IsEmpty)
{
this.Points = null;
return;
}
var points = new List<Point3D>();
var bb = this.BoundingBox;
var p0 = new Point3D(bb.X, bb.Y, bb.Z);
var p1 = new Point3D(bb.X, bb.Y + bb.SizeY, bb.Z);
var p2 = new Point3D(bb.X + bb.SizeX, bb.Y + bb.SizeY, bb.Z);
var p3 = new Point3D(bb.X + bb.SizeX, bb.Y, bb.Z);
var p4 = new Point3D(bb.X, bb.Y, bb.Z + bb.SizeZ);
var p5 = new Point3D(bb.X, bb.Y + bb.SizeY, bb.Z + bb.SizeZ);
var p6 = new Point3D(bb.X + bb.SizeX, bb.Y + bb.SizeY, bb.Z + bb.SizeZ);
var p7 = new Point3D(bb.X + bb.SizeX, bb.Y, bb.Z + bb.SizeZ);
Action<Point3D, Point3D> addEdge = (p, q) =>
{
points.Add(p);
points.Add(q);
};
addEdge(p0, p1);
addEdge(p1, p2);
addEdge(p2, p3);
addEdge(p3, p0);
addEdge(p4, p5);
addEdge(p5, p6);
addEdge(p6, p7);
addEdge(p7, p4);
addEdge(p0, p4);
addEdge(p1, p5);
addEdge(p2, p6);
addEdge(p3, p7);
this.Points = points;
}
/// <summary>
/// Called when the box dimensions changed.
/// </summary>
/// <param name="d">
/// The sender.
/// </param>
/// <param name="e">
/// The event arguments.
/// </param>
private static void BoxChanged(DependencyObject d, DependencyPropertyChangedEventArgs e)
{
((BoundingBoxWireFrameVisual3D)d).OnBoxChanged();
}
}
} | DynamoDS/helix-toolkit | Source/HelixToolkit.Wpf/Visual3Ds/ScreenSpaceVisuals/BoundingBoxWireFrameVisual3D.cs | C# | mit | 3,576 |
//
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See LICENSE in the project root for license information.
//
using UnityEngine;
using System;
using System.Collections.Generic;
namespace HUX.Utility
{
public class AABBTree<T> where T : class
{
#region Private Classes
        /// <summary>
        /// Node class for the tree. Can be either a branch (two children, no
        /// payload) or a leaf (payload, no children).
        /// </summary>
        private class AABBNode
        {
            #region Public Static Functions
            /// <summary>
            /// Creates a branch node whose bounds enclose both supplied bounds.
            /// Children are not assigned here; callers use <see cref="SetChildren"/>.
            /// </summary>
            /// <param name="rightBounds">Bounds of one prospective child.</param>
            /// <param name="leftBounds">Bounds of the other prospective child.</param>
            /// <returns>The new branch node.</returns>
            public static AABBNode CreateNode(Bounds rightBounds, Bounds leftBounds)
            {
                AABBNode newNode = new AABBNode();
                newNode.Bounds = rightBounds.ExpandToContian(leftBounds);
                return newNode;
            }
            /// <summary>
            /// Creates a leaf node with the given bounds and user data.
            /// </summary>
            /// <param name="bounds">Bounds of the leaf.</param>
            /// <param name="data">Payload stored on the leaf.</param>
            /// <returns>The new leaf node.</returns>
            public static AABBNode CreateNode(Bounds bounds, T data)
            {
                AABBNode newNode = new AABBNode();
                newNode.Bounds = bounds;
                newNode.UserData = data;
                // TODO: consider a margin ("fat AABB") around leaf bounds to reduce re-insertions.
                return newNode;
            }
            #endregion
            //-----------------------------------------------------------------------------------------------------------
            #region Public Variables
            /// <summary>
            /// Children of this node. Both are null for a leaf; both are assigned for a branch.
            /// </summary>
            public AABBNode[] Children = new AABBNode[2];
            /// <summary>
            /// The Axis Aligned Bounding Box for this node.
            /// </summary>
            public Bounds Bounds;
            /// <summary>
            /// User Data for this node (set on leaves only).
            /// </summary>
            public T UserData;
            #endregion
            //-----------------------------------------------------------------------------------------------------------
            #region Private Variables
            /// <summary>
            /// A weak reference to the parent so the tree will get cleaned up if the root node is no longer referenced.
            /// </summary>
            private WeakReference m_ParentRef;
            #endregion
            //-----------------------------------------------------------------------------------------------------------
            #region Accessors
            /// <summary>
            /// True if this is a leaf node, i.e. it has no children assigned.
            /// </summary>
            public bool IsLeaf
            {
                get
                {
                    return Children[0] == null && Children[1] == null;
                }
            }
            /// <summary>
            /// Accessor for setting/getting the parent. Returns null when unset
            /// or when the weakly-referenced parent has been collected.
            /// </summary>
            public AABBNode Parent
            {
                get
                {
                    return m_ParentRef != null && m_ParentRef.IsAlive ? m_ParentRef.Target as AABBNode : null;
                }
                set
                {
                    if (value == null)
                    {
                        m_ParentRef = null;
                    }
                    else
                    {
                        m_ParentRef = new WeakReference(value);
                    }
                }
            }
            #endregion
            //-----------------------------------------------------------------------------------------------------------
            #region Public Functions
            /// <summary>
            /// Sets both children and points their parent references at this node.
            /// </summary>
            /// <param name="child1">First child.</param>
            /// <param name="child2">Second child.</param>
            public void SetChildren(AABBNode child1, AABBNode child2)
            {
                child1.Parent = this;
                child2.Parent = this;
                Children[0] = child1;
                Children[1] = child2;
            }
            /// <summary>
            /// Sets the bounds to the union of both children's bounds.
            /// </summary>
            public void RebuildBounds()
            {
                Bounds = Children[0].Bounds.ExpandToContian(Children[1].Bounds);
            }
            #endregion
        }
#endregion
//-----------------------------------------------------------------------------------------------------------
#region Private Variables
        /// <summary>
        /// The root node of the tree; null while the tree is empty. All
        /// insertions and queries start from here.
        /// </summary>
        private AABBNode m_RootNode;
#endregion
//-----------------------------------------------------------------------------------------------------------
#region Public Functions
/// <summary>
/// Creates a new node with the provided bounds.
/// </summary>
/// <param name="bounds"></param>
/// <param name="data"></param>
public void Insert(Bounds bounds, T data)
{
AABBNode newNode = AABBNode.CreateNode(bounds, data);
if (m_RootNode == null)
{
m_RootNode = newNode;
}
else
{
RecursiveInsert(m_RootNode, newNode);
}
}
/// <summary>
/// Removes the node containing data.
/// </summary>
/// <param name="data"></param>
public void Remove(T data)
{
AABBNode node = FindNode(data);
RemoveNode(node);
}
/// <summary>
/// Removes the node with the bounds. If two nodes have the exact same bounds only the first one found will be removed.
/// </summary>
/// <param name="bounds"></param>
public void Remove(Bounds bounds)
{
AABBNode node = FindNode(bounds);
RemoveNode(node);
}
        /// <summary>
        /// Destroys all nodes in the tree, leaving it empty.
        /// </summary>
        public void Clear()
        {
            // All we need to do is remove the root node reference. The garbage collector will do the rest
            // (parent links are weak references, so no cycles keep nodes alive).
            m_RootNode = null;
        }
#endregion
//-----------------------------------------------------------------------------------------------------------
#region Private Functions
        /// <summary>
        /// Recursively Insert the new Node until we hit a leaf node. Then branch and insert both nodes.
        /// At each branch the child whose bounds would grow to the smaller volume is descended.
        /// </summary>
        /// <param name="currentNode">Subtree root to insert under.</param>
        /// <param name="newNode">The new leaf node.</param>
        private void RecursiveInsert(AABBNode currentNode, AABBNode newNode)
        {
            AABBNode branch = currentNode;
            if (currentNode.IsLeaf)
            {
                // Replace the leaf with a new branch that holds the old leaf and the new node.
                branch = AABBNode.CreateNode(currentNode.Bounds, newNode.Bounds);
                branch.Parent = currentNode.Parent;
                if (currentNode == m_RootNode)
                {
                    m_RootNode = branch;
                }
                else
                {
                    // Re-point the parent's child slot that referenced the old leaf.
                    branch.Parent.Children[branch.Parent.Children[0] == currentNode ? 0 : 1] = branch;
                }
                branch.SetChildren(currentNode, newNode);
            }
            else
            {
                // Descend into whichever child would end up with the smaller enclosing volume.
                Bounds withChild1 = branch.Children[0].Bounds.ExpandToContian(newNode.Bounds);
                Bounds withChild2 = branch.Children[1].Bounds.ExpandToContian(newNode.Bounds);
                float volume1 = withChild1.Volume();
                float volume2 = withChild2.Volume();
                RecursiveInsert((volume1 <= volume2) ? branch.Children[0] : branch.Children[1], newNode);
            }
            // Grow this node's bounds to cover the newly inserted entry on the way back up.
            branch.RebuildBounds();
        }
/// <summary>
/// Finds the node that has the assigned user data.
/// </summary>
/// <param name="userData"></param>
/// <returns></returns>
private AABBNode FindNode(T userData)
{
AABBNode foundNode = null;
List<AABBNode> nodesToSearch = new List<AABBNode>();
nodesToSearch.Add(m_RootNode);
while (nodesToSearch.Count > 0)
{
AABBNode currentNode = nodesToSearch[0];
nodesToSearch.RemoveAt(0);
if (currentNode.UserData == userData)
{
foundNode = currentNode;
break;
}
else if (!currentNode.IsLeaf)
{
nodesToSearch.AddRange(currentNode.Children);
}
}
return foundNode;
}
/// <summary>
/// Finds the leaf node that matches bounds.
/// </summary>
/// <param name="bounds"></param>
/// <returns></returns>
private AABBNode FindNode(Bounds bounds)
{
AABBNode foundNode = null;
AABBNode currentNode = m_RootNode;
while (currentNode != null)
{
if (currentNode.IsLeaf)
{
foundNode = currentNode.Bounds == bounds ? currentNode : null;
break;
}
else
{
//Which child node if any would the bounds be in?
if (currentNode.Children[0].Bounds.ContainsBounds(bounds))
{
currentNode = currentNode.Children[0];
}
else if (currentNode.Children[1].Bounds.ContainsBounds(bounds))
{
currentNode = currentNode.Children[1];
}
else
{
currentNode = null;
}
}
}
return foundNode;
}
/// <summary>
/// Removes a node from the tree
/// </summary>
/// <param name="node"></param>
private void RemoveNode(AABBNode node)
{
AABBNode nodeParent = node.Parent;
if (node == m_RootNode)
{
m_RootNode = null;
}
else
{
AABBNode otherChild = nodeParent.Children[0] == node ? nodeParent.Children[1] : nodeParent.Children[0];
if (nodeParent.Parent == null)
{
m_RootNode = otherChild;
otherChild.Parent = null;
}
else
{
int childIndex = nodeParent.Parent.Children[0] == nodeParent ? 0 : 1;
nodeParent.Parent.Children[childIndex] = otherChild;
otherChild.Parent = nodeParent.Parent;
}
UpdateNodeBoundUp(otherChild.Parent);
}
}
/// <summary>
/// Updates the bounds nonleaf node object moving up the Parent tree to Root.
/// </summary>
/// <param name="node"></param>
private void UpdateNodeBoundUp(AABBNode node)
{
if (node != null)
{
if (!node.IsLeaf)
{
node.RebuildBounds();
}
UpdateNodeBoundUp(node.Parent);
}
}
#endregion
}
}
| AllBecomesGood/Share-UpdateHolograms | ShareAndKeepSynced/Assets/MRDesignLab/HUX/Scripts/Utility/AABBTree.cs | C# | mit | 8,882 |
/*!
 * Bootstrap-select v1.13.2 (https://developer.snapappointments.com/bootstrap-select)
 *
 * Copyright 2012-2018 SnapAppointments, LLC
 * Licensed under MIT (https://github.com/snapappointments/bootstrap-select/blob/master/LICENSE)
 */

// UMD wrapper: registers the bg_BG translation for AMD, CommonJS or browser globals.
(function (root, factory) {
  if (root === undefined && window !== undefined) root = window;

  if (typeof define === 'function' && define.amd) {
    // AMD: register as an anonymous module.
    define(["jquery"], function (jq) {
      return (factory(jq));
    });
  } else if (typeof module === 'object' && module.exports) {
    // Node / CommonJS-like environments that support module.exports.
    module.exports = factory(require("jquery"));
  } else {
    // Browser global.
    factory(root["jQuery"]);
  }
}(this, function (jQuery) {

(function ($) {
  // Bulgarian (bg_BG) translations for every user-visible selectpicker string.
  $.fn.selectpicker.defaults = {
    noneSelectedText: 'Нищо избрано',
    noneResultsText: 'Няма резултат за {0}',
    countSelectedText: function (numSelected, numTotal) {
      return (numSelected == 1) ? "{0} избран елемент" : "{0} избрани елемента";
    },
    maxOptionsText: function (numAll, numGroup) {
      return [
        (numAll == 1) ? 'Лимита е достигнат ({n} елемент максимум)' : 'Лимита е достигнат ({n} елемента максимум)',
        (numGroup == 1) ? 'Груповия лимит е достигнат ({n} елемент максимум)' : 'Груповия лимит е достигнат ({n} елемента максимум)'
      ];
    },
    selectAllText: 'Избери всички',
    deselectAllText: 'Размаркирай всички',
    multipleSeparator: ', '
  };
})(jQuery);

}));
| extend1994/cdnjs | ajax/libs/bootstrap-select/1.13.2/js/i18n/defaults-bg_BG.js | JavaScript | mit | 1,838 |
require 'faraday'
require 'json'
module Twitter
  module REST
    module Response
      # Faraday response middleware that parses JSON bodies into Ruby
      # structures with symbolized keys.
      class ParseJson < Faraday::Response::Middleware
        WHITESPACE_REGEX = /\A^\s*$\z/

        # Returns nil for nil or whitespace-only bodies, otherwise the
        # parsed JSON with symbol keys.
        def parse(body)
          return nil if body.nil? || WHITESPACE_REGEX =~ body
          JSON.parse(body, :symbolize_names => true)
        end

        # Faraday hook: replace the raw body with the parsed structure,
        # unless the status code never carries a parsable body.
        def on_complete(response)
          return unless respond_to?(:parse)
          return if unparsable_status_codes.include?(response.status)
          response.body = parse(response.body)
        end

        # Statuses with no body (204) or redirects (301/302/304).
        def unparsable_status_codes
          [204, 301, 302, 304]
        end
      end
    end
  end
end

Faraday::Response.register_middleware :twitter_parse_json => Twitter::REST::Response::ParseJson
| yukisako/omiyage | vendor/bundle/ruby/2.2.0/gems/twitter-5.15.0/lib/twitter/rest/response/parse_json.rb | Ruby | mit | 746 |
/*
* This file is part of SpongeAPI, licensed under the MIT License (MIT).
*
* Copyright (c) SpongePowered <https://www.spongepowered.org>
* Copyright (c) contributors
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package org.spongepowered.api.event.item.inventory;
import org.spongepowered.api.entity.living.Living;
import org.spongepowered.api.event.Cancellable;
import org.spongepowered.api.item.inventory.ItemStack;
/**
 * Base event for changes to an inventory. Combines per-slot change
 * information ({@code AffectSlotEvent}) with cancellation support.
 */
public interface ChangeInventoryEvent extends TargetInventoryEvent, AffectSlotEvent, Cancellable {

    /**
     * Fired when a {@link Living} changes it's equipment.
     */
    interface Equipment extends ChangeInventoryEvent {}

    /**
     * Fired when a {@link Living} changes it's held {@link ItemStack}.
     */
    interface Held extends ChangeInventoryEvent {}

    /**
     * Fired when items are transferred between inventories.
     */
    interface Transfer extends ChangeInventoryEvent {}

    /**
     * Fired when items are picked up into an inventory.
     */
    interface Pickup extends ChangeInventoryEvent {}
}
| kashike/SpongeAPI | src/main/java/org/spongepowered/api/event/item/inventory/ChangeInventoryEvent.java | Java | mit | 1,942 |
/*******************************************************************************
* Copyright (c) 1998, 2015 Oracle and/or its affiliates. All rights reserved.
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 and Eclipse Distribution License v. 1.0
* which accompanies this distribution.
* The Eclipse Public License is available at http://www.eclipse.org/legal/epl-v10.html
* and the Eclipse Distribution License is available at
* http://www.eclipse.org/org/documents/edl-v10.php.
*
* Contributors:
* Oracle - initial API and implementation from Oracle TopLink
******************************************************************************/
package org.eclipse.persistence.tools.workbench.framework.context;
import org.eclipse.persistence.tools.workbench.framework.resources.IconResourceFileNameMap;
import org.eclipse.persistence.tools.workbench.framework.resources.ResourceRepository;
import org.eclipse.persistence.tools.workbench.framework.resources.ResourceRepositoryWrapper;
/**
 * An {@code ApplicationContextWrapper} whose resource repository is expanded:
 * resources from an additional resource bundle and icon map are layered on
 * top of the wrapped context's original repository.
 */
public class ExpandedResourceRepositoryApplicationContext extends ApplicationContextWrapper {

    /** The delegate's repository wrapped together with the additional resources. */
    private ResourceRepository wrappedRepository;


    // ********** constructor/initialization **********

    /**
     * Construct a context with an expanded resource repository
     * that adds the resources in the specified resource bundle and icon map
     * to the original resource repository.
     */
    public ExpandedResourceRepositoryApplicationContext(ApplicationContext delegate, Class resourceBundleClass, IconResourceFileNameMap iconResourceFileNameMap) {
        super(delegate);
        this.wrappedRepository = new ResourceRepositoryWrapper(this.delegateResourceRepository(), resourceBundleClass, iconResourceFileNameMap);
    }


    // ********** non-delegated behavior **********

    /**
     * Return the expanded repository instead of the delegate's.
     * @see ApplicationContextWrapper#getResourceRepository()
     */
    public ResourceRepository getResourceRepository() {
        return this.wrappedRepository;
    }


    // ********** additional behavior **********

    /**
     * Return the original, unwrapped resource repository of the delegate.
     */
    public ResourceRepository delegateResourceRepository() {
        return this.getDelegate().getResourceRepository();
    }

}
| RallySoftware/eclipselink.runtime | utils/eclipselink.utils.workbench/framework/source/org/eclipse/persistence/tools/workbench/framework/context/ExpandedResourceRepositoryApplicationContext.java | Java | epl-1.0 | 2,433 |
/*******************************************************************************
* Copyright (c) 1998, 2015 Oracle and/or its affiliates. All rights reserved.
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 and Eclipse Distribution License v. 1.0
* which accompanies this distribution.
* The Eclipse Public License is available at http://www.eclipse.org/legal/epl-v10.html
* and the Eclipse Distribution License is available at
* http://www.eclipse.org/org/documents/edl-v10.php.
*
* Contributors:
* Oracle - initial API and implementation from Oracle TopLink
******************************************************************************/
package org.eclipse.persistence.mappings.transformers;
import org.eclipse.persistence.sessions.Session;
import org.eclipse.persistence.core.mappings.transformers.CoreFieldTransformer;
import org.eclipse.persistence.mappings.foundation.AbstractTransformationMapping;
/**
 * PUBLIC:
 * This interface is used by the Transformation Mapping to build the value for a
 * specific field. The user must provide implementations of this interface to the
 * Transformation Mapping.
 * @author mmacivor
 * @since 10.1.3
 */
public interface FieldTransformer extends CoreFieldTransformer<Session> {

    /**
     * Initialize this transformer. Only required if the user needs some special
     * information from the mapping in order to do the transformation.
     * @param mapping the mapping this transformer is associated with
     */
    public void initialize(AbstractTransformationMapping mapping);

    /**
     * Build and return the value to be written for the field associated with
     * this transformer.
     * @param instance an instance of the domain class which contains the attribute
     * @param fieldName the name of the field being transformed; useful when the same
     *        transformer is reused for multiple fields
     * @param session the current session
     * @return the value to be written for the field associated with this transformer
     */
    @Override
    public Object buildFieldValue(Object instance, String fieldName, Session session);
}
| RallySoftware/eclipselink.runtime | foundation/org.eclipse.persistence.core/src/org/eclipse/persistence/mappings/transformers/FieldTransformer.java | Java | epl-1.0 | 2,097 |
/*******************************************************************************
* Copyright (c) 1998, 2015 Oracle and/or its affiliates. All rights reserved.
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 and Eclipse Distribution License v. 1.0
* which accompanies this distribution.
* The Eclipse Public License is available at http://www.eclipse.org/legal/epl-v10.html
* and the Eclipse Distribution License is available at
* http://www.eclipse.org/org/documents/edl-v10.php.
*
* Contributors:
* Oracle - initial API and implementation from Oracle TopLink
******************************************************************************/
package org.eclipse.persistence.testing.tests.writing;
import org.eclipse.persistence.testing.framework.*;
import org.eclipse.persistence.descriptors.ClassDescriptor;
import org.eclipse.persistence.sessions.*;
import org.eclipse.persistence.sessions.server.ClientSession;
import org.eclipse.persistence.testing.framework.WriteObjectTest;
/**
 * Test changing private parts of an object, optionally through a (nested)
 * unit of work. Subclasses override {@link #changeObject()} to apply the
 * modifications under test to {@link #workingCopy}.
 */
public class ComplexUpdateTest extends WriteObjectTest {

    /** The object which is actually changed (the UOW clone when a UOW is used). */
    public Object workingCopy;
    /** When true the change is performed through a unit of work. */
    public boolean usesUnitOfWork = false;
    /** When true the change is performed through a nested unit of work. */
    public boolean usesNestedUnitOfWork = false;
    /** When true the parent unit of work is committed as well. */
    public boolean shouldCommitParent = false;
    /** TODO: Set this to true, and fix issues from tests that fail. */
    public boolean shouldCompareClone = true;

    public ComplexUpdateTest() {
        super();
    }

    public ComplexUpdateTest(Object originalObject) {
        super(originalObject);
    }

    /**
     * Subclass hook: apply the modifications under test to the working copy.
     */
    protected void changeObject() {
        // By default do nothing
    }

    /**
     * Configure the test to run through a nested unit of work and to commit
     * the parent unit of work as well.
     */
    public void commitParentUnitOfWork() {
        useNestedUnitOfWork();
        this.shouldCommitParent = true;
    }

    public String getName() {
        // String.valueOf(boolean) produces the same "true"/"false" text as the
        // deprecated new Boolean(boolean) constructor previously used here.
        return super.getName() + String.valueOf(usesUnitOfWork) + String.valueOf(usesNestedUnitOfWork);
    }

    public void reset() {
        // Pop the executor back out of any unit(s) of work before resetting.
        if (getExecutor().getSession().isUnitOfWork()) {
            getExecutor().setSession(((UnitOfWork)getSession()).getParent());
            // Do the same for nested units of work.
            if (getExecutor().getSession().isUnitOfWork()) {
                getExecutor().setSession(((UnitOfWork)getSession()).getParent());
            }
        }
        super.reset();
    }

    protected void setup() {
        super.setup();
        if (this.usesUnitOfWork) {
            getExecutor().setSession(getSession().acquireUnitOfWork());
            if (this.usesNestedUnitOfWork) {
                getExecutor().setSession(getSession().acquireUnitOfWork());
            }
            // Work against the registered clone so the original stays untouched.
            this.workingCopy = ((UnitOfWork)getSession()).registerObject(this.objectToBeWritten);
        } else {
            this.workingCopy = this.objectToBeWritten;
        }
    }

    protected void test() {
        changeObject();
        if (this.usesUnitOfWork) {
            // Ensure that the original has not been changed.
            if (!((UnitOfWork)getSession()).getParent().compareObjects(this.originalObject, this.objectToBeWritten)) {
                throw new TestErrorException("The original object was changed through changing the clone.");
            }
            ((UnitOfWork)getSession()).commit();
            getExecutor().setSession(((UnitOfWork)getSession()).getParent());
            if (this.usesNestedUnitOfWork) {
                if (this.shouldCommitParent) {
                    ((UnitOfWork)getSession()).commit();
                }
                getExecutor().setSession(((UnitOfWork)getSession()).getParent());
            }

            // Ensure that the clone matches the cache.
            if (this.shouldCompareClone) {
                ClassDescriptor descriptor = getSession().getClassDescriptor(this.objectToBeWritten);
                if(descriptor.shouldIsolateObjectsInUnitOfWork()) {
                    getSession().logMessage("ComplexUpdateTest: descriptor.shouldIsolateObjectsInUnitOfWork() == null. In this case object's changes are not merged back into parent's cache");
                } else if (descriptor.shouldIsolateProtectedObjectsInUnitOfWork() && getSession().isClientSession()){
                    if (!getAbstractSession().compareObjects(this.workingCopy, ((ClientSession)getSession()).getParent().getIdentityMapAccessor().getFromIdentityMap(this.workingCopy))) {
                        throw new TestErrorException("The clone does not match the cached object.");
                    }
                }
                else {
                    if (!getAbstractSession().compareObjects(this.workingCopy, this.objectToBeWritten)) {
                        throw new TestErrorException("The clone does not match the cached object.");
                    }
                }
            }
        } else {
            super.test();
        }
    }

    public void useNestedUnitOfWork() {
        this.usesNestedUnitOfWork = true;
        this.usesUnitOfWork = true;
    }
}
| RallySoftware/eclipselink.runtime | foundation/eclipselink.core.test/src/org/eclipse/persistence/testing/tests/writing/ComplexUpdateTest.java | Java | epl-1.0 | 5,045 |
<?php
/*
Plugin Name: Black Studio TinyMCE Widget
Plugin URI: https://wordpress.org/plugins/black-studio-tinymce-widget/
Description: Adds a new "Visual Editor" widget type based on the native WordPress TinyMCE editor.
Version: 2.2.10
Author: Black Studio
Author URI: http://www.blackstudio.it
Requires at least: 3.1
Tested up to: 4.5
License: GPLv3
Text Domain: black-studio-tinymce-widget
Domain Path: /languages
*/
// Abort when this file is loaded directly, outside of WordPress.
defined( 'ABSPATH' ) || exit;
/**
 * Main plugin class
 *
 * @package Black_Studio_TinyMCE_Widget
 * @since 2.0.0
 */
if ( ! class_exists( 'Black_Studio_TinyMCE_Plugin' ) ) {

	final class Black_Studio_TinyMCE_Plugin {

		/**
		 * Plugin version
		 *
		 * @var string
		 * @since 2.0.0
		 */
		public static $version = '2.2.10';

		/**
		 * The single instance of the plugin class
		 *
		 * @var object
		 * @since 2.0.0
		 */
		protected static $_instance = null;

		/**
		 * Instance of admin class
		 *
		 * @var object
		 * @since 2.0.0
		 */
		protected static $admin = null;

		/**
		 * Instance of admin pointer class
		 *
		 * @var object
		 * @since 2.1.0
		 */
		protected static $admin_pointer = null;

		/**
		 * Instance of compatibility class
		 *
		 * @var object
		 * @since 2.0.0
		 */
		protected static $compatibility = null;

		/**
		 * Instance of the text filters class
		 *
		 * @var object
		 * @since 2.0.0
		 */
		protected static $text_filters = null;

		/**
		 * Return the main plugin instance
		 *
		 * @return object
		 * @since 2.0.0
		 */
		public static function instance() {
			if ( is_null( self::$_instance ) ) {
				self::$_instance = new self();
			}
			return self::$_instance;
		}

		/**
		 * Return the instance of the admin class
		 *
		 * @return object
		 * @since 2.0.0
		 */
		public static function admin() {
			return self::$admin;
		}

		/**
		 * Return the instance of the admin pointer class
		 *
		 * @return object
		 * @since 2.1.0
		 */
		public static function admin_pointer() {
			return self::$admin_pointer;
		}

		/**
		 * Return the instance of the compatibility class
		 *
		 * @return object
		 * @since 2.0.0
		 */
		public static function compatibility() {
			return self::$compatibility;
		}

		/**
		 * Return the instance of the text filters class
		 *
		 * @return object
		 * @since 2.0.0
		 */
		public static function text_filters() {
			return self::$text_filters;
		}

		/**
		 * Get plugin version
		 *
		 * @return string
		 * @since 2.0.0
		 */
		public static function get_version() {
			return self::$version;
		}

		/**
		 * Get plugin basename
		 *
		 * @uses plugin_basename()
		 *
		 * @return string
		 * @since 2.0.0
		 */
		public static function get_basename() {
			return plugin_basename( __FILE__ );
		}

		/**
		 * Class constructor
		 *
		 * @uses add_action()
		 * @uses add_filter()
		 * @uses get_option()
		 * @uses get_bloginfo()
		 *
		 * @global object $wp_embed
		 * @since 2.0.0
		 */
		protected function __construct() {
			// Include required files
			include_once( plugin_dir_path( __FILE__ ) . 'includes/class-widget.php' );
			// Include and instantiate admin class on admin pages
			if ( is_admin() ) {
				include_once( plugin_dir_path( __FILE__ ) . 'includes/class-admin.php' );
				self::$admin = Black_Studio_TinyMCE_Admin::instance();
				include_once( plugin_dir_path( __FILE__ ) . 'includes/class-admin-pointer.php' );
				self::$admin_pointer = Black_Studio_TinyMCE_Admin_Pointer::instance();
			}
			// Include and instantiate text filter class on frontend pages
			else {
				include_once( plugin_dir_path( __FILE__ ) . 'includes/class-text-filters.php' );
				self::$text_filters = Black_Studio_TinyMCE_Text_Filters::instance();
			}
			// Register action and filter hooks
			add_action( 'plugins_loaded', array( $this, 'load_compatibility' ), 50 );
			add_action( 'widgets_init', array( $this, 'widgets_init' ) );
		}

		/**
		 * Prevent the class from being cloned
		 *
		 * @return void
		 * @since 2.0.0
		 */
		protected function __clone() {
			_doing_it_wrong( __FUNCTION__, __( 'Cheatin&#8217; uh?' ), '2.0' );
		}

		/**
		 * Load compatibility class
		 *
		 * @uses apply_filters()
		 * @uses get_bloginfo()
		 * @uses plugin_dir_path()
		 *
		 * @return void
		 * @since 2.0.0
		 */
		public function load_compatibility() {
			// Compatibility load flag (for both deprecated functions and code for compatibility with other plugins)
			$load_compatibility = apply_filters( 'black_studio_tinymce_load_compatibility', true );
			if ( $load_compatibility ) {
				include_once( plugin_dir_path( __FILE__ ) . 'includes/class-compatibility.php' );
				self::$compatibility = Black_Studio_TinyMCE_Compatibility::instance();
			}
		}

		/**
		 * Widget initialization
		 *
		 * @uses is_blog_installed()
		 * @uses register_widget()
		 *
		 * @return null|void
		 * @since 2.0.0
		 */
		public function widgets_init() {
			if ( ! is_blog_installed() ) {
				return;
			}
			register_widget( 'WP_Widget_Black_Studio_TinyMCE' );
		}

		/**
		 * Check if a widget is a Black Studio Tinymce Widget instance
		 *
		 * @param object $widget
		 * @return boolean
		 * @since 2.0.0
		 */
		public function check_widget( $widget ) {
			// is_object() + strict string comparison replace the previous
			// loose gettype()/== checks; the accepted inputs are unchanged.
			return is_object( $widget ) && ( 'WP_Widget_Black_Studio_TinyMCE' === get_class( $widget ) || is_subclass_of( $widget, 'WP_Widget_Black_Studio_TinyMCE' ) );
		}

	} // END class Black_Studio_TinyMCE_Plugin

} // END class_exists check
// When no other copy of the plugin defined bstw() yet, define it and boot the
// plugin; otherwise warn the admin that two copies of the plugin are active.
if ( ! function_exists( 'bstw' ) ) {
	/**
	 * Return the main instance to prevent the need to use globals
	 *
	 * @return object
	 * @since 2.0.0
	 */
	function bstw() {
		return Black_Studio_TinyMCE_Plugin::instance();
	}
	/* Create the main instance */
	bstw();
} // END function_exists bstw check
else {
	/* Check for multiple plugin instances */
	if ( ! function_exists( 'bstw_multiple_notice' ) ) {
		/**
		 * Show admin notice when multiple instances of the plugin are detected
		 *
		 * @return void
		 * @since 2.1.0
		 */
		function bstw_multiple_notice() {
			global $pagenow;
			// Only show the warning on the widgets admin screen.
			if ( 'widgets.php' == $pagenow ) {
				echo '<div class="error">';
				/* translators: error message shown when multiple instance of the plugin are detected */
				echo '<p>' . esc_html( __( 'ERROR: Multiple instances of the Black Studio TinyMCE Widget plugin were detected. Please activate only one instance at a time.', 'black-studio-tinymce-widget' ) ) . '</p>';
				echo '</div>';
			}
		}
		add_action( 'admin_notices', 'bstw_multiple_notice' );
	} // END function_exists bstw_multiple_notice check
} // END else function_exists bstw check
/*! PopUp Free - v4.7.11
* https://wordpress.org/plugins/wordpress-popup/
* Copyright (c) 2015; * Licensed GPLv2+ */
/*global window:false */
/*global document:false */
/*global wp:false */
/*global wpmUi:false */
/*global ace:false */
/**
* Admin Javascript functions for PopUp
*/
jQuery(function init_admin() {
// ----- POPUP EDITOR --
// Disables dragging of metaboxes: Users cannot change the metabox order.
function disable_metabox_dragging() {
var boxes = jQuery( '.meta-box-sortables' ),
handles = jQuery( '.postbox .hndle' );
if ( ! boxes.length ) { return; }
boxes.sortable({
disabled: true
});
handles.css( 'cursor', 'pointer' );
}
// Keeps the submitdiv always visible, even when scrolling.
function scrolling_submitdiv() {
var scroll_top,
top_offset,
submitdiv = jQuery( '#submitdiv' ),
postbody = jQuery( '#post-body' ),
body = jQuery( 'body' ),
padding = 20;
if ( ! submitdiv.length ) { return; }
top_offset = submitdiv.position().top;
var small_make_sticky = function() {
if ( ! body.hasClass( 'sticky-submit' ) ) {
body.addClass( 'sticky-submit' );
submitdiv.css({ 'marginTop': 0 } );
submitdiv.find( '.sticky-actions' ).show();
submitdiv.find( '.non-sticky' ).hide();
}
};
var small_remove_sticky = function() {
if ( body.hasClass( 'sticky-submit' ) ) {
body.removeClass( 'sticky-submit' );
submitdiv.find( '.sticky-actions' ).hide();
submitdiv.find( '.non-sticky' ).show();
}
};
jQuery( window ).resize(function() {
var is_small = jQuery( window ).width() <= 850;
if ( is_small ) {
if ( ! body.hasClass( 'po-small' ) ) {
body.addClass( 'po-small' );
}
} else {
if ( body.hasClass( 'po-small' ) ) {
body.removeClass( 'po-small' );
small_remove_sticky();
}
}
}).scroll(function(){
if ( postbody.hasClass( 'columns-1' ) || body.hasClass( 'po-small' ) ) {
// 1-column view:
// The div stays as sticky toolbar when scrolling down.
scroll_top = jQuery( window ).scrollTop() - top_offset;
if ( scroll_top > 0 ) {
small_make_sticky();
} else {
small_remove_sticky();
}
} else {
// 2-column view:
// The div scrolls with the page to stay visible.
scroll_top = jQuery( window ).scrollTop() - top_offset + padding;
if ( scroll_top > 0 ) {
submitdiv.css({ 'marginTop': scroll_top } );
} else {
submitdiv.css({ 'marginTop': 0 } );
}
}
});
window.setTimeout( function() {
jQuery( window ).trigger( 'scroll' );
}, 100 );
}
// Change the text-fields to colorpicker fields.
function init_colorpicker() {
var inp = jQuery( '.colorpicker' );
if ( ! inp.length || 'function' !== typeof inp.wpColorPicker ) { return; }
var maybe_hide_picker = function maybe_hide_picker( ev ) {
var el = jQuery( ev.target ),
cp = el.closest( '.wp-picker-container' ),
me = cp.find( '.colorpicker' ),
do_hide = jQuery( '.colorpicker' );
if ( cp.length ) {
do_hide = do_hide.not( me );
}
do_hide.each( function() {
var picker = jQuery( this ),
wrap = picker.closest( '.wp-picker-container' );
picker.iris( 'hide' );
// As mentioned: Color picker does not like to hide properly...
picker.hide();
wrap.find( '.wp-picker-clear').addClass( 'hidden' );
wrap.find( '.wp-picker-open').removeClass( 'wp-picker-open' );
});
};
inp.wpColorPicker();
// Don't ask why the handler is hooked three times ;-)
// The Color picker is a bit bitchy when it comes to hiding it...
jQuery( document ).on( 'mousedown', maybe_hide_picker );
jQuery( document ).on( 'click', maybe_hide_picker );
jQuery( document ).on( 'mouseup', maybe_hide_picker );
}
// Add event handlers for editor UI controls (i.e. to checkboxes)
function init_edit_controls() {
var chk_colors = jQuery( '#po-custom-colors' ),
chk_size = jQuery( '#po-custom-size' ),
opt_display = jQuery( '[name=po_display]' ),
chk_can_hide = jQuery( '#po-can-hide' ),
chk_close_hides = jQuery( '#po-close-hides' );
if ( ! chk_colors.length ) { return; }
var toggle_section = function toggle_section() {
var group,
me = jQuery( this ),
sel = me.data( 'toggle' ),
sect = jQuery( sel ),
group_or = me.data( 'or' ),
group_and = me.data( 'and' ),
is_active = false;
if ( group_or ) {
group = jQuery( group_or );
is_active = ( group.filter( ':checked' ).length > 0);
} else if ( group_and ) {
group = jQuery( group_and );
is_active = ( group.length === group.filter( ':checked' ).length );
} else {
is_active = me.prop( 'checked' );
}
if ( is_active ) {
sect.removeClass( 'inactive' );
sect.find( 'input,select,textarea,a' )
.prop( 'readonly', false )
.removeClass( 'disabled' );
} else {
sect.addClass( 'inactive' );
// Do NOT set .prop('disabled', true)!
sect.find( 'input,select,textarea,a' )
.prop( 'readonly', true )
.addClass( 'disabled' );
}
sect.addClass( 'inactive-anim' );
};
var toggle_section_group = function toggle_section_group() {
var me = jQuery( this ),
name = me.attr( 'name' ),
group = jQuery( '[name="' + name + '"]' );
group.each(function() {
toggle_section.call( this );
});
};
var create_sliders = function create_sliders() {
jQuery( '.slider' ).each(function() {
var me = jQuery( this ),
wrap = me.closest( '.slider-wrap' ),
inp_base = me.data( 'input' ),
inp_min = wrap.find( inp_base + 'min' ),
inp_max = wrap.find( inp_base + 'max' ),
min_input = wrap.find( '.slider-min-input' ),
min_ignore = wrap.find( '.slider-min-ignore' ),
max_input = wrap.find( '.slider-max-input' ),
max_ignore = wrap.find( '.slider-max-ignore' ),
min = me.data( 'min' ),
max = me.data( 'max' );
if ( isNaN( min ) ) { min = 0; }
if ( isNaN( max ) ) { max = 9999; }
inp_min.prop( 'readonly', true );
inp_max.prop( 'readonly', true );
var update_fields = function update_fields( val1, val2 ) {
inp_min.val( val1 );
inp_max.val( val2 );
if ( val1 === min ) {
min_input.hide();
min_ignore.show();
} else {
min_input.show();
min_ignore.hide();
}
if ( val2 === max ) {
max_input.hide();
max_ignore.show();
} else {
max_input.show();
max_ignore.hide();
}
};
me.slider({
range: true,
min: min,
max: max,
values: [ inp_min.val(), inp_max.val() ],
slide: function( event, ui ) {
update_fields( ui.values[0], ui.values[1] );
}
});
update_fields( inp_min.val(), inp_max.val() );
});
};
chk_colors.click( toggle_section );
chk_size.click( toggle_section );
chk_can_hide.click( toggle_section );
chk_close_hides.click( toggle_section );
opt_display.click( toggle_section_group );
toggle_section.call( chk_colors );
toggle_section.call( chk_size );
toggle_section.call( chk_can_hide );
toggle_section.call( chk_close_hides );
opt_display.each(function() {
toggle_section.call( jQuery( this ) );
});
create_sliders();
}
// Toggle rules on/off
function init_rules() {
var all_rules = jQuery( '#meta-rules .all-rules' ),
active_rules = jQuery( '#meta-rules .active-rules' );
if ( ! all_rules.length ) { return; }
var toggle_checkbox = function toggle_checkbox( ev ) {
var me = jQuery( ev.target ),
chk = me.find( 'input.wpmui-toggle-checkbox' );
if ( me.closest( '.wpmui-toggle' ).length ) { return; }
if ( me.hasClass( 'inactive' ) ) { return false; }
chk.trigger( 'click' );
};
var toggle_rule = function toggle_rule() {
var me = jQuery( this ),
rule = me.closest( '.rule' ),
sel = me.data( 'form' ),
form = active_rules.find( sel ),
active = me.prop( 'checked' );
if ( active ) {
rule.removeClass( 'off' ).addClass( 'on' );
form.removeClass( 'off' ).addClass( 'on open' );
} else {
rule.removeClass( 'on' ).addClass( 'off' );
form.removeClass( 'on' ).addClass( 'off' );
}
exclude_rules( me, active );
};
var exclude_rules = function exclude_rules( checkbox, active ) {
var ind, excl1, excl2,
excl = checkbox.data( 'exclude' ),
keys = (excl ? excl.split( ',' ) : []);
// Exclude other rules.
for ( ind = keys.length - 1; ind >= 0; ind -= 1 ) {
excl1 = all_rules.find( '.rule-' + keys[ ind ] );
excl2 = active_rules.find( '#po-rule-' + keys[ ind ] );
if ( excl1.hasClass( 'on' ) ) {
// Rule is active; possibly migrated from old PopUp editor
// so we cannot disable the rule now...
continue;
}
excl1.prop( 'disabled', active );
if ( active ) {
excl1.addClass( 'inactive off' ).removeClass( 'on' );
excl2.addClass( 'off' ).removeClass( 'on' );
} else {
excl1.removeClass( 'inactive off' );
}
}
};
var toggle_form = function toggle_form() {
var me = jQuery( this ),
form = me.closest( '.rule' );
form.toggleClass( 'open' );
};
all_rules.find( 'input.wpmui-toggle-checkbox' ).click( toggle_rule );
all_rules.find( '.rule' ).click( toggle_checkbox );
active_rules.on( 'click', '.rule-title,.rule-toggle', toggle_form );
// Exclude rules.
all_rules.find( '.rule.on input.wpmui-toggle-checkbox' ).each(function() {
exclude_rules( jQuery( this ), true );
});
jQuery( '.init-loading' ).removeClass( 'wpmui-loading' );
}
// Hook up the "Featured image" button.
function init_image() {
// Uploading files
var box = jQuery( '.content-image' ),
btn = box.find( '.add_image' ),
dropzone = box.find( '.featured-img' ),
reset = box.find( '.reset' ),
inp = box.find( '.po-image' ),
img_preview = box.find( '.img-preview' ),
img_label = box.find( '.lbl-empty' ),
img_pos = box.find( '.img-pos' ),
file_frame;
// User selected an image (via drag-drop or file_frame)
var use_image = function use_image( url ) {
inp.val( url );
img_preview.attr( 'src', url ).show();
img_label.hide();
img_pos.show();
dropzone.addClass( 'has-image' );
};
// User selected an image (via drag-drop or file_frame)
var reset_image = function reset_image( url ) {
inp.val( '' );
img_preview.attr( 'src', '' ).hide();
img_label.show();
img_pos.hide();
dropzone.removeClass( 'has-image' );
};
// User clicks on the "Add image" button.
var select_clicked = function select_clicked( ev ) {
ev.preventDefault();
// If the media frame already exists, reopen it.
if ( file_frame ) {
file_frame.open();
return;
}
// Create the media frame.
file_frame = wp.media.frames.file_frame = wp.media({
title: btn.attr( 'data-title' ),
button: {
text: btn.attr( 'data-button' )
},
multiple: false // Set to true to allow multiple files to be selected
});
// When an image is selected, run a callback.
file_frame.on( 'select', function() {
// We set multiple to false so only get one image from the uploader
var attachment = file_frame.state().get('selection').first().toJSON();
// Do something with attachment.id and/or attachment.url here
use_image( attachment.url );
});
// Finally, open the modal
file_frame.open();
};
var select_pos = function select_pos( ev ) {
var me = jQuery( this );
img_pos.find( '.option' ).removeClass( 'selected' );
me.addClass( 'selected' );
};
btn.on( 'click', select_clicked );
reset.on( 'click', reset_image );
img_pos.on( 'click', '.option', select_pos );
}
// ----- POPUP LIST --
// Adds custom bulk actions to the popup list.
// Adds custom bulk actions to the popup list.
// The action labels are supplied by PHP via the localized
// `window.po_bulk` object (key => label).
function bulk_actions() {
	var key,
		// NOTE: selectors previously contained a stray trailing space
		// inside the quotes; jQuery happened to trim it, but the clean
		// selector is used here.
		ba1 = jQuery( 'select[name="action"]' ),
		ba2 = jQuery( 'select[name="action2"]' );

	// Nothing to do when the bulk-action dropdown or the localized
	// action list is missing.
	if ( ! ba1.length || 'object' !== typeof window.po_bulk ) { return; }

	for ( key in window.po_bulk ) {
		// Own-property guard, consistent with sortable_list().
		if ( ! window.po_bulk.hasOwnProperty( key ) ) { continue; }

		// Append the option to both bulk-action dropdowns
		// (top and bottom of the list table).
		jQuery( '<option>' )
			.val( key )
			.text( window.po_bulk[key] )
			.appendTo( ba1 )
			.clone()
			.appendTo( ba2 );
	}
}
// Makes the post-list sortable (to change popup-order)
function sortable_list() {
var table = jQuery( 'table.posts' ),
tbody = table.find( '#the-list' );
if ( ! tbody.length ) { return; }
var ajax_done = function ajax_done( resp, okay ) {
table.removeClass( 'wpmui-loading' );
if ( okay ) {
for ( var id in resp ) {
if ( ! resp.hasOwnProperty( id ) ) { continue; }
tbody.find( '#post-' + id + ' .the-pos' ).text( resp[id] );
}
}
};
var save_order = function save_order( event, ui ) {
var i,
rows = tbody.find('tr'),
order = [];
for ( i = 0; i < rows.length; i+= 1 ) {
order.push( jQuery( rows[i] ).attr( 'id' ) );
}
table.addClass( 'wpmui-loading' );
wpmUi.ajax( null, 'po-ajax' )
.data({
'do': 'order',
'order': order
})
.ondone( ajax_done )
.load_json();
};
tbody.sortable({
placeholder: 'ui-sortable-placeholder',
axis: 'y',
handle: '.column-po_order',
helper: 'clone',
opacity: 0.75,
update: save_order
});
tbody.disableSelection();
}
// Shows a preview of the current PopUp.
function init_preview() {
var doc = jQuery( document ),
body = jQuery( '#wpcontent' );
var handle_list_click = function handle_list_click( ev ) {
var me = jQuery( this ),
po_id = me.data( 'id' );
ev.preventDefault();
if ( undefined === window.inc_popup ) { return false; }
body.addClass( 'wpmui-loading' );
window.inc_popup.load( po_id );
return false;
};
var handle_editor_click = function handle_editor_click( ev ) {
var data,
me = jQuery( this ),
form = jQuery( '#post' ),
ajax = wpmUi.ajax();
ev.preventDefault();
if ( undefined === window.inc_popup ) { return false; }
data = ajax.extract_data( form );
body.addClass( 'wpmui-loading' );
window.inc_popup.load( 0, data );
return false;
};
var show_popup = function show_popup( ev, popup ) {
body.removeClass( 'wpmui-loading' );
popup.init();
};
doc.on( 'click', '.posts .po-preview', handle_list_click );
doc.on( 'click', '#post .preview', handle_editor_click );
doc.on( 'popup-initialized', show_popup );
}
// Initialize the CSS editor
// Initialize the Ace CSS editor(s).
// Fixes: the original called setUseWrapMode(true) and immediately
// overwrote it with setUseWrapMode(false) (dead call), and iterated
// the same selector twice; both loops are merged here.
function init_css_editor() {
	jQuery('.po_css_editor').each(function(){
		var editor = ace.edit(this.id),
			session = editor.getSession(),
			// The element's "input" data attribute points at the hidden
			// form field that receives the CSS for submission.
			input = jQuery( jQuery(this).data('input') );

		// Expose the editor instance so other code can access it later.
		jQuery(this).data('editor', editor);

		editor.setTheme('ace/theme/chrome');
		session.setMode('ace/mode/css');
		// Long lines are not wrapped (final state of the original code).
		session.setUseWrapMode(false);

		// Keep the hidden input in sync with the editor content so the
		// value is submitted together with the post form.
		session.on('change', function () {
			input.val( session.getValue() );
		});
	});
}
// Only run on the PopUp custom-post-type admin screens.
if ( ! jQuery( 'body.post-type-inc_popup' ).length ) {
	return;
}

// EDITOR (add/edit a single PopUp)
if ( jQuery( 'body.post-php' ).length || jQuery( 'body.post-new-php' ).length ) {
	disable_metabox_dragging();
	scrolling_submitdiv();
	init_colorpicker();
	init_edit_controls();
	init_rules();
	init_preview();
	init_image();
	init_css_editor();
	wpmUi.upgrade_multiselect();
}
// POPUP LIST (the post list table)
else if ( jQuery( 'body.edit-php' ).length ) {
	sortable_list();
	bulk_actions();
	init_preview();
}
}); | iAPT/producerroom | wp-content/plugins/wordpress-popup/js/popup-admin.js | JavaScript | gpl-2.0 | 15,341 |
goog.provide('ol.source.ImageVector');
goog.require('goog.asserts');
goog.require('goog.events');
goog.require('goog.events.EventType');
goog.require('goog.vec.Mat4');
goog.require('ol.dom');
goog.require('ol.extent');
goog.require('ol.render.canvas.ReplayGroup');
goog.require('ol.renderer.vector');
goog.require('ol.source.ImageCanvas');
goog.require('ol.source.Vector');
goog.require('ol.style.Style');
goog.require('ol.vec.Mat4');
/**
* @classdesc
* An image source whose images are canvas elements into which vector features
* read from a vector source (`ol.source.Vector`) are drawn. An
* `ol.source.ImageVector` object is to be used as the `source` of an image
* layer (`ol.layer.Image`). Image layers are rotated, scaled, and translated,
* as opposed to being re-rendered, during animations and interactions. So, like
* any other image layer, an image layer configured with an
* `ol.source.ImageVector` will exhibit this behaviour. This is in contrast to a
* vector layer, where vector features are re-drawn during animations and
* interactions.
*
* @constructor
* @extends {ol.source.ImageCanvas}
* @param {olx.source.ImageVectorOptions} options Options.
* @api
*/
ol.source.ImageVector = function(options) {

  /**
   * The wrapped vector source whose features are rasterized.
   * @private
   * @type {ol.source.Vector}
   */
  this.source_ = options.source;

  /**
   * Scratch matrix reused by getTransform_().
   * @private
   * @type {!goog.vec.Mat4.Number}
   */
  this.transform_ = goog.vec.Mat4.createNumber();

  /**
   * Context of the canvas the features are drawn into.
   * @private
   * @type {CanvasRenderingContext2D}
   */
  this.canvasContext_ = ol.dom.createCanvasContext2D();

  /**
   * Current canvas size; tracked to avoid needless re-allocation.
   * @private
   * @type {ol.Size}
   */
  this.canvasSize_ = [0, 0];

  /**
   * Replay group of the most recent rendering; used by
   * forEachFeatureAtCoordinate() for hit detection.
   * @private
   * @type {ol.render.canvas.ReplayGroup}
   */
  this.replayGroup_ = null;

  // NOTE: this.source_ must be assigned before calling the base
  // constructor because the options below read this.source_.getState().
  goog.base(this, {
    attributions: options.attributions,
    canvasFunction: goog.bind(this.canvasFunctionInternal_, this),
    logo: options.logo,
    projection: options.projection,
    ratio: options.ratio,
    resolutions: options.resolutions,
    state: this.source_.getState()
  });

  /**
   * User provided style.
   * @type {ol.style.Style|Array.<ol.style.Style>|ol.style.StyleFunction}
   * @private
   */
  this.style_ = null;

  /**
   * Style function for use within the library.
   * @type {ol.style.StyleFunction|undefined}
   * @private
   */
  this.styleFunction_ = undefined;

  this.setStyle(options.style);

  // Re-render and propagate state whenever the wrapped source changes.
  goog.events.listen(this.source_, goog.events.EventType.CHANGE,
      this.handleSourceChange_, undefined, this);

};
goog.inherits(ol.source.ImageVector, ol.source.ImageCanvas);
/**
* @param {ol.Extent} extent Extent.
* @param {number} resolution Resolution.
* @param {number} pixelRatio Pixel ratio.
* @param {ol.Size} size Size.
 * @param {ol.proj.Projection} projection Projection.
* @return {HTMLCanvasElement} Canvas element.
* @private
*/
ol.source.ImageVector.prototype.canvasFunctionInternal_ =
    function(extent, resolution, pixelRatio, size, projection) {

  // Build a fresh replay group for this extent/resolution.
  var replayGroup = new ol.render.canvas.ReplayGroup(
      ol.renderer.vector.getTolerance(resolution, pixelRatio), extent,
      resolution);

  // Let the wrapped source fetch any features for this view.
  this.source_.loadFeatures(extent, resolution, projection);

  var loading = false;
  this.source_.forEachFeatureInExtentAtResolution(extent, resolution,
      /**
       * @param {ol.Feature} feature Feature.
       */
      function(feature) {
        loading = loading ||
            this.renderFeature_(feature, resolution, pixelRatio, replayGroup);
      }, this);
  replayGroup.finish();

  // Some style images are still loading: return null to signal that
  // the canvas is not ready yet (presumably re-invoked later by
  // ol.source.ImageCanvas -- confirm against base class).
  if (loading) {
    return null;
  }

  // Resize the canvas only when needed (resizing implicitly clears it);
  // otherwise clear it explicitly before redrawing.
  if (this.canvasSize_[0] != size[0] || this.canvasSize_[1] != size[1]) {
    this.canvasContext_.canvas.width = size[0];
    this.canvasContext_.canvas.height = size[1];
    this.canvasSize_[0] = size[0];
    this.canvasSize_[1] = size[1];
  } else {
    this.canvasContext_.clearRect(0, 0, size[0], size[1]);
  }

  var transform = this.getTransform_(ol.extent.getCenter(extent),
      resolution, pixelRatio, size);
  replayGroup.replay(this.canvasContext_, pixelRatio, transform, 0, {});

  // Keep the replay group around for hit detection.
  this.replayGroup_ = replayGroup;

  return this.canvasContext_.canvas;
};
/**
 * @inheritDoc
 */
ol.source.ImageVector.prototype.forEachFeatureAtCoordinate = function(
    coordinate, resolution, rotation, skippedFeatureUids, callback) {
  // Hit detection needs the replay group of the last rendering.
  if (goog.isNull(this.replayGroup_)) {
    return undefined;
  }

  // Remember which features were already reported so the callback is
  // invoked at most once per feature.
  /** @type {Object.<string, boolean>} */
  var seen = {};

  return this.replayGroup_.forEachFeatureAtCoordinate(
      coordinate, resolution, 0, skippedFeatureUids,
      /**
       * @param {ol.Feature} feature Feature.
       * @return {?} Callback result.
       */
      function(feature) {
        goog.asserts.assert(goog.isDef(feature));
        var uid = goog.getUid(feature).toString();
        if (!(uid in seen)) {
          seen[uid] = true;
          return callback(feature);
        }
      });
};
/**
 * Get a reference to the wrapped source.
 * @return {ol.source.Vector} Source.
 * @api
 */
ol.source.ImageVector.prototype.getSource = function() {
  return this.source_;
};


/**
 * Get the style for features. This returns whatever was passed to the `style`
 * option at construction or to the `setStyle` method.
 * @return {ol.style.Style|Array.<ol.style.Style>|ol.style.StyleFunction}
 *     Layer style.
 * @api stable
 */
ol.source.ImageVector.prototype.getStyle = function() {
  return this.style_;
};


/**
 * Get the style function (derived from the style in setStyle()).
 * @return {ol.style.StyleFunction|undefined} Layer style function.
 * @api stable
 */
ol.source.ImageVector.prototype.getStyleFunction = function() {
  return this.styleFunction_;
};
/**
 * Compute the transform that maps map coordinates to canvas pixel
 * coordinates for the current view (y axis flipped).
 * @param {ol.Coordinate} center Center.
 * @param {number} resolution Resolution.
 * @param {number} pixelRatio Pixel ratio.
 * @param {ol.Size} size Size.
 * @return {!goog.vec.Mat4.Number} Transform.
 * @private
 */
ol.source.ImageVector.prototype.getTransform_ =
    function(center, resolution, pixelRatio, size) {
  // Pixels per map unit.
  var scale = pixelRatio / resolution;
  return ol.vec.Mat4.makeTransform2D(this.transform_,
      size[0] / 2, size[1] / 2,
      scale, -scale,
      0,
      -center[0], -center[1]);
};
/**
 * Handle changes in image style state (e.g. an icon finished loading):
 * trigger a re-render of this source.
 * @param {goog.events.Event} event Image style change event.
 * @private
 */
ol.source.ImageVector.prototype.handleImageChange_ =
    function(event) {
  this.changed();
};
/**
 * Propagate state changes of the wrapped vector source.
 * @private
 */
ol.source.ImageVector.prototype.handleSourceChange_ = function() {
  // setState triggers a CHANGE event on this source, so relaying the
  // wrapped source's state also notifies our own listeners.
  this.setState(this.source_.getState());
};
/**
 * Render a single feature into the given replay group, using the
 * feature's own style function when present, otherwise this source's.
 * @param {ol.Feature} feature Feature.
 * @param {number} resolution Resolution.
 * @param {number} pixelRatio Pixel ratio.
 * @param {ol.render.canvas.ReplayGroup} replayGroup Replay group.
 * @return {boolean} `true` if an image is loading.
 * @private
 */
ol.source.ImageVector.prototype.renderFeature_ =
    function(feature, resolution, pixelRatio, replayGroup) {
  var styles;
  var featureStyleFunction = feature.getStyleFunction();
  if (goog.isDef(featureStyleFunction)) {
    styles = featureStyleFunction.call(feature, resolution);
  } else if (goog.isDef(this.styleFunction_)) {
    styles = this.styleFunction_(feature, resolution);
  }
  if (!goog.isDefAndNotNull(styles)) {
    // No style at all: the feature is simply not drawn.
    return false;
  }

  var loading = false;
  // The tolerance only depends on resolution/pixelRatio, so compute it
  // once outside the loop.
  var squaredTolerance =
      ol.renderer.vector.getSquaredTolerance(resolution, pixelRatio);
  for (var i = 0, ii = styles.length; i < ii; ++i) {
    loading = ol.renderer.vector.renderFeature(
        replayGroup, feature, styles[i], squaredTolerance,
        this.handleImageChange_, this) || loading;
  }
  return loading;
};
/**
 * Set the style for features. This can be a single style object, an array
 * of styles, or a function that takes a feature and resolution and returns
 * an array of styles. If it is `undefined` the default style is used. If
 * it is `null` the layer has no style (a `null` style), so only features
 * that have their own styles will be rendered in the layer. See
 * {@link ol.style} for information on the default style.
 * @param {ol.style.Style|Array.<ol.style.Style>|ol.style.StyleFunction|undefined}
 *     style Layer style.
 * @api stable
 */
ol.source.ImageVector.prototype.setStyle = function(style) {
  // undefined -> fall back to the library's default style.
  if (goog.isDef(style)) {
    this.style_ = style;
  } else {
    this.style_ = ol.style.defaultStyleFunction;
  }
  // null -> no style function at all (features need their own styles).
  this.styleFunction_ = goog.isNull(style) ?
      undefined : ol.style.createStyleFunction(this.style_);
  this.changed();
};
| henriquespedro/Autarquia-Livre | vendor/openlayers/ol/ol/source/imagevectorsource.js | JavaScript | gpl-2.0 | 8,407 |
/*
* OCaml Support For IntelliJ Platform.
* Copyright (C) 2010 Maxim Manuylov
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/gpl-2.0.html>.
*/
package manuylov.maxim.ocaml.lang.parser.psi.element.impl;
import com.intellij.lang.ASTNode;
import manuylov.maxim.ocaml.lang.parser.psi.OCamlElementVisitor;
import manuylov.maxim.ocaml.lang.parser.psi.element.OCamlParenthesesTypeParameters;
import org.jetbrains.annotations.NotNull;
/**
* @author Maxim.Manuylov
* Date: 13.05.2010
*/
public class OCamlParenthesesTypeParametersImpl extends OCamlParenthesesImpl implements OCamlParenthesesTypeParameters {
    /**
     * @param node the AST node this PSI element wraps
     */
    public OCamlParenthesesTypeParametersImpl(@NotNull final ASTNode node) {
        super(node);
    }

    /** Dispatches to the visitor callback for parenthesized type parameters. */
    public void visit(@NotNull final OCamlElementVisitor visitor) {
        visitor.visitParenthesesTypeParameters(this);
    }
}
| emmeryn/intellij-ocaml | OCamlSources/src/manuylov/maxim/ocaml/lang/parser/psi/element/impl/OCamlParenthesesTypeParametersImpl.java | Java | gpl-2.0 | 1,487 |
//
// Copyright (c) 2004-2006 Jaroslaw Kowalski <jaak@jkowalski.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * Neither the name of Jaroslaw Kowalski nor the names of its
// contributors may be used to endorse or promote products derived from this
// software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
// THE POSSIBILITY OF SUCH DAMAGE.
//
using System;
using System.IO;
using System.Text;
using System.Xml;
using System.Reflection;
using System.Collections;
using System.Diagnostics;
using NLog.Internal;
using NLog.Config;
using NLog.Conditions;
using System.Collections.Generic;
namespace NLog.Targets.Wrappers
{
/// <summary>
/// A target wrapper that filters buffered log entries based on a set of conditions
/// that are evaluated on all events.
/// </summary>
/// <remarks>
/// PostFilteringWrapper must be used with some type of buffering target or wrapper, such as
/// AsyncTargetWrapper, BufferingWrapper or ASPNetBufferingWrapper.
/// </remarks>
/// <example>
/// <p>
/// This example works like this. If there are no Warn,Error or Fatal messages in the buffer
/// only Info messages are written to the file, but if there are any warnings or errors,
/// the output includes detailed trace (levels >= Debug). You can plug in a different type
/// of buffering wrapper (such as ASPNetBufferingWrapper) to achieve different
/// functionality.
/// </p>
/// <p>
/// To set up the target in the <a href="config.html">configuration file</a>,
/// use the following syntax:
/// </p>
/// <code lang="XML" src="examples/targets/Configuration File/PostFilteringWrapper/NLog.config" />
/// <p>
/// The above examples assume just one target and a single rule. See below for
/// a programmatic configuration that's equivalent to the above config file:
/// </p>
/// <code lang="C#" src="examples/targets/Configuration API/PostFilteringWrapper/Simple/Example.cs" />
/// </example>
[Target("PostFilteringWrapper", IgnoresLayout = true, IsWrapper = true)]
public class PostFilteringTargetWrapper: WrapperTargetBase
{
    // Filter used when no rule's ExistsCondition matches any event.
    // NOTE(review): assumed to always be configured via DefaultFilter -
    // Write() and PopulateLayouts() dereference it without a null check;
    // confirm the configuration loader guarantees this.
    private ConditionExpression _defaultFilter;
    private FilteringRuleCollection _rules = new FilteringRuleCollection();

    /// <summary>
    /// Creates a new instance of <see cref="PostFilteringTargetWrapper"/>.
    /// </summary>
    public PostFilteringTargetWrapper()
    {
    }

    /// <summary>
    /// Default filter to be applied when no specific rule matches.
    /// </summary>
    public string DefaultFilter
    {
        get { return _defaultFilter.ToString(); }
        set { _defaultFilter = ConditionParser.ParseExpression(value); }
    }

    /// <summary>
    /// Collection of filtering rules. The rules are processed top-down
    /// and the first rule that matches determines the filtering condition to
    /// be applied to log events.
    /// </summary>
    [ArrayParameter(typeof(FilteringRule), "when")]
    public FilteringRuleCollection Rules
    {
        get { return _rules; }
    }

    /// <summary>
    /// Evaluates all filtering rules to find the first one that matches.
    /// The matching rule determines the filtering condition to be applied
    /// to all items in a buffer. If no condition matches, default filter
    /// is applied to the array of log events.
    /// </summary>
    /// <param name="logEvents">Array of log events to be post-filtered.</param>
    public override void Write(LogEventInfo[] logEvents)
    {
        ConditionExpression resultFilter = null;

        if (InternalLogger.IsTraceEnabled)
        {
            InternalLogger.Trace("Input: {0} events", logEvents.Length);
        }

        // evaluate all the rules to get the filtering condition:
        // scan event-by-event; the first rule whose ExistsCondition is
        // true for any buffered event wins (rules are checked in order).
        for (int i = 0; i < logEvents.Length; ++i)
        {
            for (int j = 0; j < _rules.Count; ++j)
            {
                object v = _rules[j].ExistsCondition.Evaluate(logEvents[i]);

                if (v is bool && (bool)v)
                {
                    if (InternalLogger.IsTraceEnabled)
                        InternalLogger.Trace("Rule matched: {0}", _rules[j].ExistsCondition);
                    resultFilter = _rules[j].FilterCondition;
                    break;
                }
            }
            if (resultFilter != null)
                break;
        }

        if (resultFilter == null)
            resultFilter = _defaultFilter;

        if (InternalLogger.IsTraceEnabled)
            InternalLogger.Trace("Filter to apply: {0}", resultFilter);

        // apply the condition to the buffer: only events for which the
        // chosen filter evaluates to true are forwarded.
        List<LogEventInfo> resultBuffer = new List<LogEventInfo>();

        for (int i = 0; i < logEvents.Length; ++i)
        {
            object v = resultFilter.Evaluate(logEvents[i]);
            if (v is bool && (bool)v)
                resultBuffer.Add(logEvents[i]);
        }

        if (InternalLogger.IsTraceEnabled)
            InternalLogger.Trace("After filtering: {0} events", resultBuffer.Count);

        if (resultBuffer.Count > 0)
        {
            WrappedTarget.Write(resultBuffer.ToArray());
        }
    }

    /// <summary>
    /// Processes a single log event. Not very useful for this post-filtering
    /// wrapper.
    /// </summary>
    /// <param name="logEvent">Log event.</param>
    public override void Write(LogEventInfo logEvent)
    {
        // Delegate to the array overload so the same filtering applies.
        Write(new LogEventInfo[] { logEvent });
    }

    /// <summary>
    /// Adds all layouts used by this target to the specified collection.
    /// </summary>
    /// <param name="layouts">The collection to add layouts to.</param>
    public override void PopulateLayouts(LayoutCollection layouts)
    {
        base.PopulateLayouts(layouts);
        foreach (FilteringRule fr in Rules)
        {
            fr.FilterCondition.PopulateLayouts(layouts);
            fr.ExistsCondition.PopulateLayouts(layouts);
        }
        _defaultFilter.PopulateLayouts(layouts);
    }
}
} | WCell/WCell | Libraries/Source/NLog/Targets/Wrappers/PostFilteringWrapper.cs | C# | gpl-2.0 | 7,654 |
//# LCExtension.cc: Extend an LCRegion along straight lines to other dimensions
//# Copyright (C) 1998,2001
//# Associated Universities, Inc. Washington DC, USA.
//#
//# This library is free software; you can redistribute it and/or modify it
//# under the terms of the GNU Library General Public License as published by
//# the Free Software Foundation; either version 2 of the License, or (at your
//# option) any later version.
//#
//# This library is distributed in the hope that it will be useful, but WITHOUT
//# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
//# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
//# License for more details.
//#
//# You should have received a copy of the GNU Library General Public License
//# along with this library; if not, write to the Free Software Foundation,
//# Inc., 675 Massachusetts Ave, Cambridge, MA 02139, USA.
//#
//# Correspondence concerning AIPS++ should be addressed as follows:
//# Internet email: aips2-request@nrao.edu.
//# Postal address: AIPS++ Project Office
//# National Radio Astronomy Observatory
//# 520 Edgemont Road
//# Charlottesville, VA 22903-2475 USA
//#
//# $Id$
#include <casacore/lattices/LRegions/LCExtension.h>
#include <casacore/lattices/LRegions/LCBox.h>
#include <casacore/casa/Arrays/Vector.h>
#include <casacore/tables/Tables/TableRecord.h>
#include <casacore/casa/Utilities/GenSort.h>
#include <casacore/casa/Exceptions/Error.h>
namespace casacore { //# NAMESPACE CASACORE - BEGIN
// Default constructor: creates an uninitialized extension region.
LCExtension::LCExtension()
{}

// Construct from a region that is extended along the given axes over
// the extent of the given box. The region is cloned, so the caller
// keeps ownership of its own object.
LCExtension::LCExtension (const LCRegion& region,
                          const IPosition& extendAxes,
                          const LCBox& extendBox)
: LCRegionMulti (True, region.cloneRegion())
{
    // Fill the other members variables and determine the bounding box.
    fill (extendAxes, extendBox);
}

// As above, but the region pointer is taken over (not cloned) when
// takeOver is True.
LCExtension::LCExtension (Bool takeOver,
                          const LCRegion* region,
                          const IPosition& extendAxes,
                          const LCBox& extendBox)
: LCRegionMulti (takeOver, region)
{
    // Fill the other members variables and determine the bounding box.
    fill (extendAxes, extendBox);
}

// Copy constructor (copy semantics).
LCExtension::LCExtension (const LCExtension& other)
: LCRegionMulti (other),
  itsExtendAxes (other.itsExtendAxes),
  itsRegionAxes (other.itsRegionAxes),
  itsExtendBox  (other.itsExtendBox)
{}

LCExtension::~LCExtension()
{}
// Assignment (copy semantics). The axis vectors are resized first,
// because casacore Vector assignment requires conforming shapes.
LCExtension& LCExtension::operator= (const LCExtension& other)
{
    if (this != &other) {
        LCRegionMulti::operator= (other);
        itsExtendAxes.resize (other.itsExtendAxes.nelements());
        itsRegionAxes.resize (other.itsRegionAxes.nelements());
        itsExtendAxes = other.itsExtendAxes;
        itsRegionAxes = other.itsRegionAxes;
        itsExtendBox = other.itsExtendBox;
    }
    return *this;
}

// Equality: base class comparison plus the extension-specific members.
Bool LCExtension::operator== (const LCRegion& other) const
{
    // Check if parent class matches.
    // If so, we can safely cast.
    if (! LCRegionMulti::operator== (other)) {
        return False;
    }
    const LCExtension& that = (const LCExtension&)other;
    // Check the private data
    if (! itsExtendAxes.isEqual (that.itsExtendAxes)
    ||  ! itsRegionAxes.isEqual (that.itsRegionAxes)
    ||  !(itsExtendBox == that.itsExtendBox)) {
        return False;
    }
    return True;
}

// Make a polymorphic copy of this object (caller owns the result).
LCRegion* LCExtension::cloneRegion() const
{
    return new LCExtension (*this);
}
// Construct a translated copy of this extension: the extend box is
// translated along the extend axes, the wrapped region along the
// region axes, and a new LCExtension is built from the parts.
LCRegion* LCExtension::doTranslate (const Vector<Float>& translateVector,
                                    const IPosition& newLatticeShape) const
{
    uInt i;
    // First translate the extendBox.
    // Take appropriate elements from the vectors.
    uInt nre = itsExtendAxes.nelements();
    Vector<Float> boxTransVec (nre);
    IPosition boxLatShape (nre);
    for (i=0; i<nre; i++) {
        uInt axis = itsExtendAxes(i);
        boxTransVec(i) = translateVector(axis);
        boxLatShape(i) = newLatticeShape(axis);
    }
    LCBox* boxPtr = (LCBox*)(itsExtendBox.translate (boxTransVec, boxLatShape));
    // Now translate the region.
    uInt nrr = itsRegionAxes.nelements();
    Vector<Float> regTransVec (nrr);
    IPosition regLatShape (nrr);
    for (i=0; i<nrr; i++) {
        uInt axis = itsRegionAxes(i);
        regTransVec(i) = translateVector(axis);
        regLatShape(i) = newLatticeShape(axis);
    }
    LCRegion* regPtr = region().translate (regTransVec, regLatShape);
    // Create the new LCExtension object; it copies what it needs, so
    // the intermediate box and region can be deleted afterwards.
    LCExtension* extPtr = new LCExtension (*regPtr, itsExtendAxes, *boxPtr);
    delete boxPtr;
    delete regPtr;
    return extPtr;
}
// Static class name, used as the type tag in serialized records.
String LCExtension::className()
{
    return "LCExtension";
}

// Dynamic type name (simply the class name).
String LCExtension::type() const
{
    return className();
}
// Serialize this region into a TableRecord with the fields
// "region" (wrapped region), "axes" (extend axes) and "box"
// (extend box), plus the standard type fields.
TableRecord LCExtension::toRecord (const String& tableName) const
{
    TableRecord rec;
    defineRecordFields (rec, className());
    rec.defineRecord ("region", region().toRecord (tableName));
    rec.define ("axes", itsExtendAxes.asVector());
    rec.defineRecord ("box", itsExtendBox.toRecord (tableName));
    return rec;
}

// Reconstruct an LCExtension from a record written by toRecord.
LCExtension* LCExtension::fromRecord (const TableRecord& rec,
                                      const String& tableName)
{
    // Initialize pointers to 0 to get rid of gcc-2.95 warnings.
    LCRegion* regPtr = 0;
    regPtr = LCRegion::fromRecord (rec.asRecord("region"), tableName);
    LCBox* boxPtr = 0;
    boxPtr = (LCBox*)(LCRegion::fromRecord (rec.asRecord("box"), tableName));
    // The new LCExtension takes over the region pointer; the box is
    // copied, so it is deleted here.
    LCExtension* extPtr = new LCExtension (True, regPtr,
                                           Vector<Int>(rec.toArrayInt ("axes")),
                                           *boxPtr);
    delete boxPtr;
    return extPtr;
}
// Derive itsRegionAxes: the axes of the extended lattice that are not
// extend axes, in ascending order. makeAxisPath places the remaining
// axes after the given extend axes, so the tail of allAxes is exactly
// the set of region axes.
void LCExtension::fillRegionAxes()
{
    uInt nre = itsExtendAxes.nelements();
    uInt nrr = region().ndim();
    uInt nrdim = nre+nrr;
    // allAxes will get the remaining (thus region) axes at the end.
    IPosition allAxes = IPosition::makeAxisPath (nrdim, itsExtendAxes);
    itsRegionAxes.resize (nrr);
    for (uInt i=nre; i<nrdim; i++) {
        uInt axis = allAxes(i);
        itsRegionAxes(i-nre) = axis;
    }
}
// Validate the extend axes and box, store the axes in ascending order,
// derive the region axes, and determine the lattice shape and bounding
// box of the extended region. Called from the constructors.
void LCExtension::fill (const IPosition& extendAxes, const LCBox& extendBox)
{
    // Check if extend axes are specified correctly.
    // They do not need to be in ascending order, but duplicates are
    // not allowed.
    IPosition regionShape = region().shape();
    uInt nre = extendAxes.nelements();
    if (nre == 0) {
        throw (AipsError ("LCExtension::LCExtension - "
                          "no extend axes have been specified"));
    }
    if (nre != extendBox.blc().nelements()) {
        throw (AipsError ("LCExtension::LCExtension - "
                          "number of axes in extend box mismatches "
                          "number of extend axes"));
    }
    // The axes can be specified in any order. We want them ordered.
    // So sort them and fill itsExtendAxes and itsExtendBox.
    itsExtendAxes.resize (nre);
    IPosition boxLatShape(nre);
    Vector<Float> boxLatBlc(nre);
    Vector<Float> boxLatTrc(nre);
    Vector<uInt> reginx(nre);
    GenSortIndirect<ssize_t>::sort (reginx, extendAxes.storage(), nre);
    Int first = -1;
    for (uInt i=0; i<nre; i++) {
        // reginx maps sorted position -> original position, so the box
        // data is permuted together with the axes.
        uInt axis = reginx(i);
        itsExtendAxes(i) = extendAxes(axis);
        boxLatShape(i) = extendBox.latticeShape()(axis);
        boxLatBlc(i) = extendBox.blc()(axis);
        boxLatTrc(i) = extendBox.trc()(axis);
        // After sorting, a duplicate shows up as a non-increasing value.
        if (itsExtendAxes(i) <= first) {
            throw (AipsError ("LCExtension::LCExtension - "
                              "extend axes multiply specified"));
        }
        first = itsExtendAxes(i);
    }
    itsExtendBox = LCBox (boxLatBlc, boxLatTrc, boxLatShape);
    // Fill itsRegionAxes, i.e. the mapping of the axis of the contributing
    // region into the extended region.
    fillRegionAxes();
    // Make up the lattice shape from the region and box latticeshape.
    // Fill the bounding box from blc/trc in region and box.
    uInt nrr = itsRegionAxes.nelements();
    uInt nrdim = nre+nrr;
    IPosition latShape(nrdim);
    IPosition blc (nrdim);
    IPosition trc (nrdim);
    const IPosition& regionShp = region().latticeShape();
    const IPosition& regionBlc = region().boundingBox().start();
    const IPosition& regionTrc = region().boundingBox().end();
    for (uInt i=0; i<nrr; i++) {
        uInt axis = itsRegionAxes(i);
        latShape(axis) = regionShp(i);
        blc(axis) = regionBlc(i);
        trc(axis) = regionTrc(i);
    }
    const IPosition& boxShp = itsExtendBox.latticeShape();
    const IPosition& boxBlc = itsExtendBox.boundingBox().start();
    const IPosition& boxTrc = itsExtendBox.boundingBox().end();
    for (uInt i=0; i<nre; i++) {
        uInt axis = itsExtendAxes(i);
        latShape(axis) = boxShp(i);
        blc(axis) = boxBlc(i);
        trc(axis) = boxTrc(i);
    }
    setShapeAndBoundingBox (latShape, Slicer(blc, trc, Slicer::endIsLast));
    fillHasMask();
}
// Compute the mask for the given section of the extended region:
// the mask of the wrapped region is read once for the region axes
// and then replicated along all extend axes.
void LCExtension::multiGetSlice (Array<Bool>& buffer,
                                 const Slicer& section)
{
    buffer.resize (section.length());
    uInt i;
    uInt nre = itsExtendAxes.nelements();
    uInt nrr = itsRegionAxes.nelements();
    // Read the required region section.
    // This means we have to create a Slicer for those axes only.
    IPosition blc(nrr);
    IPosition len(nrr);
    IPosition inc(nrr);
    IPosition shape(buffer.ndim(), 1);
    for (i=0; i<nrr; i++) {
        uInt axis = itsRegionAxes(i);
        blc(i) = section.start()(axis);
        len(i) = section.length()(axis);
        inc(i) = section.stride()(axis);
        shape(axis) = len(i);
    }
    Array<Bool> tmpbuf(len);
    LCRegion* reg = (LCRegion*)(regions()[0]);
    reg->doGetSlice (tmpbuf, Slicer(blc, len, inc));
    // Reform tmpbuf, so it has the same dimensionality as buffer
    // (extend axes get length 1).
    Array<Bool> mask = tmpbuf.reform (shape);
    // Now we have to extend tmpbuf along all extend axes.
    const IPosition& length = section.length();
    IPosition pos (buffer.ndim(), 0);
    IPosition end (buffer.shape() - 1);
    //# Iterate along itsExtendAxes (the new axes) through the new mask.
    //# This is an odometer-style loop over all extend-axis positions.
    for (;;) {
        for (i=0; i<nre; i++) {
            end(itsExtendAxes(i)) = pos(itsExtendAxes(i));
        }
        //# Set each section of the mask to the mask of the region.
        buffer(pos,end) = mask;
        //# Go to the next section.
        for (i=0; i<nre; i++) {
            if (++pos(itsExtendAxes(i)) < length(itsExtendAxes(i))) {
                break;
            }
            // This dimension is done. Reset it and continue with the next.
            pos(itsExtendAxes(i)) = 0;
        }
        //# End the iteration when all dimensions are done.
        if (i == nre) {
            break;
        }
    }
}

// Delegate to the generic Lattice implementation for the cursor shape.
IPosition LCExtension::doNiceCursorShape (uInt maxPixels) const
{
    return Lattice<Bool>::doNiceCursorShape (maxPixels);
}
} //# NAMESPACE CASACORE - END
| bmerry/casacore | lattices/LRegions/LCExtension.cc | C++ | gpl-2.0 | 10,331 |
/* { dg-do compile } */
/* { dg-options "-O2 -fdump-tree-forwprop2" } */
/* LLVM LOCAL test not applicable */
/* { dg-require-fdump "" } */
// Simple 3-component vector whose copy constructor copies elementwise.
class YY { public:
  YY(const YY &v) { e[0] = v.e[0]; e[1] = v.e[1]; e[2] = v.e[2]; }
  double &y() { return e[1]; }
  double e[3]; };
// Wrapper that returns its YY member by value.
class XX { public:
  YY direction() const { return v; }
  YY v; };
// Calls y() on the returned temporary; the dump scans below check that
// the optimizers leave no address-taking of `this` or `r` behind.
int foo(XX& r) {
  if (r.direction().y() < 0.000001) return 0;
  return 1; }
/* { dg-final { scan-tree-dump-times "&this" 0 "forwprop2" } } */
/* { dg-final { scan-tree-dump-times "&r" 0 "forwprop2" } } */
/* { dg-final { cleanup-tree-dump "forwprop2" } } */
| unofficial-opensource-apple/llvmgcc42 | gcc/testsuite/g++.dg/tree-ssa/pr14814.C | C++ | gpl-2.0 | 608 |
// methods (functions of objects)
// see: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Functions/Method_definitions
// http://www.ecma-international.org/ecma-262/6.0/#sec-method-definitions
// Fixture object exercising ES6 shorthand method syntax.
var Obj = {
    myMethod(a, b) {
    },
    // generator method shorthand (note the leading "*")
    *myGenerator(a, b) {
    }
}
| masatake/ctags | Units/parser-javascript.r/js-methods.d/input.js | JavaScript | gpl-2.0 | 284 |
/*
* Copyright (C) 2013 University of Dundee & Open Microscopy Environment.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
package org.openmicroscopy.shoola.keywords;
import java.awt.Component;
import java.awt.Graphics2D;
import java.awt.image.BufferedImage;
import java.awt.image.Raster;
import java.awt.image.RenderedImage;
import java.util.NoSuchElementException;
import javax.swing.JPanel;
import org.robotframework.abbot.finder.BasicFinder;
import org.robotframework.abbot.finder.ComponentNotFoundException;
import org.robotframework.abbot.finder.Matcher;
import org.robotframework.abbot.finder.MultipleComponentsFoundException;
import com.google.common.hash.Hasher;
import com.google.common.hash.Hashing;
/**
 * Robot Framework SwingLibrary keyword library offering methods for checking thumbnails.
 * @author m.t.b.carroll@dundee.ac.uk
 * @since 4.4.9
 */
public class ThumbnailCheckLibrary
{
    /** Allow Robot Framework to instantiate this library only once. */
    public static final String ROBOT_LIBRARY_SCOPE = "GLOBAL";

    /**
     * Find the thumbnail <code>Component</code> in the AWT hierarchy.
     * @param panelType if the thumbnail should be the whole <code>"image node"</code> or just its <code>"thumbnail"</code> canvas
     * @param imageFilename the name of the image whose thumbnail is sought
     * @return the AWT <code>Component</code> for the thumbnail
     * @throws MultipleComponentsFoundException if multiple thumbnails are for the given image name
     * @throws ComponentNotFoundException if no thumbnails are for the given image name
     */
    private static Component componentFinder(final String panelType, final String imageFilename)
            throws ComponentNotFoundException, MultipleComponentsFoundException {
        // Thumbnail panels are named "<panel type> for <image file name>" by the application.
        final String wantedName = panelType + " for " + imageFilename;
        return new BasicFinder().find(new Matcher() {
            public boolean matches(Component candidate) {
                return candidate instanceof JPanel && wantedName.equals(candidate.getName());
            }});
    }

    /**
     * Convert the thumbnail for the image of the given filename into rasterized pixel data.
     * Each pixel is represented by an <code>int</code>.
     * @param panelType if the thumbnail should be the whole <code>"image node"</code> or just its <code>"thumbnail"</code> canvas
     * @param imageFilename the name of the image whose thumbnail is to be rasterized
     * @return the image on the thumbnail
     * @throws MultipleComponentsFoundException if multiple thumbnails are for the given image name
     * @throws ComponentNotFoundException if no thumbnails are for the given image name
     */
    private static RenderedImage captureImage(final String panelType, final String imageFilename)
            throws ComponentNotFoundException, MultipleComponentsFoundException {
        final JPanel panel = (JPanel) componentFinder(panelType, imageFilename);
        final BufferedImage snapshot =
                new BufferedImage(panel.getWidth(), panel.getHeight(), StaticFieldLibrary.IMAGE_TYPE);
        final Graphics2D canvas = snapshot.createGraphics();
        if (canvas == null) {
            throw new RuntimeException("thumbnail is not displayable");
        }
        // Paint the live Swing component into the off-screen buffer.
        panel.paint(canvas);
        canvas.dispose();
        return snapshot;
    }

    /**
     * <table>
     * <td>Get Thumbnail Border Color</td>
     * <td>name of image whose thumbnail is queried</td>
     * </table>
     * @param imageFilename the name of the image
     * @return the color of the thumbnail's corner pixel
     * @throws MultipleComponentsFoundException if multiple thumbnails exist for the given name
     * @throws ComponentNotFoundException if no thumbnails exist for the given name
     */
    public String getThumbnailBorderColor(String imageFilename)
            throws ComponentNotFoundException, MultipleComponentsFoundException {
        final RenderedImage capture = captureImage("image node", imageFilename);
        if (capture.getHeight() == 0) {
            throw new RuntimeException("image node has no pixels");
        }
        // The border color is sampled from the top-left corner pixel (0, 0).
        // NOTE(review): assumes IMAGE_TYPE packs each pixel into a single int.
        final int[] pixel = new int[1];
        capture.getData().getDataElements(0, 0, pixel);
        return Integer.toHexString(pixel[0]);
    }

    /**
     * <table>
     * <td>Is Thumbnail Monochromatic</td>
     * <td>name of image whose thumbnail is queried</td>
     * </table>
     * @param imageFilename the name of the image
     * @return if the image's thumbnail canvas is solidly one color
     * @throws MultipleComponentsFoundException if multiple thumbnails exist for the given name
     * @throws ComponentNotFoundException if no thumbnails exist for the given name
     */
    public boolean isThumbnailMonochromatic(String imageFilename)
            throws ComponentNotFoundException, MultipleComponentsFoundException {
        final RenderedImage capture = captureImage("thumbnail", imageFilename);
        final int width = capture.getWidth();
        final int height = capture.getHeight();
        if (height == 0) {
            throw new RuntimeException("thumbnail image has no pixels");
        }
        final Raster raster = capture.getData();
        final int[] pixel = new int[1];
        raster.getDataElements(0, 0, pixel);
        final int firstColor = pixel[0];
        // Compare every pixel against the first one; bail out on the first mismatch.
        for (int y = 0; y < height; y++) {
            for (int x = 0; x < width; x++) {
                raster.getDataElements(x, y, pixel);
                if (pixel[0] != firstColor) {
                    return false;
                }
            }
        }
        return true;
    }

    /**
     * <table>
     * <td>Get Thumbnail Hash</td>
     * <td>name of image whose thumbnail is queried</td>
     * </table>
     * @param imageFilename the name of the image
     * @return the hash of the thumbnail canvas image
     * @throws MultipleComponentsFoundException if multiple thumbnails exist for the given name
     * @throws ComponentNotFoundException if no thumbnails exist for the given name
     */
    public String getThumbnailHash(String imageFilename)
            throws ComponentNotFoundException, MultipleComponentsFoundException {
        final RenderedImage capture = captureImage("thumbnail", imageFilename);
        final Raster raster = capture.getData();
        final int[] pixel = new int[1];
        // Hash pixels row by row (x fastest) so the digest is stable across runs.
        final Hasher hasher = Hashing.goodFastHash(128).newHasher();
        for (int y = 0; y < capture.getHeight(); y++) {
            for (int x = 0; x < capture.getWidth(); x++) {
                raster.getDataElements(x, y, pixel);
                hasher.putInt(pixel[0]);
            }
        }
        return hasher.hash().toString();
    }

    /**
     * <table>
     * <td>Get Name Of Thumbnail For Image</td>
     * <td>name of image whose thumbnail is queried</td>
     * </table>
     * @param imageFilename the name of the image
     * @return the return value of the corresponding <code>ThumbnailCanvas.getName()</code>
     * @throws MultipleComponentsFoundException if multiple thumbnails exist for the given name
     * @throws ComponentNotFoundException if no thumbnails exist for the given name
     */
    public String getNameOfThumbnailForImage(final String imageFilename)
            throws ComponentNotFoundException, MultipleComponentsFoundException {
        return componentFinder("thumbnail", imageFilename).getName();
    }
}
| jballanc/openmicroscopy | components/tests/ui/library/java/src/org/openmicroscopy/shoola/keywords/ThumbnailCheckLibrary.java | Java | gpl-2.0 | 9,339 |
<?php
/**
* Akeeba Engine
* The modular PHP5 site backup engine
* @copyright Copyright (c)2009-2013 Nicholas K. Dionysopoulos
* @license GNU GPL version 3 or, at your option, any later version
* @package akeebaengine
*/
// Protection against direct access: this file must only be loaded by the Akeeba Engine
defined('AKEEBAENGINE') or die();

/**
 * Database driver interface, implemented by all Akeeba Engine database drivers.
 *
 * Based on Joomla! Platform 11.2
 */
interface AEAbstractDriverInterface
{
    /**
     * Test to see if the connector is available.
     *
     * @return boolean True on success, false otherwise.
     */
    public static function test();

    /**
     * Test to see if the connector is available.
     *
     * @return boolean True on success, false otherwise.
     *
     * @since 11.2
     */
    public static function isSupported();
}
/**
 * Database driver superclass. Used as the base of all Akeeba Engine database drivers.
 * Strongly based on Joomla Platform's JDatabase class.
 */
abstract class AEAbstractDriver extends AEAbstractObject implements AEAbstractDriverInterface
{
    /** @var string The name of the database. */
    private $_database;

    /** @var string The name of the database driver. */
    public $name;

    /** @var resource The db connection resource */
    protected $connection = '';

    /** @var integer The number of SQL statements executed by the database driver. */
    protected $count = 0;

    /** @var resource The database connection cursor from the last query. */
    protected $cursor;

    /** @var boolean The database driver debugging state. */
    protected $debug = false;

    /** @var int Query's limit */
    protected $limit = 0;

    /** @var array The log of executed SQL statements by the database driver. */
    protected $log = array();

    /** @var string Quote for named objects (set by the concrete driver, e.g. backtick for MySQL) */
    protected $nameQuote = '';

    /** @var string The null or zero representation of a timestamp for the database driver. */
    protected $nullDate;

    /** @var int Query's offset */
    protected $offset = 0;

    /** @var array Connection options, passed in upon instantiation and saved. */
    protected $options;

    /** @var mixed The SQL query string */
    protected $sql = '';

    /** @var string The prefix used in the database, if any */
    protected $tablePrefix = '';

    /** @var bool Support for UTF-8 */
    protected $utf = true;

    /** @var int The db server's error number */
    protected $errorNum = 0;

    /** @var string The db server's error string */
    protected $errorMsg = '';

    /** @var array JDatabaseDriver instances container. */
    protected static $instances = array();

    /** @var string The minimum supported database version. */
    protected static $dbMinimum;

    /** @var string Driver type, e.g. mysql, mssql, pgsql and so on */
    protected $driverType = '';
/**
* Splits a string of multiple queries into an array of individual queries.
*
* @param string $query Input SQL string with which to split into individual queries.
*
* @return array The queries from the input string separated into an array.
*/
public static function splitSql($query)
{
$start = 0;
$open = false;
$char = '';
$end = strlen($query);
$queries = array();
for ($i = 0; $i < $end; $i++)
{
$current = substr($query, $i, 1);
if (($current == '"' || $current == '\''))
{
$n = 2;
while (substr($query, $i - $n + 1, 1) == '\\' && $n < $i)
{
$n++;
}
if ($n % 2 == 0)
{
if ($open)
{
if ($current == $char)
{
$open = false;
$char = '';
}
}
else
{
$open = true;
$char = $current;
}
}
}
if (($current == ';' && !$open) || $i == $end - 1)
{
$queries[] = substr($query, $start, ($i - $start + 1));
$start = $i + 1;
}
}
return $queries;
}
/**
* Magic method to provide method alias support for quote() and quoteName().
*
* @param string $method The called method.
* @param array $args The array of arguments passed to the method.
*
* @return string The aliased method's return value or null.
*/
public function __call($method, $args)
{
if (empty($args))
{
return;
}
switch ($method)
{
case 'q':
return $this->quote($args[0], isset($args[1]) ? $args[1] : true);
break;
case 'nq':
case 'qn':
return $this->quoteName($args[0]);
break;
}
}
/**
* Database object constructor
* @param array List of options used to configure the connection
*/
public function __construct( $options )
{
$prefix = array_key_exists('prefix', $options) ? $options['prefix'] : '';
$database = array_key_exists('database', $options) ? $options['database'] : '';
$connection = array_key_exists('connection', $options) ? $options['connection']: null;
$this->tablePrefix = $prefix;
$this->_database = $database;
$this->connection = $connection;
$this->errorNum = 0;
$this->count = 0;
$this->log = array();
$this->options = $options;
}
    /**
     * Database object destructor. Closes the connection when the driver
     * object is garbage-collected or the script shuts down.
     *
     * @return bool The result of closing the connection
     */
    public function __destruct()
    {
        return $this->close();
    }

    /**
     * By default, when the object is shutting down (being serialized),
     * the connection is closed; __wakeup() reopens it on unserialize.
     */
    public function _onSerialize()
    {
        $this->close();
    }

    /**
     * Re-establish the database connection after the driver is unserialized.
     */
    public function __wakeup()
    {
        $this->open();
    }
/**
* Alter database's character set, obtaining query string from protected member.
*
* @param string $dbName The database name that will be altered
*
* @return string The query that alter the database query string
*
* @throws RuntimeException
*/
public function alterDbCharacterSet($dbName)
{
if (is_null($dbName))
{
throw new RuntimeException('Database name must not be null.');
}
$this->setQuery($this->getAlterDbCharacterSet($dbName));
return $this->execute();
}
    /**
     * Opens a database connection. It MUST be overriden by children classes.
     *
     * The base implementation assumes the subclass has already established the
     * raw connection resource; it then detects UTF-8 support, applies the
     * character set and selects the configured database, in that order.
     *
     * @return AEAbstractDriver This object, for chaining
     */
    public function open()
    {
        // Don't try to reconnect if we're already connected
        if(is_resource($this->connection) && !is_null($this->connection)) return $this;

        // Determine utf-8 support
        $this->utf = $this->hasUTF();

        // Set charactersets (needed for MySQL 4.1.2+)
        if ($this->utf){
            $this->setUTF();
        }

        // Select the current database
        // NOTE(review): setUTF() and select() are expected to be defined by the
        // concrete driver subclass — they are not declared in this base class.
        $this->select($this->_database);

        return $this;
    }
    /**
     * Closes the database connection. Implemented by the concrete driver.
     */
    abstract public function close();

    /**
     * Determines if the connection to the server is active.
     *
     * @return boolean True if connected to the database engine.
     */
    abstract public function connected();
/**
* Create a new database using information from $options object, obtaining query string
* from protected member.
*
* @param stdClass $options Object used to pass user and database name to database driver.
* This object must have "db_name" and "db_user" set.
* @param boolean $utf True if the database supports the UTF-8 character set.
*
* @return string The query that creates database
*
* @throws RuntimeException
*/
public function createDatabase($options, $utf = true)
{
if (is_null($options))
{
throw new RuntimeException('$options object must not be null.');
}
elseif (empty($options->db_name))
{
throw new RuntimeException('$options object must have db_name set.');
}
elseif (empty($options->db_user))
{
throw new RuntimeException('$options object must have db_user set.');
}
$this->setQuery($this->getCreateDatabaseQuery($options, $utf));
return $this->execute();
}
    /**
     * Drops a table from the database. Implemented by the concrete driver.
     *
     * @param string $table The name of the database table to drop.
     * @param boolean $ifExists Optionally specify that the table must exist before it is dropped.
     *
     * @return AEAbstractDriver Returns this object to support chaining.
     */
    public abstract function dropTable($table, $ifExists = true);

    /**
     * Method to escape a string for usage in an SQL statement.
     *
     * @param string $text The string to be escaped.
     * @param boolean $extra Optional parameter to provide extra escaping.
     *
     * @return string The escaped string.
     */
    abstract public function escape($text, $extra = false);

    /**
     * Method to fetch a row from the result set cursor as a numerically indexed array.
     *
     * @param mixed $cursor The optional result set cursor from which to fetch the row.
     *
     * @return mixed Either the next row from the result set or false if there are no more rows.
     */
    abstract protected function fetchArray($cursor = null);

    /**
     * Method to fetch a row from the result set cursor as an associative array.
     *
     * @param mixed $cursor The optional result set cursor from which to fetch the row.
     *
     * @return mixed Either the next row from the result set or false if there are no more rows.
     */
    abstract public function fetchAssoc($cursor = null);

    /**
     * Method to fetch a row from the result set cursor as an object.
     *
     * @param mixed $cursor The optional result set cursor from which to fetch the row.
     * @param string $class The class name to use for the returned row object.
     *
     * @return mixed Either the next row from the result set or false if there are no more rows.
     */
    abstract protected function fetchObject($cursor = null, $class = 'stdClass');

    /**
     * Method to free up the memory used for the result set.
     *
     * @param mixed $cursor The optional result set cursor from which to fetch the row.
     *
     * @return void
     */
    abstract public function freeResult($cursor = null);

    /**
     * Get the number of affected rows for the previous executed SQL statement.
     *
     * @return integer The number of affected rows.
     */
    abstract public function getAffectedRows();
/**
* Return the query string to alter the database character set.
*
* @param string $dbName The database name
*
* @return string The query that alter the database query string
*/
protected function getAlterDbCharacterSet($dbName)
{
$query = 'ALTER DATABASE ' . $this->quoteName($dbName) . ' CHARACTER SET `utf8`';
return $query;
}
/**
* Return the query string to create new Database.
* Each database driver, other than MySQL, need to override this member to return correct string.
*
* @param stdClass $options Object used to pass user and database name to database driver.
* This object must have "db_name" and "db_user" set.
* @param boolean $utf True if the database supports the UTF-8 character set.
*
* @return string The query that creates database
*/
protected function getCreateDatabaseQuery($options, $utf)
{
if ($utf)
{
$query = 'CREATE DATABASE ' . $this->quoteName($options->db_name) . ' CHARACTER SET `utf8`';
}
else
{
$query = 'CREATE DATABASE ' . $this->quoteName($options->db_name);
}
return $query;
}
    /**
     * Method to get the database collation in use by sampling a text field of a table in the database.
     *
     * @return mixed The collation in use by the database or boolean false if not supported.
     */
    abstract public function getCollation();

    /**
     * Method that provides access to the underlying database connection. Useful for when you need to call a
     * proprietary method such as postgresql's lo_* methods.
     *
     * @return resource The underlying database connection resource.
     */
    public function getConnection()
    {
        return $this->connection;
    }

    /**
     * Inherits the connection of another database driver. Useful for cloning
     * the CMS database connection into an Akeeba Engine database driver.
     *
     * @param resource $connection The connection resource to adopt
     */
    public function setConnection($connection)
    {
        $this->connection = $connection;
    }

    /**
     * Get the total number of SQL statements executed by the database driver.
     *
     * @return integer
     *
     * @since 11.1
     */
    public function getCount()
    {
        return $this->count;
    }

    /**
     * Gets the name of the database used by this connection.
     *
     * @return string
     */
    protected function getDatabase()
    {
        return $this->_database;
    }

    /**
     * Returns a PHP date() function compliant date format for the database driver.
     *
     * @return string The format string.
     */
    public function getDateFormat()
    {
        return 'Y-m-d H:i:s';
    }

    /**
     * Get the database driver SQL statement log.
     *
     * @return array SQL statements executed by the database driver.
     *
     * @since 11.1
     */
    public function getLog()
    {
        return $this->log;
    }

    /**
     * Get the minimum supported database version, as declared by the concrete driver.
     *
     * @return string The minimum version number for the database driver.
     *
     * @since 12.1
     */
    public function getMinimum()
    {
        return static::$dbMinimum;
    }

    /**
     * Get the null or zero representation of a timestamp for the database driver.
     *
     * @return string Null or zero representation of a timestamp.
     */
    public function getNullDate()
    {
        return $this->nullDate;
    }

    /**
     * Get the number of returned rows for the previous executed SQL statement.
     *
     * @param resource $cursor An optional database cursor resource to extract the row count from.
     *
     * @return integer The number of returned rows.
     */
    abstract public function getNumRows($cursor = null);

    /**
     * Get the database table prefix
     *
     * @return string The database prefix
     */
    public final function getPrefix()
    {
        return $this->tablePrefix;
    }
    /**
     * Get the current query object or a new AEAbstractQuery object.
     *
     * @param boolean $new False to return the current query object, True to return a new AEAbstractQuery object.
     *
     * @return AEAbstractQuery The current query object or a new object extending the AEAbstractQuery class.
     */
    abstract public function getQuery($new = false);

    /**
     * Retrieves field information about the given tables.
     *
     * @param string $table The name of the database table.
     * @param boolean $typeOnly True (default) to only return field types.
     *
     * @return array An array of fields by table.
     */
    abstract public function getTableColumns($table, $typeOnly = true);

    /**
     * Shows the table CREATE statement that creates the given tables.
     *
     * @param mixed $tables A table name or a list of table names.
     *
     * @return array A list of the create SQL for the tables.
     */
    abstract public function getTableCreate($tables);

    /**
     * Retrieves field information about the given tables.
     *
     * @param mixed $tables A table name or a list of table names.
     *
     * @return array An array of keys for the table(s).
     */
    abstract public function getTableKeys($tables);

    /**
     * Method to get an array of all tables in the database.
     *
     * @return array An array of all the tables in the database.
     */
    abstract public function getTableList();

    /**
     * Determine whether or not the database engine supports UTF-8 character encoding.
     *
     * @return boolean True if the database engine supports UTF-8 character encoding.
     */
    public function getUTFSupport()
    {
        return $this->utf;
    }

    /**
     * Determine whether or not the database engine supports UTF-8 character encoding.
     * Alias of getUTFSupport(), kept for API compatibility.
     *
     * @return boolean True if the database engine supports UTF-8 character encoding.
     */
    public function hasUTFSupport()
    {
        return $this->utf;
    }

    /**
     * Determines if the database engine supports UTF-8 character encoding.
     * Alias of getUTFSupport(), kept for API compatibility.
     *
     * @return boolean True if supported.
     */
    public function hasUTF()
    {
        return $this->utf;
    }

    /**
     * Get the version of the database connector
     *
     * @return string The database connector version.
     */
    abstract public function getVersion();

    /**
     * Method to get the auto-incremented value from the last INSERT statement.
     *
     * @return integer The value of the auto-increment field from the last inserted row.
     */
    abstract public function insertid();
/**
* Inserts a row into a table based on an object's properties.
*
* @param string $table The name of the database table to insert into.
* @param object &$object A reference to an object whose public properties match the table fields.
* @param string $key The name of the primary key. If provided the object property is updated.
*
* @return boolean True on success.
*/
public function insertObject($table, &$object, $key = null)
{
$fields = array();
$values = array();
// Iterate over the object variables to build the query fields and values.
foreach (get_object_vars($object) as $k => $v)
{
// Only process non-null scalars.
if (is_array($v) or is_object($v) or $v === null)
{
continue;
}
// Ignore any internal fields.
if ($k[0] == '_')
{
continue;
}
// Prepare and sanitize the fields and values for the database query.
$fields[] = $this->quoteName($k);
$values[] = $this->quote($v);
}
// Create the base insert statement.
$query = $this->getQuery(true)
->insert($this->quoteName($table))
->columns($fields)
->values(implode(',', $values));
// Set the query and execute the insert.
$this->setQuery($query);
if (!$this->execute())
{
return false;
}
// Update the primary key if it exists.
$id = $this->insertid();
if ($key && $id && is_string($key))
{
$object->$key = $id;
}
return true;
}
/**
* Method to check whether the installed database version is supported by the database driver
*
* @return boolean True if the database version is supported
*
* @since 12.1
*/
public function isMinimumVersion()
{
return version_compare($this->getVersion(), static::$dbMinimum) >= 0;
}
/**
* Method to get the first row of the result set from the database query as an associative array
* of ['field_name' => 'row_value'].
*
* @return mixed The return value or null if the query failed.
*/
public function loadAssoc()
{
$ret = null;
// Execute the query and get the result set cursor.
if (!($cursor = $this->execute()))
{
return null;
}
// Get the first row from the result set as an associative array.
if ($array = $this->fetchAssoc($cursor))
{
$ret = $array;
}
// Free up system resources and return.
$this->freeResult($cursor);
return $ret;
}
/**
* Method to get an array of the result set rows from the database query where each row is an associative array
* of ['field_name' => 'row_value']. The array of rows can optionally be keyed by a field name, but defaults to
* a sequential numeric array.
*
* NOTE: Chosing to key the result array by a non-unique field name can result in unwanted
* behavior and should be avoided.
*
* @param string $key The name of a field on which to key the result array.
* @param string $column An optional column name. Instead of the whole row, only this column value will be in
* the result array.
*
* @return mixed The return value or null if the query failed.
*/
public function loadAssocList($key = null, $column = null)
{
$array = array();
// Execute the query and get the result set cursor.
if (!($cursor = $this->execute()))
{
return null;
}
// Get all of the rows from the result set.
while ($row = $this->fetchAssoc($cursor))
{
$value = ($column) ? (isset($row[$column]) ? $row[$column] : $row) : $row;
if ($key)
{
$array[$row[$key]] = $value;
}
else
{
$array[] = $value;
}
}
// Free up system resources and return.
$this->freeResult($cursor);
return $array;
}
/**
* Method to get an array of values from the <var>$offset</var> field in each row of the result set from
* the database query.
*
* @param integer $offset The row offset to use to build the result array.
*
* @return mixed The return value or null if the query failed.
*/
public function loadColumn($offset = 0)
{
$array = array();
// Execute the query and get the result set cursor.
if (!($cursor = $this->execute()))
{
return null;
}
// Get all of the rows from the result set as arrays.
while ($row = $this->fetchArray($cursor))
{
$array[] = $row[$offset];
}
// Free up system resources and return.
$this->freeResult($cursor);
return $array;
}
    /**
     * Method to get the next row in the result set from the database query as an object.
     *
     * NOTE: the cursor is held in a *static* function variable, so it is shared
     * across every instance of this class; interleaving calls from two driver
     * instances will mix their result sets. The cursor is opened on the first
     * call and released automatically when the last row has been consumed.
     *
     * @param string $class The class name to use for the returned row object.
     *
     * @return mixed The result of the query as an object, false if there are no more rows.
     */
    public function loadNextObject($class = 'stdClass')
    {
        static $cursor = null;

        // Execute the query and get the result set cursor on the first call.
        if ( is_null($cursor) )
        {
            if (!($cursor = $this->execute()))
            {
                // null signals a query error, false simply "no rows".
                return $this->errorNum ? null : false;
            }
        }

        // Get the next row from the result set as an object of type $class.
        if ($row = $this->fetchObject($cursor, $class))
        {
            return $row;
        }

        // Free up system resources and reset so a new query can start over.
        $this->freeResult($cursor);
        $cursor = null;

        return false;
    }

    /**
     * Method to get the next row in the result set from the database query as an array.
     *
     * NOTE: like loadNextObject(), the cursor lives in a static function
     * variable shared across instances; see the caveat there.
     *
     * @return mixed The result of the query as an array, false if there are no more rows.
     */
    public function loadNextRow()
    {
        static $cursor = null;

        // Execute the query and get the result set cursor on the first call.
        if ( is_null($cursor) )
        {
            if (!($cursor = $this->execute()))
            {
                // null signals a query error, false simply "no rows".
                return $this->errorNum ? null : false;
            }
        }

        // Get the next row from the result set as a numerically indexed array.
        if ($row = $this->fetchArray($cursor))
        {
            return $row;
        }

        // Free up system resources and reset so a new query can start over.
        $this->freeResult($cursor);
        $cursor = null;

        return false;
    }
/**
* Method to get the first row of the result set from the database query as an object.
*
* @param string $class The class name to use for the returned row object.
*
* @return mixed The return value or null if the query failed.
*/
public function loadObject($class = 'stdClass')
{
$ret = null;
// Execute the query and get the result set cursor.
if (!($cursor = $this->execute()))
{
return null;
}
// Get the first row from the result set as an object of type $class.
if ($object = $this->fetchObject($cursor, $class))
{
$ret = $object;
}
// Free up system resources and return.
$this->freeResult($cursor);
return $ret;
}
/**
* Method to get an array of the result set rows from the database query where each row is an object. The array
* of objects can optionally be keyed by a field name, but defaults to a sequential numeric array.
*
* NOTE: Choosing to key the result array by a non-unique field name can result in unwanted
* behavior and should be avoided.
*
* @param string $key The name of a field on which to key the result array.
* @param string $class The class name to use for the returned row objects.
*
* @return mixed The return value or null if the query failed.
*/
public function loadObjectList($key = '', $class = 'stdClass')
{
$array = array();
// Execute the query and get the result set cursor.
if (!($cursor = $this->execute()))
{
return null;
}
// Get all of the rows from the result set as objects of type $class.
while ($row = $this->fetchObject($cursor, $class))
{
if ($key)
{
$array[$row->$key] = $row;
}
else
{
$array[] = $row;
}
}
// Free up system resources and return.
$this->freeResult($cursor);
return $array;
}
/**
* Method to get the first field of the first row of the result set from the database query.
*
* @return mixed The return value or null if the query failed.
*/
public function loadResult()
{
$ret = null;
// Execute the query and get the result set cursor.
if (!($cursor = $this->execute()))
{
return null;
}
// Get the first row from the result set as an array.
if ($row = $this->fetchArray($cursor))
{
$ret = $row[0];
}
// Free up system resources and return.
$this->freeResult($cursor);
return $ret;
}
/**
* Method to get the first row of the result set from the database query as an array. Columns are indexed
* numerically so the first column in the result set would be accessible via <var>$row[0]</var>, etc.
*
* @return mixed The return value or null if the query failed.
*/
public function loadRow()
{
$ret = null;
// Execute the query and get the result set cursor.
if (!($cursor = $this->execute()))
{
return null;
}
// Get the first row from the result set as an array.
if ($row = $this->fetchArray($cursor))
{
$ret = $row;
}
// Free up system resources and return.
$this->freeResult($cursor);
return $ret;
}
/**
* Method to get an array of the result set rows from the database query where each row is an array. The array
* of objects can optionally be keyed by a field offset, but defaults to a sequential numeric array.
*
* NOTE: Choosing to key the result array by a non-unique field can result in unwanted
* behavior and should be avoided.
*
* @param string $key The name of a field on which to key the result array.
*
* @return mixed The return value or null if the query failed.
*/
public function loadRowList($key = null)
{
$array = array();
// Execute the query and get the result set cursor.
if (!($cursor = $this->execute()))
{
return null;
}
// Get all of the rows from the result set as arrays.
while ($row = $this->fetchArray($cursor))
{
if ($key !== null)
{
$array[$row[$key]] = $row;
}
else
{
$array[] = $row;
}
}
// Free up system resources and return.
$this->freeResult($cursor);
return $array;
}
    /**
     * Locks a table in the database. Implemented by the concrete driver.
     *
     * @param string $tableName The name of the table to lock.
     *
     * @return AEAbstractDriver Returns this object to support chaining.
     */
    public abstract function lockTable($tableName);

    /**
     * Execute the SQL statement.
     *
     * @return mixed A database cursor resource on success, boolean false on failure.
     */
    abstract public function query();

    /**
     * An alias for query(), for compatibility with Joomla! 2.5+ which has deprecated
     * query() in favour of execute()
     *
     * @return mixed A database cursor resource on success, boolean false on failure.
     */
    public function execute()
    {
        return $this->query();
    }
/**
* Method to quote and optionally escape a string to database requirements for insertion into the database.
*
* @param string $text The string to quote.
* @param boolean $escape True (default) to escape the string, false to leave it unchanged.
*/
public function quote($text, $escape = true)
{
return '\'' . ($escape ? $this->escape($text) : $text) . '\'';
}
/**
* Wrap an SQL statement identifier name such as column, table or database names in quotes to prevent injection
* risks and reserved word conflicts.
*
* @param mixed $name The identifier name to wrap in quotes, or an array of identifier names to wrap in quotes.
* Each type supports dot-notation name.
* @param mixed $as The AS query part associated to $name. It can be string or array, in latter case it has to be
* same length of $name; if is null there will not be any AS part for string or array element.
*/
public function quoteName($name, $as = null)
{
if (is_string($name))
{
$quotedName = $this->quoteNameStr(explode('.', $name));
$quotedAs = '';
if (!is_null($as))
{
settype($as, 'array');
$quotedAs .= ' AS ' . $this->quoteNameStr($as);
}
return $quotedName . $quotedAs;
}
else
{
$fin = array();
if (is_null($as))
{
foreach ($name as $str)
{
$fin[] = $this->quoteName($str);
}
}
elseif (is_array($name) && (count($name) == count($as)))
{
$count = count($name);
for ($i = 0; $i < $count; $i++)
{
$fin[] = $this->quoteName($name[$i], $as[$i]);
}
}
return $fin;
}
}
/**
 * Quote strings coming from quoteName call.
 *
 * @param array $strArr Array of strings coming from quoteName dot-explosion.
 *
 * @return string Dot-imploded string of quoted parts.
 */
protected function quoteNameStr($strArr)
{
    $parts = array();
    $q = $this->nameQuote;

    foreach ($strArr as $part)
    {
        if (is_null($part))
        {
            continue;
        }

        if (strlen($q) == 1)
        {
            // Single quote character: use it on both sides.
            $parts[] = $q . $part . $q;
        }
        else
        {
            // Two-character quote pair (e.g. "[]" for MSSQL). Use
            // square-bracket string offsets: the curly-brace syntax
            // ($q{0}) was deprecated in PHP 7.4 and removed in PHP 8.0.
            $parts[] = $q[0] . $part . $q[1];
        }
    }

    return implode('.', $parts);
}
/**
 * This function replaces a string identifier <var>$prefix</var> with the string held is the
 * <var>tablePrefix</var> class variable.
 *
 * Quoted string literals inside the statement are copied verbatim so a
 * prefix occurring inside data is never rewritten.
 *
 * @param string $query The SQL statement to prepare.
 * @param string $prefix The common table prefix.
 *
 * @return string The processed SQL statement.
 */
public function replacePrefix($query, $prefix = '#__')
{
    $escaped = false;
    $startPos = 0;
    $quoteChar = '';
    $literal = '';

    $query = trim($query);
    $n = strlen($query);

    while ($startPos < $n)
    {
        $ip = strpos($query, $prefix, $startPos);
        if ($ip === false)
        {
            break;
        }

        // Locate the start of the next quoted literal (single or double quote).
        $j = strpos($query, "'", $startPos);
        $k = strpos($query, '"', $startPos);
        if (($k !== false) && (($k < $j) || ($j === false)))
        {
            $quoteChar = '"';
            $j = $k;
        }
        else
        {
            $quoteChar = "'";
        }

        if ($j === false)
        {
            $j = $n;
        }

        // Replace the prefix only in the unquoted part of the statement.
        $literal .= str_replace($prefix, $this->tablePrefix, substr($query, $startPos, $j - $startPos));

        $startPos = $j;
        $j = $startPos + 1;

        if ($j >= $n)
        {
            break;
        }

        // Quote comes first, find end of quote
        while (true)
        {
            $k = strpos($query, $quoteChar, $j);
            $escaped = false;
            if ($k === false)
            {
                break;
            }
            $l = $k - 1;
            // Count preceding backslashes to detect an escaped quote.
            // Square-bracket offset replaces the curly-brace syntax
            // ($query{$l}), which was removed in PHP 8.0.
            while ($l >= 0 && $query[$l] == '\\')
            {
                $l--;
                $escaped = !$escaped;
            }
            if ($escaped)
            {
                $j = $k + 1;
                continue;
            }
            break;
        }
        if ($k === false)
        {
            // Error in the query - no end quote; ignore it
            break;
        }

        // Copy the quoted literal verbatim (no prefix replacement inside).
        $literal .= substr($query, $startPos, $k - $startPos + 1);
        $startPos = $k + 1;
    }

    if ($startPos < $n)
    {
        $literal .= substr($query, $startPos, $n - $startPos);
    }

    return $literal;
}
/**
 * Renames a table in the database.
 *
 * @param string $oldTable The name of the table to be renamed
 * @param string $newTable The new name for the table.
 * @param string $backup Table prefix
 * @param string $prefix For the table - used to rename constraints in non-mysql databases
 *
 * @return AEAbstractDriver Returns this object to support chaining.
 */
public abstract function renameTable($oldTable, $newTable, $backup = null, $prefix = null);

/**
 * Select a database for use.
 *
 * @param string $database The name of the database to select for use.
 *
 * @return boolean True if the database was successfully selected.
 */
abstract public function select($database);

/**
 * Sets the database debugging state for the driver.
 *
 * @param boolean $level True to enable debugging.
 *
 * @return boolean The old debugging level.
 */
public function setDebug($level)
{
    // Remember the previous state so callers can restore it later.
    $previous = $this->debug;
    $this->debug = (bool) $level;

    return $previous;
}

/**
 * Sets the SQL statement string for later execution.
 *
 * @param mixed $query The SQL statement to set either as a AEAbstractQuery object or a string.
 * @param integer $offset The affected row offset to set.
 * @param integer $limit The maximum affected rows to set.
 *
 * @return AEAbstractDriver This object to support method chaining.
 */
public function setQuery($query, $offset = 0, $limit = 0)
{
    // The query itself is stored as-is; limit/offset are normalised to int.
    $this->sql = $query;
    $this->limit = (int) $limit;
    $this->offset = (int) $offset;

    return $this;
}

/**
 * Set the connection to use UTF-8 character encoding.
 *
 * @return boolean True on success.
 */
abstract public function setUTF();

/**
 * Method to commit a transaction.
 *
 * @return void
 */
abstract public function transactionCommit();

/**
 * Method to roll back a transaction.
 *
 * @return void
 */
abstract public function transactionRollback();

/**
 * Method to initialize a transaction.
 *
 * @return void
 */
abstract public function transactionStart();

/**
 * Method to truncate a table.
 *
 * @param string $table The table to truncate
 *
 * @return void
 */
public function truncateTable($table)
{
    // The TRUNCATE statement is executed immediately, not merely queued.
    $this->setQuery('TRUNCATE TABLE ' . $this->quoteName($table));
    $this->query();
}
/**
 * Updates a row in a table based on an object's properties.
 *
 * Properties whose name starts with an underscore, or whose value is an
 * array or object, are skipped. Key columns go into the WHERE clause.
 *
 * @param string $table The name of the database table to update.
 * @param object &$object A reference to an object whose public properties match the table fields.
 * @param string $key The name of the primary key.
 * @param boolean $nulls True to update null fields or false to ignore them.
 *
 * @return boolean True on success.
 */
public function updateObject($table, &$object, $key, $nulls = false)
{
    $fields = array();
    $where = array();

    // The key may be a string, array or object; normalise to an array of names.
    if (is_string($key))
    {
        $key = array($key);
    }

    if (is_object($key))
    {
        $key = (array) $key;
    }

    // Create the base update statement.
    $statement = 'UPDATE ' . $this->quoteName($table) . ' SET %s WHERE %s';

    // Iterate over the object variables to build the query fields/value pairs.
    foreach (get_object_vars($object) as $k => $v)
    {
        // Only process scalars that are not internal fields.
        if (is_array($v) or is_object($v) or $k[0] == '_')
        {
            continue;
        }

        // Set the primary key to the WHERE clause instead of a field to update.
        if (in_array($k, $key))
        {
            $where[] = $this->quoteName($k) . '=' . $this->quote($v);
            continue;
        }

        // Prepare and sanitize the fields and values for the database query.
        if ($v === null)
        {
            // If the value is null and we want to update nulls then set it.
            if ($nulls)
            {
                $val = 'NULL';
            }
            // If the value is null and we do not want to update nulls then ignore this field.
            else
            {
                continue;
            }
        }
        // The field is not null so we prep it for update.
        else
        {
            $val = $this->quote($v);
        }

        // Add the field to be updated.
        $fields[] = $this->quoteName($k) . '=' . $val;
    }

    // We don't have any fields to update.
    if (empty($fields))
    {
        return true;
    }

    // Set the query and execute the update.
    $this->setQuery(sprintf($statement, implode(",", $fields), implode(' AND ', $where)));

    return $this->execute();
}
/**
 * Unlocks tables in the database.
 *
 * @return AEAbstractDriver Returns this object to support chaining.
 */
public abstract function unlockTables();

/**
 * Get the error message
 *
 * @param boolean $escaped True to return the message with slashes added.
 *
 * @return string The error message for the most recent query
 */
public final function getErrorMsg($escaped = false)
{
    if($escaped) {
        return addslashes($this->errorMsg);
    } else {
        return $this->errorMsg;
    }
}

/**
 * Get the error number
 *
 * @return int The error number for the most recent query
 */
public final function getErrorNum() {
    return $this->errorNum;
}

/**
 * Method to escape a string for usage in an SQL statement.
 *
 * Legacy alias of escape(), kept for backwards compatibility.
 *
 * @param string $text The string to be escaped.
 * @param boolean $extra Optional parameter to provide extra escaping.
 *
 * @return string The escaped string.
 */
public function getEscaped($text, $extra = false)
{
    return $this->escape($text, $extra);
}

/**
 * Retrieves field information about the given tables.
 *
 * @param mixed $tables A table name or a list of table names.
 * @param boolean $typeOnly True to only return field types.
 *
 * @return array An array of fields by table.
 */
public function getTableFields($tables, $typeOnly = true)
{
    $results = array();

    // Accept a single table name as well as an array of names.
    settype($tables, 'array');

    foreach ($tables as $table)
    {
        $results[$table] = $this->getTableColumns($table, $typeOnly);
    }

    return $results;
}

/**
 * Method to get an array of values from the <var>$offset</var> field in each row of the result set from
 * the database query.
 *
 * Legacy alias of loadColumn(), kept for backwards compatibility.
 *
 * @param integer $offset The row offset to use to build the result array.
 *
 * @return mixed The return value or null if the query failed.
 */
public function loadResultArray($offset = 0)
{
    return $this->loadColumn($offset);
}
/**
 * Wrap an SQL statement identifier name such as column, table or database names in quotes to prevent injection
 * risks and reserved word conflicts.
 *
 * Legacy alias of quoteName(), kept for backwards compatibility.
 *
 * @param string $name The identifier name to wrap in quotes.
 *
 * @return string The quote wrapped name.
 */
public function nameQuote($name)
{
    return $this->quoteName($name);
}

/**
 * Returns the abstracted name of a database object
 *
 * @param string $tableName
 *
 * @return string
 */
public function getAbstract( $tableName )
{
    $prefix = $this->getPrefix();

    // Don't return abstract names for non-CMS tables
    if(is_null($prefix)) return $tableName;

    switch( $prefix )
    {
        case '':
            // This is more of a hack; it assumes all tables are CMS tables if the prefix is empty.
            return '#__' . $tableName;
            break;

        default:
            // Normal behaviour for 99% of sites: replace a leading real
            // prefix with the abstract #__ marker, otherwise keep as-is.
            $tableAbstract = $tableName;
            if(!empty($prefix)) {
                if( substr($tableName, 0, strlen($prefix)) == $prefix ) {
                    $tableAbstract = '#__' . substr($tableName, strlen($prefix));
                } else {
                    $tableAbstract = $tableName;
                }
            }
            return $tableAbstract;
            break;
    }
}
// Returns the driver type identifier set by the concrete driver class.
public final function getDriverType()
{
    return $this->driverType;
}

// Legacy entry point kept for API compatibility; proxies isSupported().
public static function test()
{
    return self::isSupported();
}
} | effortlesssites/template | tmp/com_akeeba-3.9.2-core/backend/akeeba/abstract/driver.php | PHP | gpl-2.0 | 39,065 |
/*
 * Created with SharpDevelop.
 * User: jonathan
 * Date: 09/12/2013
 * Time: 11:17 p.m.
 *
 * To change this template use Tools | Options | Coding | Edit Standard Headers.
 */
using System;
using System.Drawing;
using System.Windows.Forms;

namespace Proyecto
{
	/// <summary>
	/// Consultation menu form: hosts the buttons that open the query
	/// dialogs and close this window.
	/// </summary>
	public partial class MenuConsulta : Form
	{
		public MenuConsulta()
		{
			//
			// The InitializeComponent() call is required for Windows Forms designer support.
			//
			InitializeComponent();

			//
			// TODO: Add constructor code after the InitializeComponent() call.
			//
		}

		// Closes this window.
		void ButSalirClick(object sender, EventArgs e)
		{
			this.Close();
		}

		// NOTE(review): intentionally empty — no refresh logic is implemented yet.
		void ButActualizarClick(object sender, EventArgs e)
		{

		}

		// Opens the customer consultation window (non-modal).
		void ButConsultaClienteClick(object sender, EventArgs e)
		{
			ConsultaCliente cliente = new ConsultaCliente();
			cliente.Show();
		}
	}
}
| csaldana/Proyecto-final | ZombieSystem1.2.0/Proyecto V 1.1.1/Zombie System V1.2/Zombie System V1.3/Proyecto/MenuConsulta.cs | C# | gpl-2.0 | 939 |
//-----------------------------------------------------------------------------
// boost-libs variant/test/test8.cpp header file
// See http://www.boost.org for updates, documentation, and revision history.
//-----------------------------------------------------------------------------
//
// Copyright (c) 2003
// Eric Friedman, Itay Maman
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include "boost/test/minimal.hpp"
#include "boost/variant.hpp"
#include <iostream>
#include <vector>
#include <string>
using namespace std;
using namespace boost;
// The variant type exercised by this test.
typedef variant<float, std::string, int, std::vector<std::string> > t_var1;

// Visitor that accumulates every int it visits and ignores all other
// bounded types of the variant.
struct int_sum : static_visitor<>
{
   int_sum() : result_(0) { }

   void operator()(int t)
   {
      result_ += t;
   }

   result_type operator()(float ) { }
   result_type operator()(const std::string& ) { }
   result_type operator()(const std::vector<std::string>& ) { }

   int result_;
};

// Checks that get<T> succeeds on v (both pointer and reference forms)
// and that the held value equals 'value'; returns the obtained reference.
template <typename T, typename Variant>
T& check_pass(Variant& v, T value)
{
   BOOST_CHECK(get<T>(&v));

   try
   {
      T& r = get<T>(v);
      BOOST_CHECK(r == value);
      return r;
   }
   catch(boost::bad_get&)
   {
      throw; // must never reach
   }
}

// Checks that relaxed_get<T> fails on v: the pointer form yields null and
// the reference form throws boost::bad_get.
template <typename T, typename Variant>
void check_fail(Variant& v)
{
   BOOST_CHECK(!relaxed_get<T>(&v));

   try
   {
      T& r = relaxed_get<T>(v);
      (void)r; // suppress warning about r not being used
      BOOST_CHECK(false && &r); // should never reach
   }
   catch(const boost::bad_get& e)
   {
      BOOST_CHECK(!!e.what()); // make sure that what() is const qualified and returns something
   }
}
// Driver: exercises get<>/relaxed_get<> success and failure paths on a
// variant holding an int, then repeats the checks through a const reference.
int test_main(int , char* [])
{
   int_sum acc;
   t_var1 v1 = 800;

   // check get on non-const variant
   {
      int& r1 = check_pass<int>(v1, 800);
      const int& cr1 = check_pass<const int>(v1, 800);
      check_fail<float>(v1);
      check_fail<const float>(v1);
      check_fail<short>(v1);
      check_fail<const short>(v1);

      apply_visitor(acc, v1);
      BOOST_CHECK(acc.result_ == 800);

      r1 = 920; // NOTE: modifies content of v1
      apply_visitor(acc, v1);
      BOOST_CHECK(cr1 == 920);
      BOOST_CHECK(acc.result_ == 800 + 920);
   }

   // check const correctness:
   {
      const t_var1& c = v1;
      check_pass<const int>(c, 920);
      //check_fail<int>(c);
      check_fail<const float>(c);
      //check_fail<float>(c);
      check_fail<const short>(c);
      //check_fail<short>(c);
   }

   return boost::exit_success;
}
| rprata/boost | libs/variant/test/test8.cpp | C++ | gpl-2.0 | 2,621 |
/*
* iaf_psc_alpha_presc.cpp
*
* This file is part of NEST.
*
* Copyright (C) 2004 The NEST Initiative
*
* NEST is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* NEST is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with NEST. If not, see <http://www.gnu.org/licenses/>.
*
*/
#include "iaf_psc_alpha_presc.h"
#include "exceptions.h"
#include "network.h"
#include "dict.h"
#include "integerdatum.h"
#include "doubledatum.h"
#include "dictutils.h"
#include "numerics.h"
#include "universal_data_logger_impl.h"
#include <limits>
/* ----------------------------------------------------------------
* Recordables map
* ---------------------------------------------------------------- */
// Map of recordable quantities; shared by all instances of this model.
nest::RecordablesMap<nest::iaf_psc_alpha_presc> nest::iaf_psc_alpha_presc::recordablesMap_;

namespace nest
{
  /*
   * Override the create() method with one call to RecordablesMap::insert_()
   * for each quantity to be recorded.
   */
  template <>
  void RecordablesMap<iaf_psc_alpha_presc>::create()
  {
    // use standard names whereever you can for consistency!
    insert_(names::V_m, &iaf_psc_alpha_presc::get_V_m_);
  }
}
/* ----------------------------------------------------------------
* Default constructors defining default parameters and state
* ---------------------------------------------------------------- */
// Default parameter values. Potentials are stored relative to E_L_.
nest::iaf_psc_alpha_presc::Parameters_::Parameters_()
  : tau_m_  ( 10.0 ),  // ms
    tau_syn_(  2.0 ),  // ms
    c_m_    (250.0 ),  // pF
    t_ref_  (  2.0 ),  // ms
    E_L_    (-70.0 ),  // mV
    I_e_    (  0.0 ),  // pA
    U_th_   (-55.0-E_L_),  // mV, rel to E_L_
    U_min_  (-std::numeric_limits<double_t>::infinity()),  // no lower bound by default
    U_reset_(-70.0-E_L_),  // mV, rel to E_L_
    Interpol_(iaf_psc_alpha_presc::LINEAR)  // default spike-time interpolation order
{}

// Default state: resting potential, no synaptic activity, not refractory,
// no spike emitted yet (last_spike_step_ == -1).
nest::iaf_psc_alpha_presc::State_::State_()
  : y0_(0.0),
    y1_(0.0),
    y2_(0.0),
    y3_(0.0),
    r_(0),
    last_spike_step_(-1),
    last_spike_offset_(0.0)
{}
/* ----------------------------------------------------------------
* Parameter and state extractions and manipulation functions
* ---------------------------------------------------------------- */
// Export parameters to a dictionary. Potentials are stored internally
// relative to E_L_ and exported here as absolute values in mV.
void nest::iaf_psc_alpha_presc::Parameters_::get(DictionaryDatum &d) const
{
  def<double>(d, names::E_L, E_L_);
  def<double>(d, names::I_e, I_e_);
  def<double>(d, names::V_th, U_th_+E_L_);
  def<double>(d, names::V_min, U_min_+E_L_);
  def<double>(d, names::V_reset, U_reset_+E_L_);
  def<double>(d, names::C_m, c_m_);
  def<double>(d, names::tau_m, tau_m_);
  def<double>(d, names::tau_syn, tau_syn_);
  def<double>(d, names::t_ref, t_ref_);
  def<long>(d, names::Interpol_Order, Interpol_);
}

// Import parameters from a dictionary; returns the shift of E_L_ so the
// state can be adjusted accordingly. Throws BadProperty on invalid values.
double nest::iaf_psc_alpha_presc::Parameters_::set(const DictionaryDatum& d)
{
  // if E_L_ is changed, we need to adjust all variables defined relative to E_L_
  const double ELold = E_L_;
  updateValue<double>(d, names::E_L, E_L_);
  const double delta_EL = E_L_ - ELold;

  updateValue<double>(d, names::tau_m, tau_m_);
  updateValue<double>(d, names::tau_syn, tau_syn_);
  updateValue<double>(d, names::C_m, c_m_);
  updateValue<double>(d, names::t_ref, t_ref_);
  updateValue<double>(d, names::I_e, I_e_);

  // Absolute potentials supplied in the dictionary are converted to
  // values relative to E_L_; otherwise they are only shifted by delta_EL.
  if (updateValue<double>(d, names::V_th, U_th_))
    U_th_ -= E_L_;
  else
    U_th_ -= delta_EL;

  if (updateValue<double>(d, names::V_min, U_min_))
    U_min_ -= E_L_;
  else
    U_min_ -= delta_EL;

  if (updateValue<double>(d, names::V_reset, U_reset_))
    U_reset_ -= E_L_;
  else
    U_reset_ -= delta_EL;

  long_t tmp;
  if ( updateValue<long_t>(d, names::Interpol_Order, tmp) )
  {
    if ( NO_INTERPOL <= tmp && tmp < END_INTERP_ORDER )
      Interpol_ = static_cast<interpOrder>(tmp);
    else
      throw BadProperty("Invalid interpolation order. "
                        "Valid orders are 0, 1, 2, 3.");
  }

  // Consistency checks on the resulting parameter set.
  if ( U_reset_ >= U_th_ )
    throw BadProperty("Reset potential must be smaller than threshold.");

  if ( U_reset_ < U_min_ )
    throw BadProperty("Reset potential must be greater equal minimum potential.");

  if ( c_m_ <= 0 )
    throw BadProperty("Capacitance must be strictly positive.");

  if ( t_ref_ < 0 )
    throw BadProperty("Refractory time must not be negative.");

  if ( tau_m_ <= 0 || tau_syn_ <= 0 )
    throw BadProperty("All time constants must be strictly positive.");

  if ( tau_m_ == tau_syn_ )
    throw BadProperty("Membrane and synapse time constant(s) must differ."
                      "See note in documentation.");

  return delta_EL;
}

// Export state (absolute membrane potential, last spike time and offset).
void nest::iaf_psc_alpha_presc::State_::get(DictionaryDatum &d,
                                            const Parameters_& p) const
{
  def<double>(d, names::V_m, y3_ + p.E_L_); // Membrane potential
  def<double>(d, names::t_spike, Time(Time::step(last_spike_step_)).get_ms());
  def<double>(d, names::offset, last_spike_offset_);
}

// Import state; an absolute V_m from the dictionary is converted to a
// value relative to E_L_, otherwise the state follows the E_L_ shift.
void nest::iaf_psc_alpha_presc::State_::set(const DictionaryDatum& d, const Parameters_& p, double delta_EL)
{
  if ( updateValue<double>(d, names::V_m, y3_) )
    y3_ -= p.E_L_;
  else
    y3_ -= delta_EL;
}
// Buffers_ constructors: the data logger needs a back-reference to the node.
nest::iaf_psc_alpha_presc::Buffers_::Buffers_(iaf_psc_alpha_presc& n)
  : logger_(n)
{}

nest::iaf_psc_alpha_presc::Buffers_::Buffers_(const Buffers_&, iaf_psc_alpha_presc& n)
  : logger_(n)
{}

/* ----------------------------------------------------------------
 * Default and copy constructor for node
 * ---------------------------------------------------------------- */

nest::iaf_psc_alpha_presc::iaf_psc_alpha_presc()
  : Node(),
    P_(),
    S_(),
    B_(*this)
{
  recordablesMap_.create();
}

nest::iaf_psc_alpha_presc::iaf_psc_alpha_presc(const iaf_psc_alpha_presc& n)
  : Node(n),
    P_(n.P_),
    S_(n.S_),
    B_(n.B_, *this)
{}

/* ----------------------------------------------------------------
 * Node initialization functions
 * ---------------------------------------------------------------- */

// Copy the state from the prototype node.
void nest::iaf_psc_alpha_presc::init_state_(const Node& proto)
{
  const iaf_psc_alpha_presc& pr = downcast<iaf_psc_alpha_presc>(proto);
  S_ = pr.S_;
}

// Clear all ring buffers and reset the logger.
void nest::iaf_psc_alpha_presc::init_buffers_()
{
  B_.spike_y1_.clear();  // includes resize
  B_.spike_y2_.clear();  // includes resize
  B_.spike_y3_.clear();  // includes resize
  B_.currents_.clear();  // includes resize
  B_.logger_.reset();
}

// Pre-compute internal constants and the exact propagator matrix entries
// for one full simulation step of length h_ms_.
void nest::iaf_psc_alpha_presc::calibrate()
{
  B_.logger_.init();

  V_.h_ms_ = Time::get_resolution().get_ms();

  V_.PSCInitialValue_ = 1.0 * numerics::e / P_.tau_syn_;

  V_.gamma_ = 1/P_.c_m_ / ( 1/P_.tau_syn_ - 1/P_.tau_m_ );
  V_.gamma_sq_ = 1/P_.c_m_ / ( ( 1/P_.tau_syn_ - 1/P_.tau_m_ ) * ( 1/P_.tau_syn_ - 1/P_.tau_m_ ) );

  // pre-compute matrix for full time step
  V_.expm1_tau_m_ = numerics::expm1(-V_.h_ms_/P_.tau_m_);
  V_.expm1_tau_syn_ = numerics::expm1(-V_.h_ms_/P_.tau_syn_);
  V_.P30_ = -P_.tau_m_ / P_.c_m_ * V_.expm1_tau_m_;
  V_.P31_ = V_.gamma_sq_ * V_.expm1_tau_m_ - V_.gamma_sq_ * V_.expm1_tau_syn_
    - V_.h_ms_ * V_.gamma_ * V_.expm1_tau_syn_ - V_.h_ms_ * V_.gamma_;
  V_.P32_ = V_.gamma_ * V_.expm1_tau_m_ - V_.gamma_ * V_.expm1_tau_syn_;

  // t_ref_ is the refractory period in ms
  // refractory_steps_ is the duration of the refractory period in whole
  // steps, rounded down
  V_.refractory_steps_ = Time(Time::ms(P_.t_ref_)).get_steps();
  assert(V_.refractory_steps_ >= 0);  // since t_ref_ >= 0, this can only fail in error
}
// Propagate the neuron from origin+from to origin+to (exclusive), one step
// per lag, emitting precisely timed spikes (with sub-step offset) whenever
// the membrane potential y3_ crosses threshold U_th_.
void nest::iaf_psc_alpha_presc::update(Time const & origin,
                                       const long_t from, const long_t to)
{
  assert(to >= 0);
  assert(static_cast<delay>(from) < Scheduler::get_min_delay());
  assert(from < to);

  /* Neurons may have been initialized to superthreshold potentials.
     We need to check for this here and issue spikes at the beginning of
     the interval.
  */
  if ( S_.y3_ >= P_.U_th_ )
  {
    set_spiketime(Time::step(origin.get_steps() + from + 1));
    S_.last_spike_offset_ = V_.h_ms_ * (1-std::numeric_limits<double_t>::epsilon());

    // reset neuron and make it refractory
    S_.y3_ = P_.U_reset_;
    S_.r_ = V_.refractory_steps_;

    // send spike
    SpikeEvent se;
    se.set_offset(S_.last_spike_offset_);
    network()->send(*this, se, from);
  }

  for ( long_t lag = from ; lag < to ; ++lag )
  {
    // time at start of update step
    const long_t T = origin.get_steps() + lag;

    // save state at beginning of interval for spike-time interpolation
    V_.y0_before_ = S_.y0_;
    V_.y1_before_ = S_.y1_;
    V_.y2_before_ = S_.y2_;
    V_.y3_before_ = S_.y3_;

    /* obtain input to y3_
       We need to collect this value even while the neuron is refractory,
       since we need to clear any spikes that have come in from the
       ring buffer.
    */
    const double_t dy3 = B_.spike_y3_.get_value(lag);

    if ( S_.r_ == 0 )
    {
      // neuron is not refractory: exact propagation over one full step
      S_.y3_ = V_.P30_ * (P_.I_e_+S_.y0_) + V_.P31_*S_.y1_ + V_.P32_*S_.y2_ + V_.expm1_tau_m_*S_.y3_ + S_.y3_;
      S_.y3_ += dy3; // add input

      // enforce lower bound
      S_.y3_ = ( S_.y3_< P_.U_min_ ? P_.U_min_ : S_.y3_);
    }
    else if ( S_.r_ == 1 )
    {
      // neuron returns from refractoriness during interval
      S_.r_ = 0;

      // Iterate third component (membrane pot) from end of
      // refractory period to end of interval. As first-order
      // approximation, add a proportion of the effect of synaptic
      // input during the interval to membrane pot. The proportion
      // is given by the part of the interval after the end of the
      // refractory period.
      S_.y3_ = P_.U_reset_ + // try fix 070623, md
        update_y3_delta_() + dy3 - dy3 * (1 - S_.last_spike_offset_/V_.h_ms_);

      // enforce lower bound
      S_.y3_ = ( S_.y3_< P_.U_min_ ? P_.U_min_ : S_.y3_);
    }
    else
    {
      // neuron is refractory
      // y3_ remains unchanged at 0.0
      --S_.r_;
    }

    // update synaptic currents
    S_.y2_ = V_.expm1_tau_syn_*V_.h_ms_*S_.y1_ + V_.expm1_tau_syn_*S_.y2_ + V_.h_ms_*S_.y1_ + S_.y2_;
    S_.y1_ = V_.expm1_tau_syn_*S_.y1_ + S_.y1_;

    // add synaptic inputs from the ring buffer
    // this must happen BEFORE threshold-crossing interpolation,
    // since synaptic inputs occured during the interval
    S_.y1_ += B_.spike_y1_.get_value(lag);
    S_.y2_ += B_.spike_y2_.get_value(lag);

    //neuron spikes
    if ( S_.y3_ >= P_.U_th_ )
    {
      // compute spike time
      set_spiketime(Time::step(T+1));

      // The time for the threshpassing
      S_.last_spike_offset_ = V_.h_ms_ - thresh_find_(V_.h_ms_);

      // reset AFTER spike-time interpolation
      S_.y3_ = P_.U_reset_;
      S_.r_ = V_.refractory_steps_;

      // sent event
      SpikeEvent se;
      se.set_offset(S_.last_spike_offset_);
      network()->send(*this, se, lag);
    }

    // Set new input current. The current change occurs at the
    // end of the interval and thus must come AFTER the threshold-
    // crossing interpolation
    S_.y0_ = B_.currents_.get_value(lag);

    // logging
    B_.logger_.record_data(origin.get_steps()+lag);

  }  // from lag = from ...
}
//function handles exact spike times
void nest::iaf_psc_alpha_presc::handle(SpikeEvent & e)
{
  assert(e.get_delay() > 0 );

  const long_t Tdeliver = e.get_rel_delivery_steps(network()->get_slice_origin());
  const double_t spike_weight = V_.PSCInitialValue_ * e.get_weight() * e.get_multiplicity();
  const double_t dt = e.get_offset();

  // Building the new matrix for the offset of the spike
  // NOTE: We do not use get matrix, but compute only those
  // components we actually need for spike registration
  const double_t ps_e_TauSyn = numerics::expm1(-dt/P_.tau_syn_); // needed in any case
  const double_t ps_e_Tau = numerics::expm1(-dt/P_.tau_m_);
  const double_t ps_P31 = V_.gamma_sq_ * ps_e_Tau - V_.gamma_sq_ * ps_e_TauSyn
    - dt * V_.gamma_ * ps_e_TauSyn - dt * V_.gamma_;

  // Register the spike's contribution to each state component.
  B_.spike_y1_.add_value(Tdeliver, spike_weight*ps_e_TauSyn + spike_weight);
  B_.spike_y2_.add_value(Tdeliver, spike_weight*dt*ps_e_TauSyn + spike_weight*dt);
  B_.spike_y3_.add_value(Tdeliver, spike_weight*ps_P31);
}

// Accumulate weighted input currents in the current ring buffer.
void nest::iaf_psc_alpha_presc::handle(CurrentEvent& e)
{
  assert(e.get_delay() > 0);

  const double_t c=e.get_current();
  const double_t w=e.get_weight();

  // add weighted current; HEP 2002-10-04
  B_.currents_.add_value(e.get_rel_delivery_steps(network()->get_slice_origin()),
                         w * c);
}

// Forward data logging requests to the universal data logger.
void nest::iaf_psc_alpha_presc::handle(DataLoggingRequest& e)
{
  B_.logger_.handle(e);
}
// auxiliary functions ---------------------------------------------

// Record the time step of the most recent spike.
inline
void nest::iaf_psc_alpha_presc::set_spiketime(Time const & now)
{
  S_.last_spike_step_ = now.get_steps();
}

// Return the time of the most recent spike.
inline
nest::Time nest::iaf_psc_alpha_presc::get_spiketime() const
{
  return Time::step(S_.last_spike_step_);
}
// Computes the membrane-potential change accumulated between the end of
// the refractory period (at h_ms - last_spike_offset_) and the end of the
// current interval, while y3_ itself was clamped during refractoriness.
nest::double_t nest::iaf_psc_alpha_presc::update_y3_delta_() const
{
  /* We need to proceed in two steps:

     1.  Update the synaptic currents as far as h_ms-last_spike_offset, when the refractory
         period ends.  y3_ is clamped to 0 during this time.

     2.  Update y3_ from t_th to the end of the interval.  The synaptic
         currents need not be updated during this time, since they are
         anyways updated for the entire interval outside.

     Instead of calling get_matrix(), we compute only those components
     we actually need locally.
  */

  // update synaptic currents
  const double t_th = V_.h_ms_ - S_.last_spike_offset_;
  double_t ps_e_TauSyn = numerics::expm1(-t_th/P_.tau_syn_);

  //ps_y2_ = ps_P21_*y1_before_ + ps_P22_* y2_before_;
  const double ps_y2 = t_th * ps_e_TauSyn * V_.y1_before_
    + ps_e_TauSyn * V_.y2_before_ + t_th * V_.y1_before_ + V_.y2_before_ ;

  //ps_y1_ = y1_before_*ps_P11_;
  const double ps_y1 = ps_e_TauSyn * V_.y1_before_ + V_.y1_before_ ;

  // update y3_ over remaineder of interval
  const double_t dt = S_.last_spike_offset_;
  ps_e_TauSyn = numerics::expm1(-dt / P_.tau_syn_);
  const double_t ps_e_Tau = numerics::expm1(-dt/P_.tau_m_);
  const double_t ps_P30 = - P_.tau_m_ / P_.c_m_ * ps_e_Tau;
  const double_t ps_P31 = V_.gamma_sq_ * ps_e_Tau - V_.gamma_sq_ * ps_e_TauSyn
    - dt*V_.gamma_*ps_e_TauSyn - dt*V_.gamma_;
  const double_t ps_P32 = V_.gamma_*ps_e_Tau - V_.gamma_* ps_e_TauSyn;

  // y3_ == 0.0 at beginning of sub-step
  return ps_P30 * (P_.I_e_+V_.y0_before_) + ps_P31 * ps_y1 + ps_P32 * ps_y2;
}

// finds threshpassing: dispatch on the configured interpolation order.
inline
nest::double_t nest::iaf_psc_alpha_presc::thresh_find_(double_t const dt) const
{
  switch (P_.Interpol_) {
  case NO_INTERPOL: return dt;
  case LINEAR     : return thresh_find1_(dt);
  case QUADRATIC  : return thresh_find2_(dt);
  case CUBIC      : return thresh_find3_(dt);
  default:
    network()->message(SLIInterpreter::M_ERROR, "iaf_psc_alpha_presc::thresh_find_()",
                       "Invalid interpolation---Internal model error.");
    throw BadProperty();
  }
  return 0;
}
// finds threshpassing via linear interpolation
nest::double_t nest::iaf_psc_alpha_presc::thresh_find1_(double_t const dt) const
{
  // Linearly interpolate the time at which the membrane potential crossed
  // U_th_ between its values at the start (y3_before_) and end (y3_) of
  // the step of length dt. Operation order matches the original formula.
  const double_t num = ( P_.U_th_ - V_.y3_before_ ) * dt;
  const double_t den = S_.y3_ - V_.y3_before_;
  return num / den;
}
// finds threshpassing via quadratic interpolation
// Fits a parabola through V(0), V(dt) and the initial slope, solves for the
// threshold crossing, and falls back to linear interpolation if neither
// root is non-negative.
nest::double_t nest::iaf_psc_alpha_presc::thresh_find2_(double_t const dt) const
{
  const double_t h_sq = dt * dt;
  const double_t derivative = - V_.y3_before_/P_.tau_m_ + (P_.I_e_ + V_.y0_before_ + V_.y2_before_)/P_.c_m_;

  const double_t a = (-V_.y3_before_/h_sq) + (S_.y3_/h_sq) - (derivative/dt);
  const double_t b = derivative;
  const double_t c = V_.y3_before_;

  const double_t sqr_ = std::sqrt(b*b - 4*a*c + 4*a*P_.U_th_);
  const double_t tau1 = (-b + sqr_) / (2*a);
  const double_t tau2 = (- b - sqr_) / (2*a);

  if (tau1 >= 0)
    return tau1;
  else if (tau2 >= 0)
    return tau2;
  else
    return thresh_find1_(dt);
}

// finds threshpassing via cubic interpolation: fits a cubic through the
// potentials and slopes at both interval ends, solves it analytically,
// and falls back to quadratic interpolation when no root is usable.
nest::double_t nest::iaf_psc_alpha_presc::thresh_find3_(double_t const dt) const
{
  const double_t h_ms_=dt;
  const double_t h_sq = h_ms_*h_ms_;
  const double_t h_cb = h_sq*h_ms_;

  const double_t deriv_t1 = - V_.y3_before_/P_.tau_m_ + (P_.I_e_ + V_.y0_before_ + V_.y2_before_)/P_.c_m_;
  const double_t deriv_t2 = - S_.y3_/P_.tau_m_ + (P_.I_e_ + S_.y0_ + S_.y2_)/P_.c_m_;

  const double_t w3_ = (2 * V_.y3_before_ / h_cb) - (2 * S_.y3_ / h_cb)
    + ( deriv_t1 / h_sq) + ( deriv_t2 / h_sq) ;
  const double_t w2_ = - (3 * V_.y3_before_ / h_sq) + (3 * S_.y3_ / h_sq)
    - ( 2 * deriv_t1 / h_ms_) - ( deriv_t2 / h_ms_) ;
  const double_t w1_ = deriv_t1;
  const double_t w0_ = V_.y3_before_;

  //normal form : x^3 + r*x^2 + s*x + t with coefficients : r, s, t
  const double_t r = w2_ / w3_;
  const double_t s = w1_ / w3_;
  const double_t t = (w0_ - P_.U_th_) / w3_;
  const double_t r_sq= r*r;

  //substitution y = x + r/3 : y^3 + p*y + q == 0
  const double_t p = - r_sq / 3 + s;
  const double_t q = 2 * ( r_sq * r ) / 27 - r * s / 3 + t;

  //discriminant
  const double_t D = std::pow( (p/3), 3) + std::pow( (q/2), 2);

  double_t tau1;
  double_t tau2;
  double_t tau3;

  if(D<0){
    // Three real roots (casus irreducibilis): trigonometric solution.
    const double_t roh = std::sqrt( -(p*p*p)/ 27 );
    const double_t phi = std::acos( -q/ (2*roh) );
    const double_t a = 2 * std::pow(roh, (1.0/3.0));
    tau1 = (a * std::cos( phi/3 )) - r/3;
    tau2 = (a * std::cos( phi/3 + 2* numerics::pi/3 )) - r/3;
    tau3 = (a * std::cos( phi/3 + 4* numerics::pi/3 )) - r/3;
  }
  else{
    // One real root: Cardano's formula.
    const double_t sgnq = (q >= 0 ? 1 : -1);
    const double_t u = -sgnq * std::pow(std::fabs(q)/2.0 + std::sqrt(D), 1.0/3.0);
    const double_t v = - p/(3*u);
    tau1= (u+v) - r/3;

    if (tau1 >= 0) {
      return tau1;
    }
    else {
      return thresh_find2_(dt);
    }
  }

  //set tau to the smallest root above 0
  double tau = (tau1 >= 0) ? tau1 : 2*h_ms_;
  if ((tau2 >=0) && (tau2 < tau)) tau = tau2;
  if ((tau3 >=0) && (tau3 < tau)) tau = tau3;
  return (tau <= h_ms_) ? tau : thresh_find2_(dt);
}
| INM-6/nest-git-migration | precise/iaf_psc_alpha_presc.cpp | C++ | gpl-2.0 | 18,368 |
/*
Copyright (c) 2006-2009 by Jakob Schroeter <js@camaya.net>
This file is part of the gloox library. http://camaya.net/gloox
This software is distributed under a license. The full license
agreement can be found in the file LICENSE in this distribution.
This software may not be copied, modified, sold or distributed
other than expressed in the named license agreement.
This software is distributed without any warranty.
*/
#include "vcardupdate.h"
#include "tag.h"
namespace gloox
{
  // Default update: "vCard not yet fetched" — serialises to an <x/> element
  // without a <photo/> child.
  VCardUpdate::VCardUpdate()
    : StanzaExtension( ExtVCardUpdate ),
      m_notReady( true ), m_noImage( true ), m_valid( true )
  {
  }

  // Update carrying a photo hash; an empty hash marks the object invalid.
  VCardUpdate::VCardUpdate( const std::string& hash )
    : StanzaExtension( ExtVCardUpdate ),
      m_hash( hash ), m_notReady( false ), m_noImage( false ), m_valid( true )
  {
    if( m_hash.empty() )
    {
      m_noImage = true;
      m_valid = false;
    }
  }

  // Parse constructor: extracts the photo hash from a received <x/> element
  // in the vcard-temp:x:update namespace.
  VCardUpdate::VCardUpdate( const Tag* tag )
    : StanzaExtension( ExtVCardUpdate ),
      m_notReady( true ), m_noImage( true ), m_valid( false )
  {
    if( tag && tag->name() == "x" && tag->hasAttribute( XMLNS, XMLNS_X_VCARD_UPDATE ) )
    {
      m_valid = true;
      if( tag->hasChild( "photo" ) )
      {
        m_notReady = false;
        m_hash = tag->findChild( "photo" )->cdata();
        if( !m_hash.empty() )
          m_noImage = false;
      }
    }
  }

  VCardUpdate::~VCardUpdate()
  {
  }

  // Filter used by the parser to route matching presence payloads here.
  const std::string& VCardUpdate::filterString() const
  {
    static const std::string filter = "/presence/x[@xmlns='" + XMLNS_X_VCARD_UPDATE + "']";
    return filter;
  }

  // Serialise this extension; returns 0 (null) when the object is invalid.
  Tag* VCardUpdate::tag() const
  {
    if( !m_valid )
      return 0;

    Tag* x = new Tag( "x", XMLNS, XMLNS_X_VCARD_UPDATE );
    if( !m_notReady )
    {
      Tag* p = new Tag( x, "photo" );
      if( !m_noImage )
        p->setCData( m_hash );
    }
    return x;
  }
}
| segfault/gloox-clone | src/vcardupdate.cpp | C++ | gpl-2.0 | 1,874 |
<?php
/**
* TestLink Open Source Project - http://testlink.sourceforge.net/
* $Id: keywordBarChart.php,v 1.16.2.1 2010/12/10 15:52:23 franciscom Exp $
*
* @author Kevin Levy
*
* - PHP autoload feature is used to load classes on demand
*
* @internal revisions
*
*/
require_once('../../config.inc.php');
require_once('common.php');
require_once('charts.inc.php');
testlinkInitPage($db,true,false,"checkRights");

// Chart configuration: sizes and title come from the
// results.charts.dimensions.keywordBarChart config section.
$cfg = new stdClass();
$cfg->scale = new stdClass();

$chart_cfg = config_get('results');
$chart_cfg = $chart_cfg['charts']['dimensions']['keywordBarChart'];

$cfg->chartTitle = lang_get($chart_cfg['chartTitle']);
$cfg->XSize = $chart_cfg['XSize'];
$cfg->YSize = $chart_cfg['YSize'];
$cfg->beginX = $chart_cfg['beginX'];
$cfg->beginY = $chart_cfg['beginY'];
$cfg->scale->legendXAngle = $chart_cfg['legendXAngle'];

// Parse the request, gather per-keyword data and render the bar chart.
$args = init_args();
$info = getDataAndScale($db,$args);
createChart($info,$cfg);
/**
 * Builds the chart data set (per-keyword execution status totals) and the
 * default scale information for the keyword bar chart.
 *
 * @param resource &$dbHandler database handler
 * @param stdClass $argsObj    request arguments; only tplan_id is used
 *
 * @return stdClass payload with: canDraw flag, xAxis (keyword names in
 *                  alphabetical order), chart_data / series_label /
 *                  series_color per status, and scale defaults.
 */
function getDataAndScale(&$dbHandler,$argsObj)
{
    $resultsCfg = config_get('results');

    $obj = new stdClass();
    $items = array();
    $totals = null;

    $metricsMgr = new tlTestPlanMetrics($dbHandler);
    $dummy = $metricsMgr->getStatusTotalsByKeywordForRender($argsObj->tplan_id);

    // The chart can only be drawn when at least one keyword has data.
    $obj->canDraw = false;
    if( !is_null($dummy) )
    {
        $dataSet = $dummy->info;
        $obj->canDraw = !is_null($dataSet) && (count($dataSet) > 0);
    }

    if($obj->canDraw)
    {
        // Index keywords by name to enable alphabetical order on the X axis.
        foreach($dataSet as $keyword_id => $elem)
        {
            $item_descr[$elem['name']] = $keyword_id;
        }
        ksort($item_descr);

        foreach($item_descr as $name => $keyword_id)
        {
            $items[] = htmlspecialchars($name);
            foreach($dataSet[$keyword_id]['details'] as $status => $value)
            {
                $totals[$status][] = $value['qty'];
            }
        }
    }

    $obj->xAxis = new stdClass();
    $obj->xAxis->values = $items;
    $obj->xAxis->serieName = 'Serie8';

    $obj->series_color = null;

    // Scale defaults; the chart library derives the effective scale later.
    $obj->scale = new stdClass();
    $obj->scale->maxY = 0;
    $obj->scale->minY = 0;
    $obj->scale->divisions = 0;

    if(!is_null($totals))
    {
        // One data series (with label and optional colour) per status.
        foreach($totals as $status => $values)
        {
            $obj->chart_data[] = $values;
            $obj->series_label[] = lang_get($resultsCfg['status_label'][$status]);
            if( isset($resultsCfg['charts']['status_colour'][$status]) )
            {
                $obj->series_color[] = $resultsCfg['charts']['status_colour'][$status];
            }
        }
    }

    return $obj;
}
/**
 * Read and sanitize the request parameters used by this page.
 *
 * @return stdClass object carrying ->tplan_id (int) and, when the
 *                  'debug' parameter is present, ->debug = 'yes'
 */
function init_args()
{
  $args = new stdClass();
  $args->tplan_id = intval($_REQUEST['tplan_id']);
  if (isset($_REQUEST['debug'])) {
    $args->debug = 'yes';
  }
  return $args;
}
/**
 * Access gate used by testlinkInitPage(): the keyword chart is only
 * available to users holding the 'testplan_metrics' right.
 */
function checkRights(&$db,&$user)
{
return $user->hasRight($db,'testplan_metrics');
}
?> | TabbedOut/testlink-1.9.9 | lib/results/keywordBarChart.php | PHP | gpl-2.0 | 3,079 |
require 'migrate'
# Migration: renames the legacy "map bugs" schema to the newer "notes"
# terminology. Touches enums, tables, indexes, columns and foreign keys;
# statement order matters because foreign keys must be dropped before the
# columns/tables they reference are renamed, then re-added afterwards.
class RenameBugsToNotes < ActiveRecord::Migration
  def self.up
    rename_enumeration "map_bug_status_enum", "note_status_enum"
    rename_enumeration "map_bug_event_enum", "note_event_enum"
    rename_table :map_bugs, :notes
    rename_index :notes, "map_bugs_pkey", "notes_pkey"
    rename_index :notes, "map_bugs_changed_idx", "notes_updated_at_idx"
    rename_index :notes, "map_bugs_created_idx", "notes_created_at_idx"
    rename_index :notes, "map_bugs_tile_idx", "notes_tile_status_idx"
    # Foreign keys are keyed by column name, so drop them under the old
    # column names before renaming, then recreate them at the end.
    remove_foreign_key :map_bug_comment, [:bug_id], :map_bugs, [:id]
    rename_column :map_bug_comment, :author_id, :commenter_id
    remove_foreign_key :map_bug_comment, [:commenter_id], :users, [:id]
    rename_column :map_bug_comment, :commenter_id, :author_id
    rename_table :map_bug_comment, :note_comments
    rename_column :note_comments, :bug_id, :note_id
    rename_index :note_comments, "map_bug_comment_pkey", "note_comments_pkey"
    rename_index :note_comments, "map_bug_comment_id_idx", "note_comments_note_id_idx"
    add_foreign_key :note_comments, [:note_id], :notes, [:id]
    add_foreign_key :note_comments, [:author_id], :users, [:id]
  end
  # Exact mirror of self.up, applied in reverse order.
  def self.down
    remove_foreign_key :note_comments, [:author_id], :users, [:id]
    remove_foreign_key :note_comments, [:note_id], :notes, [:id]
    rename_index :note_comments, "note_comments_note_id_idx", "map_bug_comment_id_idx"
    rename_index :notes, "note_comments_pkey", "map_bug_comment_pkey"
    rename_column :note_comments, :note_id, :bug_id
    rename_table :note_comments, :map_bug_comment
    rename_column :map_bug_comment, :author_id, :commenter_id
    add_foreign_key :map_bug_comment, [:commenter_id], :users, [:id]
    rename_column :map_bug_comment, :commenter_id, :author_id
    add_foreign_key :map_bug_comment, [:bug_id], :notes, [:id]
    rename_index :notes, "notes_tile_status_idx", "map_bugs_tile_idx"
    rename_index :notes, "notes_created_at_idx", "map_bugs_created_idx"
    rename_index :notes, "notes_updated_at_idx", "map_bugs_changed_idx"
    rename_index :notes, "notes_pkey", "map_bugs_pkey"
    rename_table :notes, :map_bugs
    rename_enumeration "note_event_enum", "map_bug_event_enum"
    rename_enumeration "note_status_enum", "map_bug_status_enum"
  end
end
| anatoliegolovco/grmdemo | db/migrate/20110521142405_rename_bugs_to_notes.rb | Ruby | gpl-2.0 | 2,293 |
<?php
/*
V4.98 13 Feb 2008 (c) 2000-2008 John Lim (jlim#natsoft.com.my). All rights reserved.
Released under both BSD license and Lesser GPL library license.
Whenever there is any discrepancy between the two licenses,
the BSD license will take precedence. See License.txt.
Set tabs to 4 for best viewing.
Latest version is available at http://adodb.sourceforge.net
Library for basic performance monitoring and tuning
*/
// security - hide paths
if (!defined('ADODB_DIR')) die();
/*
MSSQL has moved most performance info to Performance Monitor
*/
// Performance-monitoring driver for MS SQL Server. Most counters are read
// from master.dbo.sysperfinfo; the $settings table is interpreted by the
// adodb_perf base class (format: label => array(category, sql-or-spec, note)).
class perf_mssql extends adodb_perf{
	var $sql1 = 'cast(sql1 as text)';
	var $createTableSQL = "CREATE TABLE adodb_logsql (
		created datetime NOT NULL,
		sql0 varchar(250) NOT NULL,
		sql1 varchar(4000) NOT NULL,
		params varchar(3000) NOT NULL,
		tracer varchar(500) NOT NULL,
		timer decimal(16,6) NOT NULL
	)";
	var $settings = array(
	'Ratios',
		'data cache hit ratio' => array('RATIO',
			"select round((a.cntr_value*100.0)/b.cntr_value,2) from master.dbo.sysperfinfo a, master.dbo.sysperfinfo b where a.counter_name = 'Buffer cache hit ratio' and b.counter_name='Buffer cache hit ratio base'",
			'=WarnCacheRatio'),
		'prepared sql hit ratio' => array('RATIO',
			array('dbcc cachestats','Prepared',1,100),
			''),
		'adhoc sql hit ratio' => array('RATIO',
			array('dbcc cachestats','Adhoc',1,100),
			''),
	'IO',
		'data reads' => array('IO',
			"select cntr_value from master.dbo.sysperfinfo where counter_name = 'Page reads/sec'"),
		'data writes' => array('IO',
			"select cntr_value from master.dbo.sysperfinfo where counter_name = 'Page writes/sec'"),
	'Data Cache',
		'data cache size' => array('DATAC',
			"select cntr_value*8192 from master.dbo.sysperfinfo where counter_name = 'Total Pages' and object_name='SQLServer:Buffer Manager'",
			'' ),
		'data cache blocksize' => array('DATAC',
			"select 8192",'page size'),
	'Connections',
		'current connections' => array('SESS',
			'=sp_who',
			''),
		'max connections' => array('SESS',
			"SELECT @@MAX_CONNECTIONS",
			''),
		false
	);
	// Constructor (PHP4 style). The ODBC driver cannot cast to text in the
	// log query, so fall back to the plain column there.
	function perf_mssql(&$conn)
	{
		if ($conn->dataProvider == 'odbc') {
			$this->sql1 = 'sql1';
			//$this->explain = false;
		}
		$this->conn =& $conn;
	}
	// Renders an HTML query-plan table for $sql via SET SHOWPLAN_ALL.
	// When $partial is a crc32 value, the matching logged statement is
	// looked up in adodb_logsql first. SQL logging is suspended while
	// explaining so the plan queries do not log themselves.
	function Explain($sql,$partial=false)
	{
		$save = $this->conn->LogSQL(false);
		if ($partial) {
			$sqlq = $this->conn->qstr($sql.'%');
			$arr = $this->conn->GetArray("select distinct sql1 from adodb_logsql where sql1 like $sqlq");
			if ($arr) {
				foreach($arr as $row) {
					$sql = reset($row);
					if (crc32($sql) == $partial) break;
				}
			}
		}
		$s = '<p><b>Explain</b>: '.htmlspecialchars($sql).'</p>';
		$this->conn->Execute("SET SHOWPLAN_ALL ON;");
		// Bind placeholders cannot be planned; substitute empty strings.
		$sql = str_replace('?',"''",$sql);
		global $ADODB_FETCH_MODE;
		$save = $ADODB_FETCH_MODE;
		$ADODB_FETCH_MODE = ADODB_FETCH_NUM;
		$rs =& $this->conn->Execute($sql);
		//adodb_printr($rs);
		$ADODB_FETCH_MODE = $save;
		if ($rs) {
			$rs->MoveNext();
			$s .= '<table bgcolor=white border=0 cellpadding="1" callspacing=0><tr><td nowrap align=center> Rows<td nowrap align=center> IO<td nowrap align=center> CPU<td align=left> Plan</tr>';
			while (!$rs->EOF) {
				$s .= '<tr><td>'.round($rs->fields[8],1).'<td>'.round($rs->fields[9],3).'<td align=right>'.round($rs->fields[10],3).'<td nowrap><pre>'.htmlspecialchars($rs->fields[0])."</td></pre></tr>\n"; ## NOTE CORRUPT </td></pre> tag is intentional!!!!
				$rs->MoveNext();
			}
			$s .= '</table>';
			$rs->NextRecordSet();
		}
		$this->conn->Execute("SET SHOWPLAN_ALL OFF;");
		$this->conn->LogSQL($save);
		$s .= $this->Tracer($sql);
		return $s;
	}
	// Builds an HTML table of per-table sizes by running sp_spaceused for
	// every user table (xtype='U').
	function Tables()
	{
		global $ADODB_FETCH_MODE;
		$save = $ADODB_FETCH_MODE;
		$ADODB_FETCH_MODE = ADODB_FETCH_NUM;
		//$this->conn->debug=1;
		$s = '<table border=1 bgcolor=white><tr><td><b>tablename</b></td><td><b>size_in_k</b></td><td><b>index size</b></td><td><b>reserved size</b></td></tr>';
		$rs1 = $this->conn->Execute("select distinct name from sysobjects where xtype='U'");
		if ($rs1) {
			while (!$rs1->EOF) {
				$tab = $rs1->fields[0];
				$tabq = $this->conn->qstr($tab);
				$rs2 = $this->conn->Execute("sp_spaceused $tabq");
				if ($rs2) {
					$s .= '<tr><td>'.$tab.'</td><td align=right>'.$rs2->fields[3].'</td><td align=right>'.$rs2->fields[4].'</td><td align=right>'.$rs2->fields[2].'</td></tr>';
					$rs2->Close();
				}
				$rs1->MoveNext();
			}
			$rs1->Close();
		}
		$ADODB_FETCH_MODE = $save;
		return $s.'</table>';
	}
	// Number of current sessions, counted from sp_who's result set.
	function sp_who()
	{
		$arr = $this->conn->GetArray('sp_who');
		return sizeof($arr);
	}
	// Wraps the base HealthCheck with trace flag 3604 so dbcc output is
	// sent to the client instead of the error log.
	function HealthCheck($cli=false)
	{
		$this->conn->Execute('dbcc traceon(3604)');
		$html = adodb_perf::HealthCheck($cli);
		$this->conn->Execute('dbcc traceoff(3604)');
		return $html;
	}
}
?>
| jcannava/bleedcrimson.net | photos/lib/adodb/perf/perf-mssql.inc.php | PHP | gpl-2.0 | 4,822 |
/*
* Copyright (C) 2008-2017 TrinityCore <http://www.trinitycore.org/>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "ScriptMgr.h"
#include "Chat.h"
#include "DatabaseEnv.h"
#include "Item.h"
#include "Language.h"
#include "Mail.h"
#include "ObjectMgr.h"
#include "Pet.h"
#include "Player.h"
#include "RBAC.h"
#include "WorldSession.h"
// Chat command script implementing the ".send" GM command family:
// .send mail / .send items / .send money / .send message.
class send_commandscript : public CommandScript
{
public:
    send_commandscript() : CommandScript("send_commandscript") { }

    // Registers the ".send" command table with its RBAC permissions.
    std::vector<ChatCommand> GetCommands() const override
    {
        static std::vector<ChatCommand> sendCommandTable =
        {
            { "items",   rbac::RBAC_PERM_COMMAND_SEND_ITEMS,   true, &HandleSendItemsCommand,   "" },
            { "mail",    rbac::RBAC_PERM_COMMAND_SEND_MAIL,    true, &HandleSendMailCommand,    "" },
            { "message", rbac::RBAC_PERM_COMMAND_SEND_MESSAGE, true, &HandleSendMessageCommand, "" },
            { "money",   rbac::RBAC_PERM_COMMAND_SEND_MONEY,   true, &HandleSendMoneyCommand,   "" },
        };
        static std::vector<ChatCommand> commandTable =
        {
            { "send", rbac::RBAC_PERM_COMMAND_SEND, false, nullptr, "", sendCommandTable },
        };
        return commandTable;
    }

    // Send mail by command
    // args format: name "subject text" "mail text"
    static bool HandleSendMailCommand(ChatHandler* handler, char const* args)
    {
        // format: name "subject text" "mail text"
        Player* target;
        ObjectGuid targetGuid;
        std::string targetName;
        // Consumes the player-name token from args (strtok state is set up
        // inside extractPlayerTarget; the strtok(nullptr, ...) calls below
        // continue from that state).
        if (!handler->extractPlayerTarget((char*)args, &target, &targetGuid, &targetName))
            return false;

        char* tail1 = strtok(nullptr, "");
        if (!tail1)
            return false;

        char const* msgSubject = handler->extractQuotedArg(tail1);
        if (!msgSubject)
            return false;

        char* tail2 = strtok(nullptr, "");
        if (!tail2)
            return false;

        char const* msgText = handler->extractQuotedArg(tail2);
        if (!msgText)
            return false;

        // msgSubject, msgText isn't NUL after prev. check
        std::string subject = msgSubject;
        std::string text    = msgText;

        // from console, use non-existing sender
        MailSender sender(MAIL_NORMAL, handler->GetSession() ? handler->GetSession()->GetPlayer()->GetGUID().GetCounter() : 0, MAIL_STATIONERY_GM);

        /// @todo Fix poor design
        SQLTransaction trans = CharacterDatabase.BeginTransaction();
        MailDraft(subject, text)
            .SendMailTo(trans, MailReceiver(target, targetGuid.GetCounter()), sender);

        CharacterDatabase.CommitTransaction(trans);

        std::string nameLink = handler->playerLink(targetName);
        handler->PSendSysMessage(LANG_MAIL_SENT, nameLink.c_str());
        return true;
    }

    // Send items by mail
    // args format: name "subject" "text" item1[:count1] ... item12[:count12]
    static bool HandleSendItemsCommand(ChatHandler* handler, char const* args)
    {
        // format: name "subject text" "mail text" item1[:count1] item2[:count2] ... item12[:count12]
        Player* receiver;
        ObjectGuid receiverGuid;
        std::string receiverName;
        if (!handler->extractPlayerTarget((char*)args, &receiver, &receiverGuid, &receiverName))
            return false;

        char* tail1 = strtok(nullptr, "");
        if (!tail1)
            return false;

        char const* msgSubject = handler->extractQuotedArg(tail1);
        if (!msgSubject)
            return false;

        char* tail2 = strtok(nullptr, "");
        if (!tail2)
            return false;

        char const* msgText = handler->extractQuotedArg(tail2);
        if (!msgText)
            return false;

        // msgSubject, msgText isn't NUL after prev. check
        std::string subject = msgSubject;
        std::string text    = msgText;

        // extract items
        typedef std::pair<uint32, uint32> ItemPair;
        typedef std::list< ItemPair > ItemPairs;
        ItemPairs items;

        // get all tail string
        char* tail = strtok(nullptr, "");

        // get from tail next item str
        // NOTE: strtok state is deliberately juggled here — the new tail is
        // grabbed before itemStr is re-tokenized on ':' below, because each
        // strtok with a non-null first argument resets the tokenizer.
        while (char* itemStr = strtok(tail, " "))
        {
            // and get new tail
            tail = strtok(nullptr, "");

            // parse item str
            char const* itemIdStr = strtok(itemStr, ":");
            char const* itemCountStr = strtok(nullptr, " ");

            uint32 itemId = atoi(itemIdStr);
            if (!itemId)
                return false;

            ItemTemplate const* item_proto = sObjectMgr->GetItemTemplate(itemId);
            if (!item_proto)
            {
                handler->PSendSysMessage(LANG_COMMAND_ITEMIDINVALID, itemId);
                handler->SetSentErrorMessage(true);
                return false;
            }

            uint32 itemCount = itemCountStr ? atoi(itemCountStr) : 1;
            if (itemCount < 1 || (item_proto->MaxCount > 0 && itemCount > uint32(item_proto->MaxCount)))
            {
                handler->PSendSysMessage(LANG_COMMAND_INVALID_ITEM_COUNT, itemCount, itemId);
                handler->SetSentErrorMessage(true);
                return false;
            }

            // Split requests that exceed the stack size into full stacks.
            while (itemCount > item_proto->GetMaxStackSize())
            {
                items.push_back(ItemPair(itemId, item_proto->GetMaxStackSize()));
                itemCount -= item_proto->GetMaxStackSize();
            }

            items.push_back(ItemPair(itemId, itemCount));

            if (items.size() > MAX_MAIL_ITEMS)
            {
                handler->PSendSysMessage(LANG_COMMAND_MAIL_ITEMS_LIMIT, MAX_MAIL_ITEMS);
                handler->SetSentErrorMessage(true);
                return false;
            }
        }

        // from console show nonexisting sender
        MailSender sender(MAIL_NORMAL, handler->GetSession() ? handler->GetSession()->GetPlayer()->GetGUID().GetCounter() : 0, MAIL_STATIONERY_GM);

        // fill mail
        MailDraft draft(subject, text);

        SQLTransaction trans = CharacterDatabase.BeginTransaction();

        for (ItemPairs::const_iterator itr = items.begin(); itr != items.end(); ++itr)
        {
            if (Item* item = Item::CreateItem(itr->first, itr->second, handler->GetSession() ? handler->GetSession()->GetPlayer() : 0))
            {
                item->SaveToDB(trans);                               // Save to prevent being lost at next mail load. If send fails, the item will be deleted.
                draft.AddItem(item);
            }
        }

        draft.SendMailTo(trans, MailReceiver(receiver, receiverGuid.GetCounter()), sender);
        CharacterDatabase.CommitTransaction(trans);

        std::string nameLink = handler->playerLink(receiverName);
        handler->PSendSysMessage(LANG_MAIL_SENT, nameLink.c_str());
        return true;
    }

    /// Send money by mail
    /// args format: name "subject text" "mail text" money
    static bool HandleSendMoneyCommand(ChatHandler* handler, char const* args)
    {
        /// format: name "subject text" "mail text" money
        Player* receiver;
        ObjectGuid receiverGuid;
        std::string receiverName;
        if (!handler->extractPlayerTarget((char*)args, &receiver, &receiverGuid, &receiverName))
            return false;

        char* tail1 = strtok(nullptr, "");
        if (!tail1)
            return false;

        char* msgSubject = handler->extractQuotedArg(tail1);
        if (!msgSubject)
            return false;

        char* tail2 = strtok(nullptr, "");
        if (!tail2)
            return false;

        char* msgText = handler->extractQuotedArg(tail2);
        if (!msgText)
            return false;

        char* moneyStr = strtok(nullptr, "");
        int32 money = moneyStr ? atoi(moneyStr) : 0;
        if (money <= 0)
            return false;

        // msgSubject, msgText isn't NUL after prev. check
        std::string subject = msgSubject;
        std::string text    = msgText;

        // from console show nonexisting sender
        MailSender sender(MAIL_NORMAL, handler->GetSession() ? handler->GetSession()->GetPlayer()->GetGUID().GetCounter() : 0, MAIL_STATIONERY_GM);

        SQLTransaction trans = CharacterDatabase.BeginTransaction();

        MailDraft(subject, text)
            .AddMoney(money)
            .SendMailTo(trans, MailReceiver(receiver, receiverGuid.GetCounter()), sender);

        CharacterDatabase.CommitTransaction(trans);

        std::string nameLink = handler->playerLink(receiverName);
        handler->PSendSysMessage(LANG_MAIL_SENT, nameLink.c_str());
        return true;
    }

    /// Send a message to a player in game
    /// args format: name message-text
    static bool HandleSendMessageCommand(ChatHandler* handler, char const* args)
    {
        /// - Find the player
        Player* player;
        if (!handler->extractPlayerTarget((char*)args, &player))
            return false;

        char* msgStr = strtok(nullptr, "");
        if (!msgStr)
            return false;

        /// - Check if player is logging out.
        if (player->GetSession()->isLogingOut())
        {
            handler->SendSysMessage(LANG_PLAYER_NOT_FOUND);
            handler->SetSentErrorMessage(true);
            return false;
        }

        /// - Send the message
        // Use SendAreaTriggerMessage for fastest delivery.
        player->GetSession()->SendAreaTriggerMessage("%s", msgStr);
        player->GetSession()->SendAreaTriggerMessage("|cffff0000[Message from administrator]:|r");

        // Confirmation message
        std::string nameLink = handler->GetNameLink(player);
        handler->PSendSysMessage(LANG_SENDMESSAGE, nameLink.c_str(), msgStr);
        return true;
    }
};
// Script-loader hook: the instance registers itself with the ScriptMgr
// in the CommandScript base constructor, so the pointer is not kept here.
void AddSC_send_commandscript()
{
    new send_commandscript();
}
| Effec7/Adamantium | src/server/scripts/Commands/cs_send.cpp | C++ | gpl-2.0 | 10,199 |
<?php
namespace TYPO3\CMS\Workspaces\Tests\Unit\Controller\Remote;
/*
* This file is part of the TYPO3 CMS project.
*
* It is free software; you can redistribute it and/or modify it under
* the terms of the GNU General Public License, either version 2
* of the License, or any later version.
*
* For the full copyright and license information, please read the
* LICENSE.txt file that was distributed with this source code.
*
* The TYPO3 project - inspiring people to share!
*/
use Prophecy\Argument;
use Prophecy\Prophecy\ObjectProphecy;
use TYPO3\CMS\Core\Resource\File;
use TYPO3\CMS\Core\Resource\FileReference;
use TYPO3\CMS\Core\Resource\ProcessedFile;
use TYPO3\CMS\Core\Utility\GeneralUtility;
/**
* RemoteServer test
*/
/**
 * RemoteServer test
 *
 * Exercises RemoteServer::prepareFileReferenceDifferences(), which renders
 * a word-level diff of sys_file_reference lists (live vs. workspace),
 * optionally as thumbnails.
 */
class RemoteServerTest extends \TYPO3\TestingFramework\Core\Unit\UnitTestCase
{
    /**
     * @var \TYPO3\CMS\Workspaces\Controller\Remote\RemoteServer
     */
    protected $subject;

    /**
     * Cache of prophesized file references, keyed by uid, so the same uid
     * always yields the identical mock instance within one test.
     *
     * @var FileReference[]|ObjectProphecy[]
     */
    protected $fileReferenceProphecies;

    /**
     * Set up
     */
    protected function setUp()
    {
        parent::setUp();
        $this->subject = $this->getAccessibleMock(\TYPO3\CMS\Workspaces\Controller\Remote\RemoteServer::class, ['__none']);
    }

    /**
     * Tear down.
     */
    protected function tearDown()
    {
        parent::tearDown();
        unset($this->subject);
        unset($this->fileReferenceProphecies);
    }

    /**
     * Each case: [live uid list, version uid list, use thumbnails?, expected
     * diff array or null when the lists are identical].
     *
     * @return array
     */
    public function prepareFileReferenceDifferencesAreCorrectDataProvider()
    {
        return [
            // without thumbnails
            'unchanged wo/thumbnails' => ['1,2,3,4', '1,2,3,4', false, null],
            'front addition wo/thumbnails' => ['1,2,3,4', '99,1,2,3,4', false, [
                'live' => '/img/1.png /img/2.png /img/3.png /img/4.png',
                'differences' => '<ins>/img/99.png </ins>/img/1.png /img/2.png /img/3.png /img/4.png',
            ]],
            'end addition wo/thumbnails' => ['1,2,3,4', '1,2,3,4,99', false, [
                'live' => '/img/1.png /img/2.png /img/3.png /img/4.png',
                'differences' => '/img/1.png /img/2.png /img/3.png /img/4.png <ins>/img/99.png </ins>',
            ]],
            'reorder wo/thumbnails' => ['1,2,3,4', '1,3,2,4', false, [
                'live' => '/img/1.png /img/2.png /img/3.png /img/4.png',
                'differences' => '/img/1.png <ins>/img/3.png </ins>/img/2.png <del>/img/3.png </del>/img/4.png',
            ]],
            'move to end wo/thumbnails' => ['1,2,3,4', '2,3,4,1', false, [
                'live' => '/img/1.png /img/2.png /img/3.png /img/4.png',
                'differences' => '<del>/img/1.png </del>/img/2.png /img/3.png /img/4.png <ins>/img/1.png </ins>',
            ]],
            'move to front wo/thumbnails' => ['1,2,3,4', '4,1,2,3', false, [
                'live' => '/img/1.png /img/2.png /img/3.png /img/4.png',
                'differences' => '<ins>/img/4.png </ins>/img/1.png /img/2.png /img/3.png <del>/img/4.png </del>',
            ]],
            'keep last wo/thumbnails' => ['1,2,3,4', '4', false, [
                'live' => '/img/1.png /img/2.png /img/3.png /img/4.png',
                'differences' => '<del>/img/1.png /img/2.png /img/3.png </del>/img/4.png',
            ]],
            // with thumbnails
            'unchanged w/thumbnails' => ['1,2,3,4', '1,2,3,4', true, null],
            'front addition w/thumbnails' => ['1,2,3,4', '99,1,2,3,4', true, [
                'live' => '<img src="/tmb/1.png" /> <img src="/tmb/2.png" /> <img src="/tmb/3.png" /> <img src="/tmb/4.png" />',
                'differences' => '<ins><img src="/tmb/99.png" /> </ins><img src="/tmb/1.png" /> <img src="/tmb/2.png" /> <img src="/tmb/3.png" /> <img src="/tmb/4.png" />',
            ]],
            'end addition w/thumbnails' => ['1,2,3,4', '1,2,3,4,99', true, [
                'live' => '<img src="/tmb/1.png" /> <img src="/tmb/2.png" /> <img src="/tmb/3.png" /> <img src="/tmb/4.png" />',
                'differences' => '<img src="/tmb/1.png" /> <img src="/tmb/2.png" /> <img src="/tmb/3.png" /> <img src="/tmb/4.png" /> <ins><img src="/tmb/99.png" /> </ins>',
            ]],
            'reorder w/thumbnails' => ['1,2,3,4', '1,3,2,4', true, [
                'live' => '<img src="/tmb/1.png" /> <img src="/tmb/2.png" /> <img src="/tmb/3.png" /> <img src="/tmb/4.png" />',
                'differences' => '<img src="/tmb/1.png" /> <ins><img src="/tmb/3.png" /> </ins><img src="/tmb/2.png" /> <del><img src="/tmb/3.png" /> </del><img src="/tmb/4.png" />',
            ]],
            'move to end w/thumbnails' => ['1,2,3,4', '2,3,4,1', true, [
                'live' => '<img src="/tmb/1.png" /> <img src="/tmb/2.png" /> <img src="/tmb/3.png" /> <img src="/tmb/4.png" />',
                'differences' => '<del><img src="/tmb/1.png" /> </del><img src="/tmb/2.png" /> <img src="/tmb/3.png" /> <img src="/tmb/4.png" /> <ins><img src="/tmb/1.png" /> </ins>',
            ]],
            'move to front w/thumbnails' => ['1,2,3,4', '4,1,2,3', true, [
                'live' => '<img src="/tmb/1.png" /> <img src="/tmb/2.png" /> <img src="/tmb/3.png" /> <img src="/tmb/4.png" />',
                'differences' => '<ins><img src="/tmb/4.png" /> </ins><img src="/tmb/1.png" /> <img src="/tmb/2.png" /> <img src="/tmb/3.png" /> <del><img src="/tmb/4.png" /> </del>',
            ]],
            'keep last w/thumbnails' => ['1,2,3,4', '4', true, [
                'live' => '<img src="/tmb/1.png" /> <img src="/tmb/2.png" /> <img src="/tmb/3.png" /> <img src="/tmb/4.png" />',
                'differences' => '<del><img src="/tmb/1.png" /> <img src="/tmb/2.png" /> <img src="/tmb/3.png" /> </del><img src="/tmb/4.png" />',
            ]],
        ];
    }

    /**
     * @param string $fileFileReferenceList
     * @param string $versionFileReferenceList
     * @param $useThumbnails
     * @param array|null $expected
     * @dataProvider prepareFileReferenceDifferencesAreCorrectDataProvider
     * @test
     */
    public function prepareFileReferenceDifferencesAreCorrect($fileFileReferenceList, $versionFileReferenceList, $useThumbnails, array $expected = null)
    {
        $liveFileReferences = $this->getFileReferenceProphecies($fileFileReferenceList);
        $versionFileReferences = $this->getFileReferenceProphecies($versionFileReferenceList);

        // _call invokes the protected method on the accessible mock.
        $result = $this->subject->_call(
            'prepareFileReferenceDifferences',
            $liveFileReferences,
            $versionFileReferences,
            $useThumbnails
        );

        $this->assertSame($expected, $result);
    }

    /**
     * @param string $idList List of ids
     * @return FileReference[]|ObjectProphecy[]
     */
    protected function getFileReferenceProphecies($idList)
    {
        $fileReferenceProphecies = [];
        $ids = GeneralUtility::trimExplode(',', $idList, true);
        foreach ($ids as $id) {
            $fileReferenceProphecies[$id] = $this->getFileReferenceProphecy($id);
        }
        return $fileReferenceProphecies;
    }

    /**
     * Builds (and memoizes) a FileReference mock whose public URL is
     * /img/{id}.png and whose processed thumbnail URL is /tmb/{id}.png.
     *
     * @param int $id
     * @return ObjectProphecy|FileReference
     */
    protected function getFileReferenceProphecy($id)
    {
        if (isset($this->fileReferenceProphecies[$id])) {
            return $this->fileReferenceProphecies[$id];
        }
        $processedFileProphecy = $this->prophesize(ProcessedFile::class);
        $processedFileProphecy->getPublicUrl(Argument::cetera())->willReturn('/tmb/' . $id . '.png');
        $fileProphecy = $this->prophesize(File::class);
        $fileProphecy->process(Argument::cetera())->willReturn($processedFileProphecy->reveal());
        $fileReferenceProphecy = $this->prophesize(FileReference::class);
        $fileReferenceProphecy->getUid()->willReturn($id);
        $fileReferenceProphecy->getOriginalFile()->willReturn($fileProphecy->reveal());
        $fileReferenceProphecy->getPublicUrl(Argument::cetera())->willReturn('/img/' . $id . '.png');
        $this->fileReferenceProphecies[$id] = $fileReferenceProphecy->reveal();
        return $this->fileReferenceProphecies[$id];
    }
}
| morinfa/TYPO3.CMS | typo3/sysext/workspaces/Tests/Unit/Controller/Remote/RemoteServerTest.php | PHP | gpl-2.0 | 8,153 |
<?php
/**
* Summon record fallback loader
*
* PHP version 7
*
* Copyright (C) Villanova University 2018.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* @category VuFind
* @package Record
* @author Demian Katz <demian.katz@villanova.edu>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link https://vufind.org Main Site
*/
namespace VuFind\Record\FallbackLoader;
use SerialsSolutions\Summon\Zend2 as Connector;
use VuFind\Db\Table\Resource;
use VuFindSearch\Backend\Summon\Backend;
use VuFindSearch\ParamBag;
/**
* Summon record fallback loader
*
* @category VuFind
* @package Record
* @author Demian Katz <demian.katz@villanova.edu>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link https://vufind.org Main Site
*/
class Summon implements FallbackLoaderInterface
{
    /**
     * Resource table
     *
     * @var Resource
     */
    protected $table;

    /**
     * Summon backend
     *
     * @var Backend
     */
    protected $backend;

    /**
     * Constructor
     *
     * @param Resource $table   Resource database table object
     * @param Backend  $backend Summon search backend
     */
    public function __construct(Resource $table, Backend $backend)
    {
        $this->table = $table;
        $this->backend = $backend;
    }

    /**
     * Given an array of IDs that failed to load, try to find them using a
     * fallback mechanism.
     *
     * @param array $ids IDs to load
     *
     * @return array
     */
    public function load($ids)
    {
        $retVal = [];
        foreach ($ids as $id) {
            // fetchSingleRecord returns an (iterable) record collection with
            // zero or one entries, so this inner loop runs at most once per id.
            foreach ($this->fetchSingleRecord($id) as $record) {
                $this->updateRecord($record, $id);
                $retVal[] = $record;
            }
        }
        return $retVal;
    }

    /**
     * Fetch a single record (null if not found).
     *
     * Looks up the stored Summon "bookmark" for the obsolete id in the
     * resource table's extra_metadata JSON and retrieves the record from
     * the backend by bookmark; returns an empty collection otherwise.
     *
     * @param string $id ID to load
     *
     * @return \VuFindSearch\Response\RecordCollectionInterface
     */
    protected function fetchSingleRecord($id)
    {
        $resource = $this->table->findResource($id, 'Summon');
        if ($resource && ($extra = json_decode($resource->extra_metadata, true))) {
            $bookmark = $extra['bookmark'] ?? '';
            if (strlen($bookmark) > 0) {
                $params = new ParamBag(
                    ['summonIdType' => Connector::IDENTIFIER_BOOKMARK]
                );
                return $this->backend->retrieve($bookmark, $params);
            }
        }
        // Nothing found: empty collection keeps load()'s loop a no-op.
        return new \VuFindSearch\Backend\Summon\Response\RecordCollection([]);
    }

    /**
     * When a record ID has changed, update the record driver and database to
     * reflect the changes.
     *
     * @param \VuFind\RecordDriver\AbstractBase $record     Record to update
     * @param string                            $previousId Old ID of record
     *
     * @return void
     */
    protected function updateRecord($record, $previousId)
    {
        // Update the record driver with knowledge of the previous identifier...
        $record->setPreviousUniqueId($previousId);

        // Update the database to replace the obsolete identifier...
        $this->table->updateRecordId($previousId, $record->getUniqueId(), 'Summon');
    }
}
| samueloph/vufind | module/VuFind/src/VuFind/Record/FallbackLoader/Summon.php | PHP | gpl-2.0 | 3,913 |
<?php
/**
* Side Box Template
*
* @copyright Copyright 2003-2020 Zen Cart Development Team
* @copyright Portions Copyright 2003 osCommerce
* @license http://www.zen-cart.com/license/2_0.txt GNU Public License V2.0
* @version $Id: DrByte 2020 Jun 19 Modified in v1.5.7 $
*/
// Build the sidebox markup: one list item per previously-ordered product,
// each linking to the product info page plus a cart icon that re-adds the
// product via the 'cust_order' action. Markup must stay byte-compatible
// with the responsive_classic template's CSS selectors.
$content = "";
$content .= '<div id="' . str_replace('_', '-', $box_id . 'Content') . '" class="sideBoxContent">' . "\n";
$content .= '<ul class="list-links orderHistList">' . "\n" ;
foreach ($customer_orders as $row) {
  $content .= '
  <li>
  <a href="' . zen_href_link(zen_get_info_page($row['id']), 'products_id=' . $row['id']) . '">' . $row['name'] . '</a>
  <a href="' . zen_href_link($_GET['main_page'], zen_get_all_get_params(array('action')) . 'action=cust_order&pid=' . $row['id']) . '"><i class="fa fa-cart-arrow-down"></i></a>
  </li>
  ';
}
$content .= '</ul>' . "\n" ;
$content .= '</div>';
| barco57/zencart | includes/templates/responsive_classic/sideboxes/tpl_order_history.php | PHP | gpl-2.0 | 879 |
#include "expire-tiles.hpp"
#include "options.hpp"
#include <iterator>
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
#include <stdlib.h>
#include <stdexcept>
#include <boost/format.hpp>
#include <set>
#define EARTH_CIRCUMFERENCE (40075016.68)
namespace {
// Executes one named test case: prints the name to stderr, runs the
// function, then reports PASS. If the test throws a std::exception, its
// message and FAIL are printed and the whole process exits with failure.
void run_test(const char* test_name, void (*testfunc)())
{
    fprintf(stderr, "%s\n", test_name);
    try
    {
        testfunc();
    }
    catch (const std::exception& ex)
    {
        fprintf(stderr, "%s\nFAIL\n", ex.what());
        exit(EXIT_FAILURE);
    }
    fprintf(stderr, "PASS\n");
}
// Convenience wrapper: RUN_TEST(foo) expands to run_test("foo", &foo).
#define RUN_TEST(x) run_test(#x, &(x))
// Minimal equality assertion: on mismatch throws std::runtime_error
// (caught and reported by run_test) describing both expressions and values.
#define ASSERT_EQ(a, b) { if (!((a) == (b))) { throw std::runtime_error((boost::format("Expecting %1% == %2%, but %3% != %4%") % #a % #b % (a) % (b)).str()); } }
// A slippy-map tile coordinate (zoom, x, y). Conversion helpers map tile
// coordinates to spherical-mercator meters using the EARTH_CIRCUMFERENCE
// constant defined above (world spans one circumference, centered on 0).
struct xyz {
  int z, x, y;
  xyz(int z_, int x_, int y_) : z(z_), x(x_), y(y_) {}
  bool operator==(const xyz &other) const {
    return ((z == other.z) &&
            (x == other.x) &&
            (y == other.y));
  }
  // Lexicographic (z, x, y) ordering so tiles sort by zoom first;
  // required for storage in std::set.
  bool operator<(const xyz &other) const {
    return ((z < other.z) ||
            ((z == other.z) &&
             ((x < other.x) ||
              ((x == other.x) &&
               (y < other.y)))));
  }
  // Bounding box of this tile in mercator meters. datum shifts the
  // origin to the center of the 2^z x 2^z tile grid; y is flipped
  // because tile y grows southward while mercator y grows northward.
  void to_bbox(double &x0, double &y0,
               double &x1, double &y1) const {
    const double datum = 0.5 * (1 << z);
    const double scale = EARTH_CIRCUMFERENCE / (1 << z);
    x0 = (x - datum) * scale;
    y0 = (datum - (y + 1)) * scale;
    x1 = ((x + 1) - datum) * scale;
    y1 = (datum - y) * scale;
  }
  // Center point of this tile in mercator meters (same mapping as to_bbox).
  void to_centroid(double &x0, double &y0) const {
    const double datum = 0.5 * (1 << z);
    const double scale = EARTH_CIRCUMFERENCE / (1 << z);
    x0 = ((x + 0.5) - datum) * scale;
    y0 = (datum - (y + 0.5)) * scale;
  }
};
// Formats a tile as "z/x/y", matching the usual slippy-map URL layout.
std::ostream &operator<<(std::ostream &out, const xyz &tile) {
  return out << tile.z << "/" << tile.x << "/" << tile.y;
}
// Test sink for expire_tiles output: collects every dirtied tile into an
// ordered std::set for later comparison against expected tile sets.
struct tile_output_set : public expire_tiles::tile_output {
  tile_output_set() {}

  virtual ~tile_output_set() {}

  // Records a dirty tile. When the tile's zoom is below min_zoom, it is
  // expanded into all (2^(min_zoom-zoom))^2 descendant tiles at min_zoom,
  // so the set always holds tiles at max(zoom, min_zoom).
  virtual void output_dirty_tile(int x, int y, int zoom, int min_zoom) {
    int y_min, x_iter, y_iter, x_max, y_max, out_zoom, zoom_diff;

    if (zoom > min_zoom) out_zoom = zoom;
    else out_zoom = min_zoom;
    zoom_diff = out_zoom - zoom;
    y_min = y << zoom_diff;
    x_max = (x + 1) << zoom_diff;
    y_max = (y + 1) << zoom_diff;
    for (x_iter = x << zoom_diff; x_iter < x_max; x_iter++) {
      for (y_iter = y_min; y_iter < y_max; y_iter++) {
        m_tiles.insert(xyz(out_zoom, x_iter, y_iter));
      }
    }
  }

  std::set<xyz> m_tiles;
};
// Expiring a bbox spanning the origin at zoom 1 must dirty all four
// world quadrants (tiles 1/0/0, 1/0/1, 1/1/0, 1/1/1).
void test_expire_simple_z1() {
  options_t opt;
  opt.expire_tiles_zoom = 1;
  opt.expire_tiles_zoom_min = 1;

  expire_tiles et(&opt);
  tile_output_set set;

  // as big a bbox as possible at the origin to dirty all four
  // quadrants of the world.
  et.from_bbox(-10000, -10000, 10000, 10000);
  et.output_and_destroy(&set);

  ASSERT_EQ(set.m_tiles.size(), 4);
  std::set<xyz>::iterator itr = set.m_tiles.begin();
  ASSERT_EQ(*itr, xyz(1, 0, 0)); ++itr;
  ASSERT_EQ(*itr, xyz(1, 0, 1)); ++itr;
  ASSERT_EQ(*itr, xyz(1, 1, 0)); ++itr;
  ASSERT_EQ(*itr, xyz(1, 1, 1)); ++itr;
}
// Same origin-spanning bbox at zoom 3: only the four tiles touching the
// origin (3/3..4/3..4) should be dirtied.
void test_expire_simple_z3() {
  options_t opt;
  opt.expire_tiles_zoom = 3;
  opt.expire_tiles_zoom_min = 3;

  expire_tiles et(&opt);
  tile_output_set set;

  // as big a bbox as possible at the origin to dirty all four
  // quadrants of the world.
  et.from_bbox(-10000, -10000, 10000, 10000);
  et.output_and_destroy(&set);

  ASSERT_EQ(set.m_tiles.size(), 4);
  std::set<xyz>::iterator itr = set.m_tiles.begin();
  ASSERT_EQ(*itr, xyz(3, 3, 3)); ++itr;
  ASSERT_EQ(*itr, xyz(3, 3, 4)); ++itr;
  ASSERT_EQ(*itr, xyz(3, 4, 3)); ++itr;
  ASSERT_EQ(*itr, xyz(3, 4, 4)); ++itr;
}
// At zoom 18 a tile is only ~150m across, so a 2m bbox at the origin
// dirties the four tiles around the grid center (index 2^18/2 = 131072).
void test_expire_simple_z18() {
  options_t opt;
  opt.expire_tiles_zoom = 18;
  opt.expire_tiles_zoom_min = 18;

  expire_tiles et(&opt);
  tile_output_set set;

  // dirty a smaller bbox this time, as at z18 the scale is
  // pretty small.
  et.from_bbox(-1, -1, 1, 1);
  et.output_and_destroy(&set);

  ASSERT_EQ(set.m_tiles.size(), 4);
  std::set<xyz>::iterator itr = set.m_tiles.begin();
  ASSERT_EQ(*itr, xyz(18, 131071, 131071)); ++itr;
  ASSERT_EQ(*itr, xyz(18, 131071, 131072)); ++itr;
  ASSERT_EQ(*itr, xyz(18, 131072, 131071)); ++itr;
  ASSERT_EQ(*itr, xyz(18, 131072, 131072)); ++itr;
}
// Generates `count` distinct random tiles at the given zoom. coord_mask
// keeps coordinates in [0, 2^zoom). Note: rand() is never seeded here, so
// each process run sees the same (deterministic) sequence — fine for tests.
std::set<xyz> generate_random(int zoom, size_t count) {
  size_t num = 0;
  std::set<xyz> set;
  const int coord_mask = (1 << zoom) - 1;

  while (num < count) {
    xyz item(zoom, rand() & coord_mask, rand() & coord_mask);
    // retry on duplicates until exactly `count` unique tiles collected
    if (set.count(item) == 0) {
      set.insert(item);
      ++num;
    }
  }

  return set;
}
void assert_tilesets_equal(const std::set<xyz> &a,
const std::set<xyz> &b) {
ASSERT_EQ(a.size(), b.size());
std::set<xyz>::const_iterator a_itr = a.begin();
std::set<xyz>::const_iterator b_itr = b.begin();
while ((a_itr != a.end()) &&
(b_itr != b.end())) {
ASSERT_EQ(*a_itr, *b_itr);
++a_itr;
++b_itr;
}
}
void expire_centroids(const std::set<xyz> &check_set,
expire_tiles &et) {
for (std::set<xyz>::const_iterator itr = check_set.begin();
itr != check_set.end(); ++itr) {
double x0 = 0.0, y0 = 0.0;
itr->to_centroid(x0, y0);
et.from_bbox(x0, y0, x0, y0);
}
}
// tests that expiring a set of tile centroids means that
// those tiles get expired.
void test_expire_set() {
    options_t opt;
    const int zoom = 18;
    opt.expire_tiles_zoom = zoom;
    opt.expire_tiles_zoom_min = zoom;

    // repeat with fresh random tile sets to cover many layouts.
    for (int round = 0; round < 100; ++round) {
        expire_tiles et(&opt);

        std::set<xyz> check_set = generate_random(zoom, 100);
        expire_centroids(check_set, et);

        tile_output_set set;
        et.output_and_destroy(&set);

        assert_tilesets_equal(set.m_tiles, check_set);
    }
}
// this tests that, after expiring a random set of tiles
// in one expire_tiles object and a different set in
// another, when they are merged together they are the
// same as if the union of the sets of tiles had been
// expired.
void test_expire_merge() {
options_t opt;
int zoom = 18;
opt.expire_tiles_zoom = zoom;
opt.expire_tiles_zoom_min = zoom;
for (int i = 0; i < 100; ++i) {
expire_tiles et(&opt), et1(&opt), et2(&opt);
tile_output_set set;
std::set<xyz> check_set1 = generate_random(zoom, 100);
expire_centroids(check_set1, et1);
std::set<xyz> check_set2 = generate_random(zoom, 100);
expire_centroids(check_set2, et2);
et.merge_and_destroy(et1);
et.merge_and_destroy(et2);
std::set<xyz> check_set;
std::set_union(check_set1.begin(), check_set1.end(),
check_set2.begin(), check_set2.end(),
std::inserter(check_set, check_set.end()));
et.output_and_destroy(&set);
assert_tilesets_equal(set.m_tiles, check_set);
}
}
// tests that merging two identical sets results in
// the same set. this guarantees that we check some
// pathways of the merging which possibly could be
// skipped by the random tile set in the previous
// test.
void test_expire_merge_same() {
    options_t opt;
    const int zoom = 18;
    opt.expire_tiles_zoom = zoom;
    opt.expire_tiles_zoom_min = zoom;

    for (int round = 0; round < 100; ++round) {
        expire_tiles et(&opt), et1(&opt), et2(&opt);

        // expire the very same tiles in both inputs.
        std::set<xyz> check_set = generate_random(zoom, 100);
        expire_centroids(check_set, et1);
        expire_centroids(check_set, et2);

        et.merge_and_destroy(et1);
        et.merge_and_destroy(et2);

        tile_output_set set;
        et.output_and_destroy(&set);
        assert_tilesets_equal(set.m_tiles, check_set);
    }
}
// makes sure that we're testing the case where some
// tiles are in both.
void test_expire_merge_overlap() {
options_t opt;
int zoom = 18;
opt.expire_tiles_zoom = zoom;
opt.expire_tiles_zoom_min = zoom;
for (int i = 0; i < 100; ++i) {
expire_tiles et(&opt), et1(&opt), et2(&opt);
tile_output_set set;
std::set<xyz> check_set1 = generate_random(zoom, 100);
expire_centroids(check_set1, et1);
std::set<xyz> check_set2 = generate_random(zoom, 100);
expire_centroids(check_set2, et2);
std::set<xyz> check_set3 = generate_random(zoom, 100);
expire_centroids(check_set3, et1);
expire_centroids(check_set3, et2);
et.merge_and_destroy(et1);
et.merge_and_destroy(et2);
std::set<xyz> check_set;
std::set_union(check_set1.begin(), check_set1.end(),
check_set2.begin(), check_set2.end(),
std::inserter(check_set, check_set.end()));
std::set_union(check_set1.begin(), check_set1.end(),
check_set3.begin(), check_set3.end(),
std::inserter(check_set, check_set.end()));
et.output_and_destroy(&set);
assert_tilesets_equal(set.m_tiles, check_set);
}
}
// checks that the set union still works when we expire
// large contiguous areas of tiles (i.e: ensure that we
// handle the "complete" flag correctly).
void test_expire_merge_complete() {
options_t opt;
int zoom = 18;
opt.expire_tiles_zoom = zoom;
opt.expire_tiles_zoom_min = zoom;
for (int i = 0; i < 100; ++i) {
expire_tiles et(&opt), et1(&opt), et2(&opt), et0(&opt);
tile_output_set set, set0;
// et1&2 are two halves of et0's box
et0.from_bbox(-10000, -10000, 10000, 10000);
et1.from_bbox(-10000, -10000, 0, 10000);
et2.from_bbox( 0, -10000, 10000, 10000);
et.merge_and_destroy(et1);
et.merge_and_destroy(et2);
et.output_and_destroy(&set);
et0.output_and_destroy(&set0);
assert_tilesets_equal(set.m_tiles, set0.m_tiles);
}
}
} // anonymous namespace
int main(int argc, char *argv[])
{
// fixed seed so the "random" tile sets are identical on every run and
// failures are reproducible
srand(0);
//try each test if any fail we will exit
// NOTE(review): RUN_TEST presumably aborts/exits on failure -- it is
// defined by the test harness, not visible in this file.
RUN_TEST(test_expire_simple_z1);
RUN_TEST(test_expire_simple_z3);
RUN_TEST(test_expire_simple_z18);
RUN_TEST(test_expire_set);
RUN_TEST(test_expire_merge);
RUN_TEST(test_expire_merge_same);
RUN_TEST(test_expire_merge_overlap);
RUN_TEST(test_expire_merge_complete);
//passed
return 0;
}
| MaxSem/osm2pgsql | tests/test-expire-tiles.cpp | C++ | gpl-2.0 | 10,053 |
<?php
/**
* Customizer settings for this theme.
*
* @package WordPress
* @subpackage Twenty_Twenty
* @since Twenty Twenty 1.0
*/
if ( ! class_exists( 'TwentyTwenty_Customize' ) ) {
/**
* CUSTOMIZER SETTINGS
*
* @since Twenty Twenty 1.0
*/
class TwentyTwenty_Customize {
/**
* Register customizer options.
*
* @since Twenty Twenty 1.0
*
* @param WP_Customize_Manager $wp_customize Theme Customizer object.
*/
public static function register( $wp_customize ) {
/**
* Site Title & Description.
* */
$wp_customize->get_setting( 'blogname' )->transport = 'postMessage';
$wp_customize->get_setting( 'blogdescription' )->transport = 'postMessage';
// Selective-refresh partials: re-render title, tagline and logo in
// place during live preview instead of reloading the whole page.
$wp_customize->selective_refresh->add_partial(
'blogname',
array(
'selector' => '.site-title a',
'render_callback' => 'twentytwenty_customize_partial_blogname',
)
);
$wp_customize->selective_refresh->add_partial(
'blogdescription',
array(
'selector' => '.site-description',
'render_callback' => 'twentytwenty_customize_partial_blogdescription',
)
);
$wp_customize->selective_refresh->add_partial(
'custom_logo',
array(
'selector' => '.header-titles [class*=site-]:not(.site-description)',
'render_callback' => 'twentytwenty_customize_partial_site_logo',
)
);
$wp_customize->selective_refresh->add_partial(
'retina_logo',
array(
'selector' => '.header-titles [class*=site-]:not(.site-description)',
'render_callback' => 'twentytwenty_customize_partial_site_logo',
)
);
/**
* Site Identity
*/
/* 2X Header Logo ---------------- */
$wp_customize->add_setting(
'retina_logo',
array(
'capability' => 'edit_theme_options',
'sanitize_callback' => array( __CLASS__, 'sanitize_checkbox' ),
'transport' => 'postMessage',
)
);
$wp_customize->add_control(
'retina_logo',
array(
'type' => 'checkbox',
'section' => 'title_tagline',
'priority' => 10,
'label' => __( 'Retina logo', 'twentytwenty' ),
'description' => __( 'Scales the logo to half its uploaded size, making it sharp on high-res screens.', 'twentytwenty' ),
)
);
// Header & Footer Background Color.
$wp_customize->add_setting(
'header_footer_background_color',
array(
'default' => '#ffffff',
'sanitize_callback' => 'sanitize_hex_color',
'transport' => 'postMessage',
)
);
$wp_customize->add_control(
new WP_Customize_Color_Control(
$wp_customize,
'header_footer_background_color',
array(
'label' => __( 'Header & Footer Background Color', 'twentytwenty' ),
'section' => 'colors',
)
)
);
// Enable picking an accent color.
$wp_customize->add_setting(
'accent_hue_active',
array(
'capability' => 'edit_theme_options',
'sanitize_callback' => array( __CLASS__, 'sanitize_select' ),
'transport' => 'postMessage',
'default' => 'default',
)
);
$wp_customize->add_control(
'accent_hue_active',
array(
'type' => 'radio',
'section' => 'colors',
'label' => __( 'Primary Color', 'twentytwenty' ),
'choices' => array(
'default' => _x( 'Default', 'color', 'twentytwenty' ),
'custom' => _x( 'Custom', 'color', 'twentytwenty' ),
),
)
);
/**
* Implementation for the accent color.
* This is different to all other color options because of the accessibility enhancements.
* The control is a hue-only colorpicker, and there is a separate setting that holds values
* for other colors calculated based on the selected hue and various background-colors on the page.
*
* @since Twenty Twenty 1.0
*/
// Add the setting for the hue colorpicker.
$wp_customize->add_setting(
'accent_hue',
array(
'default' => 344,
'type' => 'theme_mod',
'sanitize_callback' => 'absint',
'transport' => 'postMessage',
)
);
// Add setting to hold colors derived from the accent hue.
$wp_customize->add_setting(
'accent_accessible_colors',
array(
'default' => array(
'content' => array(
'text' => '#000000',
'accent' => '#cd2653',
'secondary' => '#6d6d6d',
'borders' => '#dcd7ca',
),
'header-footer' => array(
'text' => '#000000',
'accent' => '#cd2653',
'secondary' => '#6d6d6d',
'borders' => '#dcd7ca',
),
),
'type' => 'theme_mod',
'transport' => 'postMessage',
'sanitize_callback' => array( __CLASS__, 'sanitize_accent_accessible_colors' ),
)
);
// Add the hue-only colorpicker for the accent color.
$wp_customize->add_control(
new WP_Customize_Color_Control(
$wp_customize,
'accent_hue',
array(
'section' => 'colors',
'settings' => 'accent_hue',
'description' => __( 'Apply a custom color for links, buttons, featured images.', 'twentytwenty' ),
'mode' => 'hue',
// Show the hue picker only while "Custom" is the selected
// primary color above.
'active_callback' => function() use ( $wp_customize ) {
return ( 'custom' === $wp_customize->get_setting( 'accent_hue_active' )->value() );
},
)
)
);
// Update background color with postMessage, so inline CSS output is updated as well.
$wp_customize->get_setting( 'background_color' )->transport = 'postMessage';
/**
* Theme Options
*/
$wp_customize->add_section(
'options',
array(
'title' => __( 'Theme Options', 'twentytwenty' ),
'priority' => 40,
'capability' => 'edit_theme_options',
)
);
/* Enable Header Search ----------------------------------------------- */
$wp_customize->add_setting(
'enable_header_search',
array(
'capability' => 'edit_theme_options',
'default' => true,
'sanitize_callback' => array( __CLASS__, 'sanitize_checkbox' ),
)
);
$wp_customize->add_control(
'enable_header_search',
array(
'type' => 'checkbox',
'section' => 'options',
'priority' => 10,
'label' => __( 'Show search in header', 'twentytwenty' ),
)
);
/* Show author bio ---------------------------------------------------- */
$wp_customize->add_setting(
'show_author_bio',
array(
'capability' => 'edit_theme_options',
'default' => true,
'sanitize_callback' => array( __CLASS__, 'sanitize_checkbox' ),
)
);
$wp_customize->add_control(
'show_author_bio',
array(
'type' => 'checkbox',
'section' => 'options',
'priority' => 10,
'label' => __( 'Show author bio', 'twentytwenty' ),
)
);
/* Display full content or excerpts on the blog and archives --------- */
$wp_customize->add_setting(
'blog_content',
array(
'capability' => 'edit_theme_options',
'default' => 'full',
'sanitize_callback' => array( __CLASS__, 'sanitize_select' ),
)
);
$wp_customize->add_control(
'blog_content',
array(
'type' => 'radio',
'section' => 'options',
'priority' => 10,
'label' => __( 'On archive pages, posts show:', 'twentytwenty' ),
'choices' => array(
'full' => __( 'Full text', 'twentytwenty' ),
'summary' => __( 'Summary', 'twentytwenty' ),
),
)
);
/**
* Template: Cover Template.
*/
$wp_customize->add_section(
'cover_template_options',
array(
'title' => __( 'Cover Template', 'twentytwenty' ),
'capability' => 'edit_theme_options',
'description' => __( 'Settings for the "Cover Template" page template. Add a featured image to use as background.', 'twentytwenty' ),
'priority' => 42,
)
);
/* Overlay Fixed Background ------ */
$wp_customize->add_setting(
'cover_template_fixed_background',
array(
'capability' => 'edit_theme_options',
'default' => true,
'sanitize_callback' => array( __CLASS__, 'sanitize_checkbox' ),
'transport' => 'postMessage',
)
);
$wp_customize->add_control(
'cover_template_fixed_background',
array(
'type' => 'checkbox',
'section' => 'cover_template_options',
'label' => __( 'Fixed Background Image', 'twentytwenty' ),
'description' => __( 'Creates a parallax effect when the visitor scrolls.', 'twentytwenty' ),
)
);
$wp_customize->selective_refresh->add_partial(
'cover_template_fixed_background',
array(
'selector' => '.cover-header',
'type' => 'cover_fixed',
)
);
/* Separator --------------------- */
$wp_customize->add_setting(
'cover_template_separator_1',
array(
'sanitize_callback' => 'wp_filter_nohtml_kses',
)
);
$wp_customize->add_control(
new TwentyTwenty_Separator_Control(
$wp_customize,
'cover_template_separator_1',
array(
'section' => 'cover_template_options',
)
)
);
/* Overlay Background Color ------ */
$wp_customize->add_setting(
'cover_template_overlay_background_color',
array(
'default' => twentytwenty_get_color_for_area( 'content', 'accent' ),
'sanitize_callback' => 'sanitize_hex_color',
)
);
$wp_customize->add_control(
new WP_Customize_Color_Control(
$wp_customize,
'cover_template_overlay_background_color',
array(
'label' => __( 'Overlay Background Color', 'twentytwenty' ),
'description' => __( 'The color used for the overlay. Defaults to the accent color.', 'twentytwenty' ),
'section' => 'cover_template_options',
)
)
);
/* Overlay Text Color ------------ */
$wp_customize->add_setting(
'cover_template_overlay_text_color',
array(
'default' => '#ffffff',
'sanitize_callback' => 'sanitize_hex_color',
)
);
$wp_customize->add_control(
new WP_Customize_Color_Control(
$wp_customize,
'cover_template_overlay_text_color',
array(
'label' => __( 'Overlay Text Color', 'twentytwenty' ),
'description' => __( 'The color used for the text in the overlay.', 'twentytwenty' ),
'section' => 'cover_template_options',
)
)
);
/* Overlay Color Opacity --------- */
$wp_customize->add_setting(
'cover_template_overlay_opacity',
array(
'default' => 80,
'sanitize_callback' => 'absint',
'transport' => 'postMessage',
)
);
$wp_customize->add_control(
'cover_template_overlay_opacity',
array(
'label' => __( 'Overlay Opacity', 'twentytwenty' ),
'description' => __( 'Make sure that the contrast is high enough so that the text is readable.', 'twentytwenty' ),
'section' => 'cover_template_options',
'type' => 'range',
'input_attrs' => twentytwenty_customize_opacity_range(),
)
);
$wp_customize->selective_refresh->add_partial(
'cover_template_overlay_opacity',
array(
'selector' => '.cover-color-overlay',
'type' => 'cover_opacity',
)
);
}
/**
* Sanitization callback for the "accent_accessible_colors" setting.
*
* @since Twenty Twenty 1.0
*
* @param array $value The value we want to sanitize.
* @return array Returns sanitized value. Each item in the array gets sanitized separately.
*/
public static function sanitize_accent_accessible_colors( $value ) {
// Make sure the value is an array. Do not typecast, use empty array as fallback.
$value = is_array( $value ) ? $value : array();
// Loop values.
foreach ( $value as $area => $values ) {
foreach ( $values as $context => $color_val ) {
$value[ $area ][ $context ] = sanitize_hex_color( $color_val );
}
}
return $value;
}
/**
* Sanitize select.
*
* @since Twenty Twenty 1.0
*
* @param string $input The input from the setting.
* @param object $setting The selected setting.
* @return string The input from the setting or the default setting.
*/
public static function sanitize_select( $input, $setting ) {
$input = sanitize_key( $input );
// Only accept values that are declared as choices on the control;
// anything else falls back to the setting's default.
$choices = $setting->manager->get_control( $setting->id )->choices;
return ( array_key_exists( $input, $choices ) ? $input : $setting->default );
}
/**
* Sanitize boolean for checkbox.
*
* @since Twenty Twenty 1.0
*
* @param bool $checked Whether or not a box is checked.
* @return bool
*/
public static function sanitize_checkbox( $checked ) {
return ( ( isset( $checked ) && true === $checked ) ? true : false );
}
}
// Setup the Theme Customizer settings and controls.
add_action( 'customize_register', array( 'TwentyTwenty_Customize', 'register' ) );
}
/**
* PARTIAL REFRESH FUNCTIONS
* */
if ( ! function_exists( 'twentytwenty_customize_partial_blogname' ) ) {
/**
* Render the site title for the selective refresh partial.
*
* @since Twenty Twenty 1.0
*/
function twentytwenty_customize_partial_blogname() {
// Echoes the site name (bloginfo() outputs rather than returns);
// the markup is injected into the '.site-title a' selector.
bloginfo( 'name' );
}
}
if ( ! function_exists( 'twentytwenty_customize_partial_blogdescription' ) ) {
/**
* Render the site description for the selective refresh partial.
*
* @since Twenty Twenty 1.0
*/
function twentytwenty_customize_partial_blogdescription() {
// Echoes the tagline; injected into the '.site-description' selector.
bloginfo( 'description' );
}
}
if ( ! function_exists( 'twentytwenty_customize_partial_site_logo' ) ) {
/**
* Render the site logo for the selective refresh partial.
*
* Doing it this way so we don't have issues with `render_callback`'s arguments.
*
* @since Twenty Twenty 1.0
*/
function twentytwenty_customize_partial_site_logo() {
// Delegates to the theme's logo template tag (defined elsewhere in
// the theme) so the partial callback needs no arguments.
twentytwenty_site_logo();
}
}
/**
 * Input attributes for cover overlay opacity option.
 *
 * Used as the `input_attrs` of the "Overlay Opacity" range control.
 *
 * @since Twenty Twenty 1.0
 *
 * @return array Array containing attribute names and their values.
 */
function twentytwenty_customize_opacity_range() {
	// Range-control bounds: 0 to 90, stepping by 5.
	$range_defaults = array(
		'min'  => 0,
		'max'  => 90,
		'step' => 5,
	);

	/**
	 * Filters the input attributes for opacity.
	 *
	 * @since Twenty Twenty 1.0
	 *
	 * @param array $attrs {
	 *     The attributes.
	 *
	 *     @type int $min Minimum value.
	 *     @type int $max Maximum value.
	 *     @type int $step Interval between numbers.
	 * }
	 */
	return apply_filters( 'twentytwenty_customize_opacity_range', $range_defaults );
}
| rasken2003/fuga-it-business | wp-content/themes/twentytwenty/classes/class-twentytwenty-customize.php | PHP | gpl-2.0 | 14,593 |
# Page object for the article-creation flow in the browser tests.
class CreateArticlePage
include PageObject

# The URL is an ERB template -- presumably interpolated from
# params[:article_name] by the page-object gem at navigation time
# (TODO confirm against the step definitions).
page_url '<%=params[:article_name]%>'

# Link shown when the requested title has no article yet; its presence
# signals that the page does not exist.
a(:doesnotexist_msg, text: 'Look for pages within Wikipedia that link to this title')
end
| paladox/mediawiki-extensions-MobileFrontend | tests/browser/features/support/pages/create_article_page.rb | Ruby | gpl-2.0 | 179 |